/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI sockets. */

#include <linux/export.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_mon.h>
#include <net/bluetooth/mgmt.h>

#include "mgmt_util.h"

static LIST_HEAD(mgmt_chan_list);
static DEFINE_MUTEX(mgmt_chan_list_lock);

static atomic_t monitor_promisc = ATOMIC_INIT(0);

/* ----- HCI socket interface ----- */

/* Socket info */
#define hci_pi(sk) ((struct hci_pinfo *) sk)

struct hci_pinfo {
	struct bt_sock bt;
	struct hci_dev *hdev;
	struct hci_filter filter;
	__u32 cmsg_mask;
	unsigned short channel;
	unsigned long flags;
};

void hci_sock_set_flag(struct sock *sk, int nr)
{
	set_bit(nr, &hci_pi(sk)->flags);
}

void hci_sock_clear_flag(struct sock *sk, int nr)
{
	clear_bit(nr, &hci_pi(sk)->flags);
}

int hci_sock_test_flag(struct sock *sk, int nr)
{
	return test_bit(nr, &hci_pi(sk)->flags);
}

unsigned short hci_sock_get_channel(struct sock *sk)
{
	return hci_pi(sk)->channel;
}

static inline int hci_test_bit(int nr, const void *addr)
{
	return *((const __u32 *) addr + (nr >> 5)) & ((__u32) 1 << (nr & 31));
}
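
/* Example: hci_test_bit(37, mask) reads word (37 >> 5) == 1 of the
 * __u32 array and tests bit (37 & 31) == 5 within it, i.e. bit 5 of
 * the second 32-bit word.
 */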

/* Security filter */
#define HCI_SFLT_MAX_OGF 5

struct hci_sec_filter {
	__u32 type_mask;
	__u32 event_mask[2];
	__u32 ocf_mask[HCI_SFLT_MAX_OGF + 1][4];
};

static const struct hci_sec_filter hci_sec_filter = {
	/* Packet types */
	0x10,
	/* Events */
	{ 0x1000d9fe, 0x0000b00c },
	/* Commands */
	{
		{ 0x0 },
		/* OGF_LINK_CTL */
		{ 0xbe000006, 0x00000001, 0x00000000, 0x00 },
		/* OGF_LINK_POLICY */
		{ 0x00005200, 0x00000000, 0x00000000, 0x00 },
		/* OGF_HOST_CTL */
		{ 0xaab00200, 0x2b402aaa, 0x05220154, 0x00 },
		/* OGF_INFO_PARAM */
		{ 0x000002be, 0x00000000, 0x00000000, 0x00 },
		/* OGF_STATUS_PARAM */
		{ 0x000000ea, 0x00000000, 0x00000000, 0x00 }
	}
};
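
/* Each ocf_mask[ogf] row above is a 128-bit bitmap (4 x 32 bits) of the
 * OCFs that unprivileged raw sockets may send; hci_sock_sendmsg() below
 * tests bit (ocf & HCI_FLT_OCF_BITS) of row ogf. Worked example:
 * HCI_Inquiry is OGF 0x01 / OCF 0x0001, and bit 1 of 0xbe000006 in the
 * OGF_LINK_CTL row is set, so it is allowed without CAP_NET_RAW.
 */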

static struct bt_sock_list hci_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(hci_sk_list.lock)
};

static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
{
	struct hci_filter *flt;
	int flt_type, flt_event;

	/* Apply filter */
	flt = &hci_pi(sk)->filter;

	flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

	if (!test_bit(flt_type, &flt->type_mask))
		return true;

	/* Extra filter for event packets only */
	if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT)
		return false;

	flt_event = (*(__u8 *)skb->data & HCI_FLT_EVENT_BITS);

	if (!hci_test_bit(flt_event, &flt->event_mask))
		return true;

	/* Check filter only when opcode is set */
	if (!flt->opcode)
		return false;

	if (flt_event == HCI_EV_CMD_COMPLETE &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 3)))
		return true;

	if (flt_event == HCI_EV_CMD_STATUS &&
	    flt->opcode != get_unaligned((__le16 *)(skb->data + 4)))
		return true;

	return false;
}
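
/* Minimal userspace sketch of installing such a filter (assumes BlueZ's
 * <bluetooth/bluetooth.h> and <bluetooth/hci.h> definitions of
 * struct hci_filter, SOL_HCI and HCI_FILTER):
 *
 *	struct hci_filter flt;
 *
 *	memset(&flt, 0, sizeof(flt));
 *	flt.type_mask = 1 << HCI_EVENT_PKT;	   // events only
 *	flt.event_mask[0] = 1 << EVT_CMD_COMPLETE; // Command Complete only
 *	setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt));
 *
 * Without CAP_NET_RAW, hci_sock_setsockopt() below additionally ANDs
 * the requested masks with hci_sec_filter.
 */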

/* Send frame to RAW socket */
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sock *sk;
	struct sk_buff *skb_copy = NULL;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		if (sk->sk_state != BT_BOUND || hci_pi(sk)->hdev != hdev)
			continue;

		/* Don't send frame to the socket it came from */
		if (skb->sk == sk)
			continue;

		if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
			if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
			    bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
			if (is_filtered_packet(sk, skb))
				continue;
		} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			if (!bt_cb(skb)->incoming)
				continue;
			if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
			    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
			    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
				continue;
		} else {
			/* Don't send frame to other channel types */
			continue;
		}

		if (!skb_copy) {
			/* Create a private copy with headroom */
			skb_copy = __pskb_copy_fclone(skb, 1, GFP_ATOMIC, true);
			if (!skb_copy)
				continue;

			/* Put type byte before the data */
			memcpy(skb_push(skb_copy, 1), &bt_cb(skb)->pkt_type, 1);
		}

		nskb = skb_clone(skb_copy, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);

	kfree_skb(skb_copy);
}

/* Send frame to sockets with specific channel */
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
			 int flag, struct sock *skip_sk)
{
	struct sock *sk;

	BT_DBG("channel %u len %d", channel, skb->len);

	read_lock(&hci_sk_list.lock);

	sk_for_each(sk, &hci_sk_list.head) {
		struct sk_buff *nskb;

		/* Ignore sockets without the flag set */
		if (!hci_sock_test_flag(sk, flag))
			continue;

		/* Skip the original socket */
		if (sk == skip_sk)
			continue;

		if (sk->sk_state != BT_BOUND)
			continue;

		if (hci_pi(sk)->channel != channel)
			continue;

		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			continue;

		if (sock_queue_rcv_skb(sk, nskb))
			kfree_skb(nskb);
	}

	read_unlock(&hci_sk_list.lock);
}

/* Send frame to monitor socket */
void hci_send_to_monitor(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct sk_buff *skb_copy = NULL;
	struct hci_mon_hdr *hdr;
	__le16 opcode;

	if (!atomic_read(&monitor_promisc))
		return;

	BT_DBG("hdev %p len %d", hdev, skb->len);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		opcode = cpu_to_le16(HCI_MON_COMMAND_PKT);
		break;
	case HCI_EVENT_PKT:
		opcode = cpu_to_le16(HCI_MON_EVENT_PKT);
		break;
	case HCI_ACLDATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_ACL_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_ACL_TX_PKT);
		break;
	case HCI_SCODATA_PKT:
		if (bt_cb(skb)->incoming)
			opcode = cpu_to_le16(HCI_MON_SCO_RX_PKT);
		else
			opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT);
		break;
	case HCI_DIAG_PKT:
		opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG);
		break;
	default:
		return;
	}

	/* Create a private copy with headroom */
	skb_copy = __pskb_copy_fclone(skb, HCI_MON_HDR_SIZE, GFP_ATOMIC, true);
	if (!skb_copy)
		return;

	/* Put header before the data */
	hdr = (void *) skb_push(skb_copy, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len);

	hci_send_to_channel(HCI_CHANNEL_MONITOR, skb_copy,
			    HCI_SOCK_TRUSTED, NULL);
	kfree_skb(skb_copy);
}

static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
{
	struct hci_mon_hdr *hdr;
	struct hci_mon_new_index *ni;
	struct hci_mon_index_info *ii;
	struct sk_buff *skb;
	__le16 opcode;

	switch (event) {
	case HCI_DEV_REG:
		skb = bt_skb_alloc(HCI_MON_NEW_INDEX_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE);
		ni->type = hdev->dev_type;
		ni->bus = hdev->bus;
		bacpy(&ni->bdaddr, &hdev->bdaddr);
		memcpy(ni->name, hdev->name, 8);

		opcode = cpu_to_le16(HCI_MON_NEW_INDEX);
		break;

	case HCI_DEV_UNREG:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
		break;

	case HCI_DEV_SETUP:
		if (hdev->manufacturer == 0xffff)
			return NULL;

		/* fall through */

	case HCI_DEV_UP:
		skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
		if (!skb)
			return NULL;

		ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE);
		bacpy(&ii->bdaddr, &hdev->bdaddr);
		ii->manufacturer = cpu_to_le16(hdev->manufacturer);

		opcode = cpu_to_le16(HCI_MON_INDEX_INFO);
		break;

	case HCI_DEV_OPEN:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_OPEN_INDEX);
		break;

	case HCI_DEV_CLOSE:
		skb = bt_skb_alloc(0, GFP_ATOMIC);
		if (!skb)
			return NULL;

		opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX);
		break;

	default:
		return NULL;
	}

	__net_timestamp(skb);

	hdr = (void *) skb_push(skb, HCI_MON_HDR_SIZE);
	hdr->opcode = opcode;
	hdr->index = cpu_to_le16(hdev->id);
	hdr->len = cpu_to_le16(skb->len - HCI_MON_HDR_SIZE);

	return skb;
}

static void send_monitor_replay(struct sock *sk)
{
	struct hci_dev *hdev;

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(hdev, &hci_dev_list, list) {
		struct sk_buff *skb;

		skb = create_monitor_event(hdev, HCI_DEV_REG);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (!test_bit(HCI_RUNNING, &hdev->flags))
			continue;

		skb = create_monitor_event(hdev, HCI_DEV_OPEN);
		if (!skb)
			continue;

		if (sock_queue_rcv_skb(sk, skb))
			kfree_skb(skb);

		if (test_bit(HCI_UP, &hdev->flags))
			skb = create_monitor_event(hdev, HCI_DEV_UP);
		else if (hci_dev_test_flag(hdev, HCI_SETUP))
			skb = create_monitor_event(hdev, HCI_DEV_SETUP);
		else
			skb = NULL;

		if (skb) {
			if (sock_queue_rcv_skb(sk, skb))
				kfree_skb(skb);
		}
	}

	read_unlock(&hci_dev_list_lock);
}

/* Generate internal stack event */
static void hci_si_event(struct hci_dev *hdev, int type, int dlen, void *data)
{
	struct hci_event_hdr *hdr;
	struct hci_ev_stack_internal *ev;
	struct sk_buff *skb;

	skb = bt_skb_alloc(HCI_EVENT_HDR_SIZE + sizeof(*ev) + dlen, GFP_ATOMIC);
	if (!skb)
		return;

	hdr = (void *) skb_put(skb, HCI_EVENT_HDR_SIZE);
	hdr->evt = HCI_EV_STACK_INTERNAL;
	hdr->plen = sizeof(*ev) + dlen;

	ev = (void *) skb_put(skb, sizeof(*ev) + dlen);
	ev->type = type;
	memcpy(ev->data, data, dlen);

	bt_cb(skb)->incoming = 1;
	__net_timestamp(skb);

	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_send_to_sock(hdev, skb);
	kfree_skb(skb);
}
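
/* The generated frame is an ordinary HCI event from the point of view
 * of raw sockets: an hci_event_hdr (evt = HCI_EV_STACK_INTERNAL,
 * plen = sizeof(*ev) + dlen) followed by the hci_ev_stack_internal
 * payload (type, then dlen bytes of data). The incoming flag and
 * timestamp let HCI_CMSG_DIR and HCI_CMSG_TSTAMP report it like a
 * received event.
 */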

void hci_sock_dev_event(struct hci_dev *hdev, int event)
{
	BT_DBG("hdev %s event %d", hdev->name, event);

	if (atomic_read(&monitor_promisc)) {
		struct sk_buff *skb;

		/* Send event to monitor */
		skb = create_monitor_event(hdev, event);
		if (skb) {
			hci_send_to_channel(HCI_CHANNEL_MONITOR, skb,
					    HCI_SOCK_TRUSTED, NULL);
			kfree_skb(skb);
		}
	}

	if (event <= HCI_DEV_DOWN) {
		struct hci_ev_si_device ev;

		/* Send event to sockets */
		ev.event = event;
		ev.dev_id = hdev->id;
		hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev);
	}

	if (event == HCI_DEV_UNREG) {
		struct sock *sk;

		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
			bh_lock_sock_nested(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
				sk->sk_state = BT_OPEN;
				sk->sk_state_change(sk);

				hci_dev_put(hdev);
			}
			bh_unlock_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}
}

static struct hci_mgmt_chan *__hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	list_for_each_entry(c, &mgmt_chan_list, list) {
		if (c->channel == channel)
			return c;
	}

	return NULL;
}

static struct hci_mgmt_chan *hci_mgmt_chan_find(unsigned short channel)
{
	struct hci_mgmt_chan *c;

	mutex_lock(&mgmt_chan_list_lock);
	c = __hci_mgmt_chan_find(channel);
	mutex_unlock(&mgmt_chan_list_lock);

	return c;
}

int hci_mgmt_chan_register(struct hci_mgmt_chan *c)
{
	if (c->channel < HCI_CHANNEL_CONTROL)
		return -EINVAL;

	mutex_lock(&mgmt_chan_list_lock);
	if (__hci_mgmt_chan_find(c->channel)) {
		mutex_unlock(&mgmt_chan_list_lock);
		return -EALREADY;
	}

	list_add_tail(&c->list, &mgmt_chan_list);

	mutex_unlock(&mgmt_chan_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_mgmt_chan_register);

void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c)
{
	mutex_lock(&mgmt_chan_list_lock);
	list_del(&c->list);
	mutex_unlock(&mgmt_chan_list_lock);
}
EXPORT_SYMBOL(hci_mgmt_chan_unregister);

static int hci_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!sk)
		return 0;

	hdev = hci_pi(sk)->hdev;

	if (hci_pi(sk)->channel == HCI_CHANNEL_MONITOR)
		atomic_dec(&monitor_promisc);

	bt_sock_unlink(&hci_sk_list, sk);

	if (hdev) {
		if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
			/* When releasing a user channel exclusive access,
			 * call hci_dev_do_close directly instead of calling
			 * hci_dev_close to ensure the exclusive access will
			 * be released and the controller brought back down.
			 *
			 * The checking of HCI_AUTO_OFF is not needed in this
			 * case since it will have been cleared already when
			 * opening the user channel.
			 */
			hci_dev_do_close(hdev);
			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
			mgmt_index_added(hdev);
		}

		atomic_dec(&hdev->promisc);
		hci_dev_put(hdev);
	}

	sock_orphan(sk);

	skb_queue_purge(&sk->sk_receive_queue);
	skb_queue_purge(&sk->sk_write_queue);

	sock_put(sk);
	return 0;
}

static int hci_sock_blacklist_add(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_add(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

static int hci_sock_blacklist_del(struct hci_dev *hdev, void __user *arg)
{
	bdaddr_t bdaddr;
	int err;

	if (copy_from_user(&bdaddr, arg, sizeof(bdaddr)))
		return -EFAULT;

	hci_dev_lock(hdev);

	err = hci_bdaddr_list_del(&hdev->blacklist, &bdaddr, BDADDR_BREDR);

	hci_dev_unlock(hdev);

	return err;
}

/* Ioctls that require a bound socket */
static int hci_sock_bound_ioctl(struct sock *sk, unsigned int cmd,
				unsigned long arg)
{
	struct hci_dev *hdev = hci_pi(sk)->hdev;

	if (!hdev)
		return -EBADFD;

	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return -EBUSY;

	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED))
		return -EOPNOTSUPP;

	if (hdev->dev_type != HCI_BREDR)
		return -EOPNOTSUPP;

	switch (cmd) {
	case HCISETRAW:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return -EOPNOTSUPP;

	case HCIGETCONNINFO:
		return hci_get_conn_info(hdev, (void __user *) arg);

	case HCIGETAUTHINFO:
		return hci_get_auth_info(hdev, (void __user *) arg);

	case HCIBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_add(hdev, (void __user *) arg);

	case HCIUNBLOCKADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_sock_blacklist_del(hdev, (void __user *) arg);
	}

	return -ENOIOCTLCMD;
}

static int hci_sock_ioctl(struct socket *sock, unsigned int cmd,
			  unsigned long arg)
{
	void __user *argp = (void __user *) arg;
	struct sock *sk = sock->sk;
	int err;

	BT_DBG("cmd %x arg %lx", cmd, arg);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	release_sock(sk);

	switch (cmd) {
	case HCIGETDEVLIST:
		return hci_get_dev_list(argp);

	case HCIGETDEVINFO:
		return hci_get_dev_info(argp);

	case HCIGETCONNLIST:
		return hci_get_conn_list(argp);

	case HCIDEVUP:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_open(arg);

	case HCIDEVDOWN:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_close(arg);

	case HCIDEVRESET:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset(arg);

	case HCIDEVRESTAT:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_reset_stat(arg);

	case HCISETSCAN:
	case HCISETAUTH:
	case HCISETENCRYPT:
	case HCISETPTYPE:
	case HCISETLINKPOL:
	case HCISETLINKMODE:
	case HCISETACLMTU:
	case HCISETSCOMTU:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		return hci_dev_cmd(cmd, argp);

	case HCIINQUIRY:
		return hci_inquiry(argp);
	}

	lock_sock(sk);

	err = hci_sock_bound_ioctl(sk, cmd, arg);

done:
	release_sock(sk);
	return err;
}

static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,
			 int addr_len)
{
	struct sockaddr_hci haddr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev = NULL;
	int len, err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (!addr)
		return -EINVAL;

	memset(&haddr, 0, sizeof(haddr));
	len = min_t(unsigned int, sizeof(haddr), addr_len);
	memcpy(&haddr, addr, len);

	if (haddr.hci_family != AF_BLUETOOTH)
		return -EINVAL;

	lock_sock(sk);

	if (sk->sk_state == BT_BOUND) {
		err = -EALREADY;
		goto done;
	}

	switch (haddr.hci_channel) {
	case HCI_CHANNEL_RAW:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			hdev = hci_dev_get(haddr.hci_dev);
			if (!hdev) {
				err = -ENODEV;
				goto done;
			}

			atomic_inc(&hdev->promisc);
		}

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_USER:
		if (hci_pi(sk)->hdev) {
			err = -EALREADY;
			goto done;
		}

		if (haddr.hci_dev == HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_ADMIN)) {
			err = -EPERM;
			goto done;
		}

		hdev = hci_dev_get(haddr.hci_dev);
		if (!hdev) {
			err = -ENODEV;
			goto done;
		}

		if (test_bit(HCI_INIT, &hdev->flags) ||
		    hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) &&
		     test_bit(HCI_UP, &hdev->flags))) {
			err = -EBUSY;
			hci_dev_put(hdev);
			goto done;
		}

		if (hci_dev_test_and_set_flag(hdev, HCI_USER_CHANNEL)) {
			err = -EUSERS;
			hci_dev_put(hdev);
			goto done;
		}

		mgmt_index_removed(hdev);

		err = hci_dev_open(hdev->id);
		if (err) {
			if (err == -EALREADY) {
				/* In case the transport is already up and
				 * running, clear the error here.
				 *
				 * This can happen when opening a user
				 * channel and HCI_AUTO_OFF grace period
				 * is still active.
				 */
				err = 0;
			} else {
				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL);
				mgmt_index_added(hdev);
				hci_dev_put(hdev);
				goto done;
			}
		}

		atomic_inc(&hdev->promisc);

		hci_pi(sk)->hdev = hdev;
		break;

	case HCI_CHANNEL_MONITOR:
		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto done;
		}

		/* The monitor interface is restricted to CAP_NET_RAW
		 * capabilities and with that implicitly trusted.
		 */
		hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		send_monitor_replay(sk);

		atomic_inc(&monitor_promisc);
		break;

	default:
		if (!hci_mgmt_chan_find(haddr.hci_channel)) {
			err = -EINVAL;
			goto done;
		}

		if (haddr.hci_dev != HCI_DEV_NONE) {
			err = -EINVAL;
			goto done;
		}

		/* Users with CAP_NET_ADMIN capabilities are allowed
		 * access to all management commands and events. For
		 * untrusted users the interface is restricted and
		 * also only untrusted events are sent.
		 */
		if (capable(CAP_NET_ADMIN))
			hci_sock_set_flag(sk, HCI_SOCK_TRUSTED);

		/* At the moment the index and unconfigured index events
		 * are enabled unconditionally. Setting them on each
		 * socket when binding keeps this functionality. They
		 * however might be cleared later and then sending of these
		 * events will be disabled, but that is then intentional.
		 *
		 * This also enables generic events that are safe to be
		 * received by untrusted users. Examples of such events
		 * are changes to settings, class of device, name etc.
		 */
		if (haddr.hci_channel == HCI_CHANNEL_CONTROL) {
			hci_sock_set_flag(sk, HCI_MGMT_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_UNCONF_INDEX_EVENTS);
			hci_sock_set_flag(sk, HCI_MGMT_GENERIC_EVENTS);
		}
		break;
	}

	hci_pi(sk)->channel = haddr.hci_channel;
	sk->sk_state = BT_BOUND;

done:
	release_sock(sk);
	return err;
}
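
/* Minimal userspace sketch of binding to the raw channel of hci0
 * (assumes BlueZ's <bluetooth/bluetooth.h> and <bluetooth/hci.h>):
 *
 *	int fd = socket(AF_BLUETOOTH, SOCK_RAW | SOCK_CLOEXEC, BTPROTO_HCI);
 *	struct sockaddr_hci a = {
 *		.hci_family  = AF_BLUETOOTH,
 *		.hci_dev     = 0,		// hci0
 *		.hci_channel = HCI_CHANNEL_RAW,
 *	};
 *
 *	if (bind(fd, (struct sockaddr *) &a, sizeof(a)) < 0)
 *		perror("bind");
 *
 * As enforced above, HCI_CHANNEL_MONITOR and the management channels
 * instead require hci_dev == HCI_DEV_NONE.
 */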

static int hci_sock_getname(struct socket *sock, struct sockaddr *addr,
			    int *addr_len, int peer)
{
	struct sockaddr_hci *haddr = (struct sockaddr_hci *) addr;
	struct sock *sk = sock->sk;
	struct hci_dev *hdev;
	int err = 0;

	BT_DBG("sock %p sk %p", sock, sk);

	if (peer)
		return -EOPNOTSUPP;

	lock_sock(sk);

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	*addr_len = sizeof(*haddr);
	haddr->hci_family = AF_BLUETOOTH;
	haddr->hci_dev = hdev->id;
	haddr->hci_channel = hci_pi(sk)->channel;

done:
	release_sock(sk);
	return err;
}

static void hci_sock_cmsg(struct sock *sk, struct msghdr *msg,
			  struct sk_buff *skb)
{
	__u32 mask = hci_pi(sk)->cmsg_mask;

	if (mask & HCI_CMSG_DIR) {
		int incoming = bt_cb(skb)->incoming;
		put_cmsg(msg, SOL_HCI, HCI_CMSG_DIR, sizeof(incoming),
			 &incoming);
	}

	if (mask & HCI_CMSG_TSTAMP) {
#ifdef CONFIG_COMPAT
		struct compat_timeval ctv;
#endif
		struct timeval tv;
		void *data;
		int len;

		skb_get_timestamp(skb, &tv);

		data = &tv;
		len = sizeof(tv);
#ifdef CONFIG_COMPAT
		if (!COMPAT_USE_64BIT_TIME &&
		    (msg->msg_flags & MSG_CMSG_COMPAT)) {
			ctv.tv_sec = tv.tv_sec;
			ctv.tv_usec = tv.tv_usec;
			data = &ctv;
			len = sizeof(ctv);
		}
#endif

		put_cmsg(msg, SOL_HCI, HCI_CMSG_TSTAMP, len, data);
	}
}

static int hci_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
			    int flags)
{
	int noblock = flags & MSG_DONTWAIT;
	struct sock *sk = sock->sk;
	struct sk_buff *skb;
	int copied, err;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (sk->sk_state == BT_CLOSED)
		return 0;

	skb = skb_recv_datagram(sk, flags, noblock, &err);
	if (!skb)
		return err;

	copied = skb->len;
	if (len < copied) {
		msg->msg_flags |= MSG_TRUNC;
		copied = len;
	}

	skb_reset_transport_header(skb);
	err = skb_copy_datagram_msg(skb, 0, msg, copied);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
		hci_sock_cmsg(sk, msg, skb);
		break;
	case HCI_CHANNEL_USER:
	case HCI_CHANNEL_MONITOR:
		sock_recv_timestamp(msg, sk, skb);
		break;
	default:
		if (hci_mgmt_chan_find(hci_pi(sk)->channel))
			sock_recv_timestamp(msg, sk, skb);
		break;
	}

	skb_free_datagram(sk, skb);

	return err ? : copied;
}
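
/* Userspace sketch of the matching read side: every datagram is one
 * HCI frame, prefixed with its packet type byte by hci_send_to_sock():
 *
 *	unsigned char buf[HCI_MAX_FRAME_SIZE];
 *	ssize_t n = recv(fd, buf, sizeof(buf), 0);
 *
 *	if (n >= 3 && buf[0] == HCI_EVENT_PKT) {
 *		// buf[1] is the event code, buf[2] the parameter length
 *	}
 */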

static int hci_mgmt_cmd(struct hci_mgmt_chan *chan, struct sock *sk,
			struct msghdr *msg, size_t msglen)
{
	void *buf;
	u8 *cp;
	struct mgmt_hdr *hdr;
	u16 opcode, index, len;
	struct hci_dev *hdev = NULL;
	const struct hci_mgmt_handler *handler;
	bool var_len, no_hdev;
	int err;

	BT_DBG("got %zu bytes", msglen);

	if (msglen < sizeof(*hdr))
		return -EINVAL;

	buf = kmalloc(msglen, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	if (memcpy_from_msg(buf, msg, msglen)) {
		err = -EFAULT;
		goto done;
	}

	hdr = buf;
	opcode = __le16_to_cpu(hdr->opcode);
	index = __le16_to_cpu(hdr->index);
	len = __le16_to_cpu(hdr->len);

	if (len != msglen - sizeof(*hdr)) {
		err = -EINVAL;
		goto done;
	}

	if (opcode >= chan->handler_count ||
	    chan->handlers[opcode].func == NULL) {
		BT_DBG("Unknown op %u", opcode);
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_UNKNOWN_COMMAND);
		goto done;
	}

	handler = &chan->handlers[opcode];

	if (!hci_sock_test_flag(sk, HCI_SOCK_TRUSTED) &&
	    !(handler->flags & HCI_MGMT_UNTRUSTED)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_PERMISSION_DENIED);
		goto done;
	}

	if (index != MGMT_INDEX_NONE) {
		hdev = hci_dev_get(index);
		if (!hdev) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_SETUP) ||
		    hci_dev_test_flag(hdev, HCI_CONFIG) ||
		    hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}

		if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
		    !(handler->flags & HCI_MGMT_UNCONFIGURED)) {
			err = mgmt_cmd_status(sk, index, opcode,
					      MGMT_STATUS_INVALID_INDEX);
			goto done;
		}
	}

	no_hdev = (handler->flags & HCI_MGMT_NO_HDEV);
	if (no_hdev != !hdev) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_INDEX);
		goto done;
	}

	var_len = (handler->flags & HCI_MGMT_VAR_LEN);
	if ((var_len && len < handler->data_len) ||
	    (!var_len && len != handler->data_len)) {
		err = mgmt_cmd_status(sk, index, opcode,
				      MGMT_STATUS_INVALID_PARAMS);
		goto done;
	}

	if (hdev && chan->hdev_init)
		chan->hdev_init(sk, hdev);

	cp = buf + sizeof(*hdr);

	err = handler->func(sk, hdev, cp, len);
	if (err < 0)
		goto done;

	err = msglen;

done:
	if (hdev)
		hci_dev_put(hdev);

	kfree(buf);
	return err;
}

static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
			    size_t len)
{
	struct sock *sk = sock->sk;
	struct hci_mgmt_chan *chan;
	struct hci_dev *hdev;
	struct sk_buff *skb;
	int err;

	BT_DBG("sock %p sk %p", sock, sk);

	if (msg->msg_flags & MSG_OOB)
		return -EOPNOTSUPP;

	if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_NOSIGNAL|MSG_ERRQUEUE|
			       MSG_CMSG_COMPAT))
		return -EINVAL;

	if (len < 4 || len > HCI_MAX_FRAME_SIZE)
		return -EINVAL;

	lock_sock(sk);

	switch (hci_pi(sk)->channel) {
	case HCI_CHANNEL_RAW:
	case HCI_CHANNEL_USER:
		break;
	case HCI_CHANNEL_MONITOR:
		err = -EOPNOTSUPP;
		goto done;
	default:
		mutex_lock(&mgmt_chan_list_lock);
		chan = __hci_mgmt_chan_find(hci_pi(sk)->channel);
		if (chan)
			err = hci_mgmt_cmd(chan, sk, msg, len);
		else
			err = -EINVAL;

		mutex_unlock(&mgmt_chan_list_lock);
		goto done;
	}

	hdev = hci_pi(sk)->hdev;
	if (!hdev) {
		err = -EBADFD;
		goto done;
	}

	if (!test_bit(HCI_UP, &hdev->flags)) {
		err = -ENETDOWN;
		goto done;
	}

	skb = bt_skb_send_alloc(sk, len, msg->msg_flags & MSG_DONTWAIT, &err);
	if (!skb)
		goto done;

	if (memcpy_from_msg(skb_put(skb, len), msg, len)) {
		err = -EFAULT;
		goto drop;
	}

	bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
	skb_pull(skb, 1);

	if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
		/* No permission check is needed for user channel
		 * since that gets enforced when binding the socket.
		 *
		 * However check that the packet type is valid.
		 */
		if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
		    bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	} else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
		u16 opcode = get_unaligned_le16(skb->data);
		u16 ogf = hci_opcode_ogf(opcode);
		u16 ocf = hci_opcode_ocf(opcode);

		if (((ogf > HCI_SFLT_MAX_OGF) ||
		     !hci_test_bit(ocf & HCI_FLT_OCF_BITS,
				   &hci_sec_filter.ocf_mask[ogf])) &&
		    !capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (ogf == 0x3f) {
			skb_queue_tail(&hdev->raw_q, skb);
			queue_work(hdev->workqueue, &hdev->tx_work);
		} else {
			/* Stand-alone HCI commands must be flagged as
			 * single-command requests.
			 */
			bt_cb(skb)->hci.req_start = true;

			skb_queue_tail(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	} else {
		if (!capable(CAP_NET_RAW)) {
			err = -EPERM;
			goto drop;
		}

		if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
		    bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
			err = -EINVAL;
			goto drop;
		}

		skb_queue_tail(&hdev->raw_q, skb);
		queue_work(hdev->workqueue, &hdev->tx_work);
	}

	err = len;

done:
	release_sock(sk);
	return err;

drop:
	kfree_skb(skb);
	goto done;
}
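
/* Userspace sketch of sending a command on a raw socket: the first byte
 * is the packet type, then the little-endian opcode and the parameter
 * length. Here HCI_Reset (OGF 0x03 / OCF 0x0003, opcode 0x0c03), which
 * is not in hci_sec_filter and therefore needs CAP_NET_RAW:
 *
 *	unsigned char cmd[] = { HCI_COMMAND_PKT, 0x03, 0x0c, 0x00 };
 *
 *	write(fd, cmd, sizeof(cmd));
 */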

static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, unsigned int len)
{
	struct hci_ufilter uf = { .opcode = 0 };
	struct sock *sk = sock->sk;
	int err = 0, opt = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_DIR;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_DIR;
		break;

	case HCI_TIME_STAMP:
		if (get_user(opt, (int __user *)optval)) {
			err = -EFAULT;
			break;
		}

		if (opt)
			hci_pi(sk)->cmsg_mask |= HCI_CMSG_TSTAMP;
		else
			hci_pi(sk)->cmsg_mask &= ~HCI_CMSG_TSTAMP;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_from_user(&uf, optval, len)) {
			err = -EFAULT;
			break;
		}

		if (!capable(CAP_NET_RAW)) {
			uf.type_mask &= hci_sec_filter.type_mask;
			uf.event_mask[0] &= *((u32 *) hci_sec_filter.event_mask + 0);
			uf.event_mask[1] &= *((u32 *) hci_sec_filter.event_mask + 1);
		}

		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			f->type_mask = uf.type_mask;
			f->opcode = uf.opcode;
			*((u32 *) f->event_mask + 0) = uf.event_mask[0];
			*((u32 *) f->event_mask + 1) = uf.event_mask[1];
		}
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static int hci_sock_getsockopt(struct socket *sock, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	struct hci_ufilter uf;
	struct sock *sk = sock->sk;
	int len, opt, err = 0;

	BT_DBG("sk %p, opt %d", sk, optname);

	if (get_user(len, optlen))
		return -EFAULT;

	lock_sock(sk);

	if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) {
		err = -EBADFD;
		goto done;
	}

	switch (optname) {
	case HCI_DATA_DIR:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_DIR)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_TIME_STAMP:
		if (hci_pi(sk)->cmsg_mask & HCI_CMSG_TSTAMP)
			opt = 1;
		else
			opt = 0;

		if (put_user(opt, optval))
			err = -EFAULT;
		break;

	case HCI_FILTER:
		{
			struct hci_filter *f = &hci_pi(sk)->filter;

			memset(&uf, 0, sizeof(uf));
			uf.type_mask = f->type_mask;
			uf.opcode = f->opcode;
			uf.event_mask[0] = *((u32 *) f->event_mask + 0);
			uf.event_mask[1] = *((u32 *) f->event_mask + 1);
		}

		len = min_t(unsigned int, len, sizeof(uf));
		if (copy_to_user(optval, &uf, len))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
	}

done:
	release_sock(sk);
	return err;
}

static const struct proto_ops hci_sock_ops = {
	.family		= PF_BLUETOOTH,
	.owner		= THIS_MODULE,
	.release	= hci_sock_release,
	.bind		= hci_sock_bind,
	.getname	= hci_sock_getname,
	.sendmsg	= hci_sock_sendmsg,
	.recvmsg	= hci_sock_recvmsg,
	.ioctl		= hci_sock_ioctl,
	.poll		= datagram_poll,
	.listen		= sock_no_listen,
	.shutdown	= sock_no_shutdown,
	.setsockopt	= hci_sock_setsockopt,
	.getsockopt	= hci_sock_getsockopt,
	.connect	= sock_no_connect,
	.socketpair	= sock_no_socketpair,
	.accept		= sock_no_accept,
	.mmap		= sock_no_mmap
};

static struct proto hci_sk_proto = {
	.name		= "HCI",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct hci_pinfo)
};

static int hci_sock_create(struct net *net, struct socket *sock, int protocol,
			   int kern)
{
	struct sock *sk;

	BT_DBG("sock %p", sock);

	if (sock->type != SOCK_RAW)
		return -ESOCKTNOSUPPORT;

	sock->ops = &hci_sock_ops;

	sk = sk_alloc(net, PF_BLUETOOTH, GFP_ATOMIC, &hci_sk_proto, kern);
	if (!sk)
		return -ENOMEM;

	sock_init_data(sock, sk);

	sock_reset_flag(sk, SOCK_ZAPPED);

	sk->sk_protocol = protocol;

	sock->state = SS_UNCONNECTED;
	sk->sk_state = BT_OPEN;

	bt_sock_link(&hci_sk_list, sk);
	return 0;
}

static const struct net_proto_family hci_sock_family_ops = {
	.family	= PF_BLUETOOTH,
	.owner	= THIS_MODULE,
	.create	= hci_sock_create,
};

int __init hci_sock_init(void)
{
	int err;

	BUILD_BUG_ON(sizeof(struct sockaddr_hci) > sizeof(struct sockaddr));

	err = proto_register(&hci_sk_proto, 0);
	if (err < 0)
		return err;

	err = bt_sock_register(BTPROTO_HCI, &hci_sock_family_ops);
	if (err < 0) {
		BT_ERR("HCI socket registration failed");
		goto error;
	}

	err = bt_procfs_init(&init_net, "hci", &hci_sk_list, NULL);
	if (err < 0) {
		BT_ERR("Failed to create HCI proc file");
		bt_sock_unregister(BTPROTO_HCI);
		goto error;
	}

	BT_INFO("HCI socket layer initialized");

	return 0;

error:
	proto_unregister(&hci_sk_proto);
	return err;
}

void hci_sock_cleanup(void)
{
	bt_procfs_cleanup(&init_net, "hci");
	bt_sock_unregister(BTPROTO_HCI);
	proto_unregister(&hci_sk_proto);
}