/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI connection handling. */

#include <linux/export.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"
#include "a2mp.h"

struct sco_param {
	u16 pkt_type;
	u16 max_latency;
	u8  retrans_effort;
};

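/* Synchronous connection parameter sets, ordered from most to least
 * preferred. hci_setup_sync() indexes these tables with conn->attempt - 1,
 * so a failed setup automatically falls back to the next, more
 * conservative parameter set: S3..S1 and D1/D0 for CVSD air mode,
 * T2/T1 for transparent (mSBC) air mode.
 */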
static const struct sco_param esco_param_cvsd[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,   0x01 }, /* S3 */
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,   0x01 }, /* S2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0007,   0x01 }, /* S1 */
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0x01 }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0x01 }, /* D0 */
};

static const struct sco_param sco_param_cvsd[] = {
	{ EDR_ESCO_MASK | ESCO_HV3,   0xffff,   0xff }, /* D1 */
	{ EDR_ESCO_MASK | ESCO_HV1,   0xffff,   0xff }, /* D0 */
};

static const struct sco_param esco_param_msbc[] = {
	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000d,   0x02 }, /* T2 */
	{ EDR_ESCO_MASK | ESCO_EV3,   0x0008,   0x02 }, /* T1 */
};

static void hci_le_create_connection_cancel(struct hci_conn *conn)
{
	hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

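/* Send HCI_Create_Connection for an outgoing ACL link. Page scan
 * parameters and the clock offset are seeded from the inquiry cache when
 * a sufficiently recent entry for the peer exists, and a role switch is
 * allowed when the controller supports it and the local device is not
 * forced to remain master.
 */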
static void hci_acl_create_connection(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct inquiry_entry *ie;
	struct hci_cp_create_conn cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;
	conn->role = HCI_ROLE_MASTER;

	conn->attempt++;

	conn->link_policy = hdev->link_policy;

	memset(&cp, 0, sizeof(cp));
	bacpy(&cp.bdaddr, &conn->dst);
	cp.pscan_rep_mode = 0x02;

	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
	if (ie) {
		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
			cp.pscan_mode     = ie->data.pscan_mode;
			cp.clock_offset   = ie->data.clock_offset |
					    cpu_to_le16(0x8000);
		}

		memcpy(conn->dev_class, ie->data.dev_class, 3);
		if (ie->data.ssp_mode > 0)
			set_bit(HCI_CONN_SSP_ENABLED, &conn->flags);
	}

	cp.pkt_type = cpu_to_le16(conn->pkt_type);
	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
		cp.role_switch = 0x01;
	else
		cp.role_switch = 0x00;

	hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
	struct hci_cp_create_conn_cancel cp;

	BT_DBG("hcon %p", conn);

	if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	bacpy(&cp.bdaddr, &conn->dst);
	hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

static void hci_reject_sco(struct hci_conn *conn)
{
	struct hci_cp_reject_sync_conn_req cp;

	cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
	bacpy(&cp.bdaddr, &conn->dst);

	hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
}

int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
	struct hci_cp_disconnect cp;

	BT_DBG("hcon %p", conn);

	/* When we are master of an established connection and it enters
	 * the disconnect timeout, then go ahead and try to read the
	 * current clock offset.  Processing of the result is done
	 * within the event handling and hci_clock_offset_evt function.
	 */
	if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
		struct hci_dev *hdev = conn->hdev;
		struct hci_cp_read_clock_offset clkoff_cp;

		clkoff_cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_READ_CLOCK_OFFSET, sizeof(clkoff_cp),
			     &clkoff_cp);
	}

	conn->state = BT_DISCONN;

	cp.handle = cpu_to_le16(conn->handle);
	cp.reason = reason;
	return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

static void hci_amp_disconn(struct hci_conn *conn)
{
	struct hci_cp_disconn_phy_link cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_DISCONN;

	cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
	cp.reason = hci_proto_disconn_ind(conn);
	hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
		     sizeof(cp), &cp);
}

static void hci_add_sco(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_add_sco cp;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);
	cp.pkt_type = cpu_to_le16(conn->pkt_type);

	hci_send_cmd(hdev, HCI_OP_ADD_SCO, sizeof(cp), &cp);
}

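/* Set up an eSCO/SCO link on top of an existing ACL connection by sending
 * HCI_Setup_Synchronous_Connection. The parameter set is picked from the
 * tables above based on the requested air mode and the current attempt
 * number; returns false once all parameter sets for that air mode are
 * exhausted or the command could not be sent.
 */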
bool hci_setup_sync(struct hci_conn *conn, __u16 handle)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_setup_sync_conn cp;
	const struct sco_param *param;

	BT_DBG("hcon %p", conn);

	conn->state = BT_CONNECT;
	conn->out = true;

	conn->attempt++;

	cp.handle   = cpu_to_le16(handle);

	cp.tx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.rx_bandwidth   = cpu_to_le32(0x00001f40);
	cp.voice_setting  = cpu_to_le16(conn->setting);

	switch (conn->setting & SCO_AIRMODE_MASK) {
	case SCO_AIRMODE_TRANSP:
		if (conn->attempt > ARRAY_SIZE(esco_param_msbc))
			return false;
		param = &esco_param_msbc[conn->attempt - 1];
		break;
	case SCO_AIRMODE_CVSD:
		if (lmp_esco_capable(conn->link)) {
			if (conn->attempt > ARRAY_SIZE(esco_param_cvsd))
				return false;
			param = &esco_param_cvsd[conn->attempt - 1];
		} else {
			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd))
				return false;
			param = &sco_param_cvsd[conn->attempt - 1];
		}
		break;
	default:
		return false;
	}

	cp.retrans_effort = param->retrans_effort;
	cp.pkt_type = __cpu_to_le16(param->pkt_type);
	cp.max_latency = __cpu_to_le16(param->max_latency);

	if (hci_send_cmd(hdev, HCI_OP_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0)
		return false;

	return true;
}

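/* Request new LE connection parameters. Any stored parameters for this
 * peer are updated first so future (re)connections use the same values,
 * then an LE Connection Update command is sent. Returns 0x01 when a
 * stored parameter entry existed, 0x00 otherwise.
 */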
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
		      u16 to_multiplier)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;
	struct hci_cp_le_conn_update cp;

	hci_dev_lock(hdev);

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		params->conn_min_interval = min;
		params->conn_max_interval = max;
		params->conn_latency = latency;
		params->supervision_timeout = to_multiplier;
	}

	hci_dev_unlock(hdev);

	memset(&cp, 0, sizeof(cp));
	cp.handle		= cpu_to_le16(conn->handle);
	cp.conn_interval_min	= cpu_to_le16(min);
	cp.conn_interval_max	= cpu_to_le16(max);
	cp.conn_latency		= cpu_to_le16(latency);
	cp.supervision_timeout	= cpu_to_le16(to_multiplier);
	cp.min_ce_len		= cpu_to_le16(0x0000);
	cp.max_ce_len		= cpu_to_le16(0x0000);

	hci_send_cmd(hdev, HCI_OP_LE_CONN_UPDATE, sizeof(cp), &cp);

	if (params)
		return 0x01;

	return 0x00;
}

void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand,
		      __u8 ltk[16])
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_cp_le_start_enc cp;

	BT_DBG("hcon %p", conn);

	memset(&cp, 0, sizeof(cp));

	cp.handle = cpu_to_le16(conn->handle);
	cp.rand = rand;
	cp.ediv = ediv;
	memcpy(cp.ltk, ltk, sizeof(cp.ltk));

	hci_send_cmd(hdev, HCI_OP_LE_START_ENC, sizeof(cp), &cp);
}

/* Device _must_ be locked */
void hci_sco_setup(struct hci_conn *conn, __u8 status)
{
	struct hci_conn *sco = conn->link;

	if (!sco)
		return;

	BT_DBG("hcon %p", conn);

	if (!status) {
		if (lmp_esco_capable(conn->hdev))
			hci_setup_sync(sco, conn->handle);
		else
			hci_add_sco(sco, conn->handle);
	} else {
		hci_proto_connect_cfm(sco, status);
		hci_conn_del(sco);
	}
}

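/* Handler for the disc_work delayed work (the disconnect timeout). Once
 * the reference count has dropped to zero the connection is torn down
 * according to its state: pending outgoing connects are cancelled,
 * incoming SCO requests are rejected, and established links are
 * disconnected.
 */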
static void hci_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     disc_work.work);
	int refcnt = atomic_read(&conn->refcnt);

	BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));

	WARN_ON(refcnt < 0);

	/* FIXME: It was observed that in a failed pairing scenario the
	 * refcnt drops below 0. Probably this is because l2cap_conn_del calls
	 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
	 * dropped. After that loop hci_chan_del is called which also drops
	 * conn. For now make sure that the ACL is alive if refcnt is higher
	 * than 0, otherwise drop it.
	 */
	if (refcnt > 0)
		return;

	switch (conn->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
		if (conn->out) {
			if (conn->type == ACL_LINK)
				hci_acl_create_connection_cancel(conn);
			else if (conn->type == LE_LINK)
				hci_le_create_connection_cancel(conn);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			hci_reject_sco(conn);
		}
		break;
	case BT_CONFIG:
	case BT_CONNECTED:
		if (conn->type == AMP_LINK) {
			hci_amp_disconn(conn);
		} else {
			__u8 reason = hci_proto_disconn_ind(conn);
			hci_disconnect(conn, reason);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

/* Enter sniff mode */
static void hci_conn_idle(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     idle_work.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (!lmp_sniff_capable(hdev) || !lmp_sniff_capable(conn))
		return;

	if (conn->mode != HCI_CM_ACTIVE || !(conn->link_policy & HCI_LP_SNIFF))
		return;

	if (lmp_sniffsubr_capable(hdev) && lmp_sniffsubr_capable(conn)) {
		struct hci_cp_sniff_subrate cp;
		cp.handle             = cpu_to_le16(conn->handle);
		cp.max_latency        = cpu_to_le16(0);
		cp.min_remote_timeout = cpu_to_le16(0);
		cp.min_local_timeout  = cpu_to_le16(0);
		hci_send_cmd(hdev, HCI_OP_SNIFF_SUBRATE, sizeof(cp), &cp);
	}

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_sniff_mode cp;
		cp.handle       = cpu_to_le16(conn->handle);
		cp.max_interval = cpu_to_le16(hdev->sniff_max_interval);
		cp.min_interval = cpu_to_le16(hdev->sniff_min_interval);
		cp.attempt      = cpu_to_le16(4);
		cp.timeout      = cpu_to_le16(1);
		hci_send_cmd(hdev, HCI_OP_SNIFF_MODE, sizeof(cp), &cp);
	}
}

static void hci_conn_auto_accept(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     auto_accept_work.work);

	hci_send_cmd(conn->hdev, HCI_OP_USER_CONFIRM_REPLY, sizeof(conn->dst),
		     &conn->dst);
}

static void le_conn_timeout(struct work_struct *work)
{
	struct hci_conn *conn = container_of(work, struct hci_conn,
					     le_conn_timeout.work);
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("");

	/* We could end up here due to having done directed advertising,
	 * so clean up the state if necessary. This should however only
	 * happen with broken hardware or if low duty cycle was used
	 * (which doesn't have a timeout of its own).
	 */
	if (conn->role == HCI_ROLE_SLAVE) {
		u8 enable = 0x00;
		hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			     &enable);
		hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
		return;
	}

	hci_le_create_connection_cancel(conn);
}

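/* Allocate and initialise a new hci_conn for the given link type and
 * destination, set type specific defaults (packet types, local identity
 * address for LE), register it in the connection hash and take a
 * reference on the hci_dev. The connection starts in BT_OPEN with a
 * refcount of zero.
 */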
struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
			      u8 role)
{
	struct hci_conn *conn;

	BT_DBG("%s dst %pMR", hdev->name, dst);

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	bacpy(&conn->dst, dst);
	bacpy(&conn->src, &hdev->bdaddr);
	conn->hdev  = hdev;
	conn->type  = type;
	conn->role  = role;
	conn->mode  = HCI_CM_ACTIVE;
	conn->state = BT_OPEN;
	conn->auth_type = HCI_AT_GENERAL_BONDING;
	conn->io_capability = hdev->io_capability;
	conn->remote_auth = 0xff;
	conn->key_type = 0xff;
	conn->rssi = HCI_RSSI_INVALID;
	conn->tx_power = HCI_TX_POWER_INVALID;
	conn->max_tx_power = HCI_TX_POWER_INVALID;

	set_bit(HCI_CONN_POWER_SAVE, &conn->flags);
	conn->disc_timeout = HCI_DISCONN_TIMEOUT;

	if (conn->role == HCI_ROLE_MASTER)
		conn->out = true;

	switch (type) {
	case ACL_LINK:
		conn->pkt_type = hdev->pkt_type & ACL_PTYPE_MASK;
		break;
	case LE_LINK:
		/* conn->src should reflect the local identity address */
		hci_copy_identity_address(hdev, &conn->src, &conn->src_type);
		break;
	case SCO_LINK:
		if (lmp_esco_capable(hdev))
			conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) |
					(hdev->esco_type & EDR_ESCO_MASK);
		else
			conn->pkt_type = hdev->pkt_type & SCO_PTYPE_MASK;
		break;
	case ESCO_LINK:
		conn->pkt_type = hdev->esco_type & ~EDR_ESCO_MASK;
		break;
	}

	skb_queue_head_init(&conn->data_q);

	INIT_LIST_HEAD(&conn->chan_list);

	INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout);
	INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
	INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
	INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);

	atomic_set(&conn->refcnt, 0);

	hci_dev_hold(hdev);

	hci_conn_hash_add(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_ADD);

	hci_conn_init_sysfs(conn);

	return conn;
}

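/* Tear down a connection object: cancel its delayed work, return any
 * unacknowledged packet credits to the controller counters, unlink the
 * paired ACL/SCO connection, flush the HCI channels and remove the
 * connection from the hash before dropping the references taken in
 * hci_conn_add().
 */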
int hci_conn_del(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p handle %d", hdev->name, conn, conn->handle);

	cancel_delayed_work_sync(&conn->disc_work);
	cancel_delayed_work_sync(&conn->auto_accept_work);
	cancel_delayed_work_sync(&conn->idle_work);

	if (conn->type == ACL_LINK) {
		struct hci_conn *sco = conn->link;
		if (sco)
			sco->link = NULL;

		/* Unacked frames */
		hdev->acl_cnt += conn->sent;
	} else if (conn->type == LE_LINK) {
		cancel_delayed_work(&conn->le_conn_timeout);

		if (hdev->le_pkts)
			hdev->le_cnt += conn->sent;
		else
			hdev->acl_cnt += conn->sent;
	} else {
		struct hci_conn *acl = conn->link;
		if (acl) {
			acl->link = NULL;
			hci_conn_drop(acl);
		}
	}

	hci_chan_list_flush(conn);

	if (conn->amp_mgr)
		amp_mgr_put(conn->amp_mgr);

	hci_conn_hash_del(hdev, conn);
	if (hdev->notify)
		hdev->notify(hdev, HCI_NOTIFY_CONN_DEL);

	skb_queue_purge(&conn->data_q);

	hci_conn_del_sysfs(conn);

	if (test_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags))
		hci_conn_params_del(conn->hdev, &conn->dst, conn->dst_type);

	hci_dev_put(hdev);

	hci_conn_put(conn);

	return 0;
}

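/* Pick a BR/EDR capable controller for a new outgoing connection. With a
 * specific source address the controller with that address is used;
 * otherwise the first powered-up controller whose own address differs
 * from the destination is chosen. A reference on the returned hci_dev is
 * held for the caller.
 */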
struct hci_dev *hci_get_route(bdaddr_t *dst, bdaddr_t *src)
{
	int use_src = bacmp(src, BDADDR_ANY);
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%pMR -> %pMR", src, dst);

	read_lock(&hci_dev_list_lock);

	list_for_each_entry(d, &hci_dev_list, list) {
		if (!test_bit(HCI_UP, &d->flags) ||
		    test_bit(HCI_USER_CHANNEL, &d->dev_flags) ||
		    d->dev_type != HCI_BREDR)
			continue;

		/* Simple routing:
		 *   No source address - find interface with bdaddr != dst
		 *   Source address    - find interface with bdaddr == src
		 */

		if (use_src) {
			if (!bacmp(&d->bdaddr, src)) {
				hdev = d; break;
			}
		} else {
			if (bacmp(&d->bdaddr, dst)) {
				hdev = d; break;
			}
		}
	}

	if (hdev)
		hdev = hci_dev_hold(hdev);

	read_unlock(&hci_dev_list_lock);
	return hdev;
}
EXPORT_SYMBOL(hci_get_route);

/* This function requires the caller holds hdev->lock */
void hci_le_conn_failed(struct hci_conn *conn, u8 status)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_conn_params *params;

	params = hci_pend_le_action_lookup(&hdev->pend_le_conns, &conn->dst,
					   conn->dst_type);
	if (params && params->conn) {
		hci_conn_drop(params->conn);
		hci_conn_put(params->conn);
		params->conn = NULL;
	}

	conn->state = BT_CLOSED;

	mgmt_connect_failed(hdev, &conn->dst, conn->type, conn->dst_type,
			    status);

	hci_proto_connect_cfm(conn, status);

	hci_conn_del(conn);

	/* Since we may have temporarily stopped the background scanning in
	 * favor of connection establishment, we should restart it.
	 */
	hci_update_background_scan(hdev);

	/* Re-enable advertising in case this was a failed connection
	 * attempt as a peripheral.
	 */
	mgmt_reenable_advertising(hdev);
}

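/* Completion callback for the LE create connection request. On failure
 * the pending connection in BT_CONNECT state is looked up and cleaned up
 * via hci_le_conn_failed().
 */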
static void create_le_conn_complete(struct hci_dev *hdev, u8 status)
{
	struct hci_conn *conn;

	if (status == 0)
		return;

	BT_ERR("HCI request failed to create LE connection: status 0x%2.2x",
	       status);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (!conn)
		goto done;

	hci_le_conn_failed(conn, status);

done:
	hci_dev_unlock(hdev);
}

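/* Queue an LE Create Connection command for the given connection, using
 * the controller's scan parameters and the connection parameters stored
 * in the hci_conn. The own (possibly random) address is refreshed first
 * via hci_update_random_address().
 */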
static void hci_req_add_le_create_conn(struct hci_request *req,
				       struct hci_conn *conn)
{
	struct hci_cp_le_create_conn cp;
	struct hci_dev *hdev = conn->hdev;
	u8 own_addr_type;

	memset(&cp, 0, sizeof(cp));

	/* Update random address, but set require_privacy to false so
	 * that we never connect with a non-resolvable address.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
	cp.scan_window = cpu_to_le16(hdev->le_scan_window);
	bacpy(&cp.peer_addr, &conn->dst);
	cp.peer_addr_type = conn->dst_type;
	cp.own_address_type = own_addr_type;
	cp.conn_interval_min = cpu_to_le16(conn->le_conn_min_interval);
	cp.conn_interval_max = cpu_to_le16(conn->le_conn_max_interval);
	cp.conn_latency = cpu_to_le16(conn->le_conn_latency);
	cp.supervision_timeout = cpu_to_le16(conn->le_supv_timeout);
	cp.min_ce_len = cpu_to_le16(0x0000);
	cp.max_ce_len = cpu_to_le16(0x0000);

	hci_req_add(req, HCI_OP_LE_CREATE_CONN, sizeof(cp), &cp);

	conn->state = BT_CONNECT;
}

static void hci_req_directed_advertising(struct hci_request *req,
					 struct hci_conn *conn)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type;
	u8 enable;

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	clear_bit(HCI_LE_ADV, &hdev->dev_flags);

	/* Set require_privacy to false so that the remote device has a
	 * chance of identifying us.
	 */
	if (hci_update_random_address(req, false, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.type = LE_ADV_DIRECT_IND;
	cp.own_address_type = own_addr_type;
	cp.direct_addr_type = conn->dst_type;
	bacpy(&cp.direct_addr, &conn->dst);
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	enable = 0x01;
	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);

	conn->state = BT_CONNECT;
}

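/* Establish an outgoing LE connection (or reuse a pending one). An
 * existing hci_conn for the destination only gets its pending security
 * level updated; a second connection attempt while one is in progress is
 * rejected with -EBUSY. Known IRKs are resolved to the cached RPA, slave
 * role requests use directed advertising, and master role requests stop
 * any ongoing scan before issuing LE Create Connection.
 */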
struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
				u8 dst_type, u8 sec_level, u16 conn_timeout,
				u8 role)
{
	struct hci_conn_params *params;
	struct hci_conn *conn;
	struct smp_irk *irk;
	struct hci_request req;
	int err;

	/* Some devices send ATT messages as soon as the physical link is
	 * established. To be able to handle these ATT messages, the user-
	 * space first establishes the connection and then starts the pairing
	 * process.
	 *
	 * So if a hci_conn object already exists for the following connection
	 * attempt, we simply update pending_sec_level and auth_type fields
	 * and return the object found.
	 */
	conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
	if (conn) {
		conn->pending_sec_level = sec_level;
		goto done;
	}

	/* Since the controller supports only one LE connection attempt at a
	 * time, we return -EBUSY if there is any connection attempt running.
	 */
	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		return ERR_PTR(-EBUSY);

	/* When given an identity address with existing identity
	 * resolving key, the connection needs to be established
	 * to a resolvable random address.
	 *
	 * This uses the cached random resolvable address from
	 * a previous scan. When no cached address is available,
	 * try connecting to the identity address instead.
	 *
	 * Storing the resolvable random address is required here
	 * to handle connection failures. The address will later
	 * be resolved back into the original identity address
	 * from the connect request.
	 */
	irk = hci_find_irk_by_addr(hdev, dst, dst_type);
	if (irk && bacmp(&irk->rpa, BDADDR_ANY)) {
		dst = &irk->rpa;
		dst_type = ADDR_LE_DEV_RANDOM;
	}

	conn = hci_conn_add(hdev, LE_LINK, dst, role);
	if (!conn)
		return ERR_PTR(-ENOMEM);

	conn->dst_type = dst_type;
	conn->sec_level = BT_SECURITY_LOW;
	conn->pending_sec_level = sec_level;
	conn->conn_timeout = conn_timeout;

	hci_req_init(&req, hdev);

	/* Disable advertising if we're active. For master role
	 * connections most controllers will refuse to connect if
	 * advertising is enabled, and for slave role connections we
	 * anyway have to disable it in order to start directed
	 * advertising.
	 */
	if (test_bit(HCI_LE_ADV, &hdev->dev_flags)) {
		u8 enable = 0x00;
		hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
			    &enable);
	}

	/* If requested to connect as slave use directed advertising */
	if (conn->role == HCI_ROLE_SLAVE) {
		/* If we're active scanning most controllers are unable
		 * to initiate advertising. Simply reject the attempt.
		 */
		if (test_bit(HCI_LE_SCAN, &hdev->dev_flags) &&
		    hdev->le_scan_type == LE_SCAN_ACTIVE) {
			skb_queue_purge(&req.cmd_q);
			hci_conn_del(conn);
			return ERR_PTR(-EBUSY);
		}

		hci_req_directed_advertising(&req, conn);
		goto create_conn;
	}

	params = hci_conn_params_lookup(hdev, &conn->dst, conn->dst_type);
	if (params) {
		conn->le_conn_min_interval = params->conn_min_interval;
		conn->le_conn_max_interval = params->conn_max_interval;
		conn->le_conn_latency = params->conn_latency;
		conn->le_supv_timeout = params->supervision_timeout;
	} else {
		conn->le_conn_min_interval = hdev->le_conn_min_interval;
		conn->le_conn_max_interval = hdev->le_conn_max_interval;
		conn->le_conn_latency = hdev->le_conn_latency;
		conn->le_supv_timeout = hdev->le_supv_timeout;
	}

	/* If controller is scanning, we stop it since some controllers are
	 * not able to scan and connect at the same time. Also set the
	 * HCI_LE_SCAN_INTERRUPTED flag so that the command complete
	 * handler for scan disabling knows to set the correct discovery
	 * state.
	 */
	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) {
		hci_req_add_le_scan_disable(&req);
		set_bit(HCI_LE_SCAN_INTERRUPTED, &hdev->dev_flags);
	}

	hci_req_add_le_create_conn(&req, conn);

create_conn:
	err = hci_req_run(&req, create_le_conn_complete);
	if (err) {
		hci_conn_del(conn);
		return ERR_PTR(err);
	}

done:
	hci_conn_hold(conn);
	return conn;
}

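/* Establish (or reuse) an outgoing BR/EDR ACL connection and take a
 * reference on it. A new Create Connection is only issued when the link
 * is not already being set up or connected.
 */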
struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst,
				 u8 sec_level, u8 auth_type)
{
	struct hci_conn *acl;

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags))
		return ERR_PTR(-EOPNOTSUPP);

	acl = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
	if (!acl) {
		acl = hci_conn_add(hdev, ACL_LINK, dst, HCI_ROLE_MASTER);
		if (!acl)
			return ERR_PTR(-ENOMEM);
	}

	hci_conn_hold(acl);

	if (acl->state == BT_OPEN || acl->state == BT_CLOSED) {
		acl->sec_level = BT_SECURITY_LOW;
		acl->pending_sec_level = sec_level;
		acl->auth_type = auth_type;
		hci_acl_create_connection(acl);
	}

	return acl;
}

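/* Set up a SCO/eSCO connection on top of an ACL link to the same peer,
 * creating the ACL first if needed. If the ACL is connected but a mode
 * change is still pending, SCO setup is deferred via the
 * HCI_CONN_SCO_SETUP_PEND flag and finished later by hci_sco_setup().
 */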
struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst,
				 __u16 setting)
{
	struct hci_conn *acl;
	struct hci_conn *sco;

	acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING);
	if (IS_ERR(acl))
		return acl;

	sco = hci_conn_hash_lookup_ba(hdev, type, dst);
	if (!sco) {
		sco = hci_conn_add(hdev, type, dst, HCI_ROLE_MASTER);
		if (!sco) {
			hci_conn_drop(acl);
			return ERR_PTR(-ENOMEM);
		}
	}

	acl->link = sco;
	sco->link = acl;

	hci_conn_hold(sco);

	sco->setting = setting;

	if (acl->state == BT_CONNECTED &&
	    (sco->state == BT_OPEN || sco->state == BT_CLOSED)) {
		set_bit(HCI_CONN_POWER_SAVE, &acl->flags);
		hci_conn_enter_active_mode(acl, BT_POWER_FORCE_ACTIVE_ON);

		if (test_bit(HCI_CONN_MODE_CHANGE_PEND, &acl->flags)) {
			/* defer SCO setup until mode change completed */
			set_bit(HCI_CONN_SCO_SETUP_PEND, &acl->flags);
			return sco;
		}

		hci_sco_setup(acl, 0x00);
	}

	return sco;
}

/* Check link security requirement */
int hci_conn_check_link_mode(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	/* In Secure Connections Only mode, it is required that Secure
	 * Connections is used and the link is encrypted with AES-CCM
	 * using a P-256 authenticated combination key.
	 */
	if (test_bit(HCI_SC_ONLY, &conn->hdev->dev_flags)) {
		if (!hci_conn_sc_enabled(conn) ||
		    !test_bit(HCI_CONN_AES_CCM, &conn->flags) ||
		    conn->key_type != HCI_LK_AUTH_COMBINATION_P256)
			return 0;
	}

	if (hci_conn_ssp_enabled(conn) &&
	    !test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 0;

	return 1;
}

/* Authenticate remote device */
static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
{
	BT_DBG("hcon %p", conn);

	if (conn->pending_sec_level > sec_level)
		sec_level = conn->pending_sec_level;

	if (sec_level > conn->sec_level)
		conn->pending_sec_level = sec_level;
	else if (test_bit(HCI_CONN_AUTH, &conn->flags))
		return 1;

	/* Make sure we preserve an existing MITM requirement */
	auth_type |= (conn->auth_type & 0x01);

	conn->auth_type = auth_type;

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_auth_requested cp;

		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_AUTH_REQUESTED,
			     sizeof(cp), &cp);

		/* If we're already encrypted set the REAUTH_PEND flag,
		 * otherwise set the ENCRYPT_PEND.
		 */
		if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
			set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
		else
			set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
	}

	return 0;
}

/* Encrypt the link */
static void hci_conn_encrypt(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags)) {
		struct hci_cp_set_conn_encrypt cp;
		cp.handle  = cpu_to_le16(conn->handle);
		cp.encrypt = 0x01;
		hci_send_cmd(conn->hdev, HCI_OP_SET_CONN_ENCRYPT, sizeof(cp),
			     &cp);
	}
}

/* Enable security */
int hci_conn_security(struct hci_conn *conn, __u8 sec_level, __u8 auth_type,
		      bool initiator)
{
	BT_DBG("hcon %p", conn);

	if (conn->type == LE_LINK)
		return smp_conn_security(conn, sec_level);

	/* For sdp we don't need the link key. */
	if (sec_level == BT_SECURITY_SDP)
		return 1;

	/* For non 2.1 devices and low security level we don't need the link
	   key. */
	if (sec_level == BT_SECURITY_LOW && !hci_conn_ssp_enabled(conn))
		return 1;

	/* For other security levels we need the link key. */
	if (!test_bit(HCI_CONN_AUTH, &conn->flags))
		goto auth;

	/* An authenticated FIPS approved combination key has sufficient
	 * security for security level 4. */
	if (conn->key_type == HCI_LK_AUTH_COMBINATION_P256 &&
	    sec_level == BT_SECURITY_FIPS)
		goto encrypt;

	/* An authenticated combination key has sufficient security for
	   security level 3. */
	if ((conn->key_type == HCI_LK_AUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_AUTH_COMBINATION_P256) &&
	    sec_level == BT_SECURITY_HIGH)
		goto encrypt;

	/* An unauthenticated combination key has sufficient security for
	   security level 1 and 2. */
	if ((conn->key_type == HCI_LK_UNAUTH_COMBINATION_P192 ||
	     conn->key_type == HCI_LK_UNAUTH_COMBINATION_P256) &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW))
		goto encrypt;

	/* A combination key always has sufficient security for security
	   levels 1 or 2. High security level requires that the combination
	   key was generated using the maximum PIN code length (16).
	   For pre 2.1 units. */
	if (conn->key_type == HCI_LK_COMBINATION &&
	    (sec_level == BT_SECURITY_MEDIUM || sec_level == BT_SECURITY_LOW ||
	     conn->pin_length == 16))
		goto encrypt;

auth:
	if (test_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags))
		return 0;

	if (initiator)
		set_bit(HCI_CONN_AUTH_INITIATOR, &conn->flags);

	if (!hci_conn_auth(conn, sec_level, auth_type))
		return 0;

encrypt:
	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		return 1;

	hci_conn_encrypt(conn);
	return 0;
}
EXPORT_SYMBOL(hci_conn_security);

/* Check secure link requirement */
int hci_conn_check_secure(struct hci_conn *conn, __u8 sec_level)
{
	BT_DBG("hcon %p", conn);

	/* Accept if non-secure or higher security level is required */
	if (sec_level != BT_SECURITY_HIGH && sec_level != BT_SECURITY_FIPS)
		return 1;

	/* Accept if secure or higher security level is already present */
	if (conn->sec_level == BT_SECURITY_HIGH ||
	    conn->sec_level == BT_SECURITY_FIPS)
		return 1;

	/* Reject not secure link */
	return 0;
}
EXPORT_SYMBOL(hci_conn_check_secure);

/* Change link key */
int hci_conn_change_link_key(struct hci_conn *conn)
{
	BT_DBG("hcon %p", conn);

	if (!test_and_set_bit(HCI_CONN_AUTH_PEND, &conn->flags)) {
		struct hci_cp_change_conn_link_key cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(conn->hdev, HCI_OP_CHANGE_CONN_LINK_KEY,
			     sizeof(cp), &cp);
	}

	return 0;
}

/* Switch role */
int hci_conn_switch_role(struct hci_conn *conn, __u8 role)
{
	BT_DBG("hcon %p", conn);

	if (role == conn->role)
		return 1;

	if (!test_and_set_bit(HCI_CONN_RSWITCH_PEND, &conn->flags)) {
		struct hci_cp_switch_role cp;
		bacpy(&cp.bdaddr, &conn->dst);
		cp.role = role;
		hci_send_cmd(conn->hdev, HCI_OP_SWITCH_ROLE, sizeof(cp), &cp);
	}

	return 0;
}
EXPORT_SYMBOL(hci_conn_switch_role);

/* Enter active mode */
void hci_conn_enter_active_mode(struct hci_conn *conn, __u8 force_active)
{
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("hcon %p mode %d", conn, conn->mode);

	if (conn->mode != HCI_CM_SNIFF)
		goto timer;

	if (!test_bit(HCI_CONN_POWER_SAVE, &conn->flags) && !force_active)
		goto timer;

	if (!test_and_set_bit(HCI_CONN_MODE_CHANGE_PEND, &conn->flags)) {
		struct hci_cp_exit_sniff_mode cp;
		cp.handle = cpu_to_le16(conn->handle);
		hci_send_cmd(hdev, HCI_OP_EXIT_SNIFF_MODE, sizeof(cp), &cp);
	}

timer:
	if (hdev->idle_timeout > 0)
		queue_delayed_work(hdev->workqueue, &conn->idle_work,
				   msecs_to_jiffies(hdev->idle_timeout));
}

/* Drop all connections on the device */
void hci_conn_hash_flush(struct hci_dev *hdev)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c, *n;

	BT_DBG("hdev %s", hdev->name);

	list_for_each_entry_safe(c, n, &h->list, list) {
		c->state = BT_CLOSED;

		hci_proto_disconn_cfm(c, HCI_ERROR_LOCAL_HOST_TERM);
		hci_conn_del(c);
	}
}

/* Check pending connect attempts */
void hci_conn_check_pending(struct hci_dev *hdev)
{
	struct hci_conn *conn;

	BT_DBG("hdev %s", hdev->name);

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, ACL_LINK, BT_CONNECT2);
	if (conn)
		hci_acl_create_connection(conn);

	hci_dev_unlock(hdev);
}

static u32 get_link_mode(struct hci_conn *conn)
{
	u32 link_mode = 0;

	if (conn->role == HCI_ROLE_MASTER)
		link_mode |= HCI_LM_MASTER;

	if (test_bit(HCI_CONN_ENCRYPT, &conn->flags))
		link_mode |= HCI_LM_ENCRYPT;

	if (test_bit(HCI_CONN_AUTH, &conn->flags))
		link_mode |= HCI_LM_AUTH;

	if (test_bit(HCI_CONN_SECURE, &conn->flags))
		link_mode |= HCI_LM_SECURE;

	if (test_bit(HCI_CONN_FIPS, &conn->flags))
		link_mode |= HCI_LM_FIPS;

	return link_mode;
}

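/* ioctl helper (HCIGETCONNLIST): copy a bounded list of connection info
 * entries for the requested device to userspace. The number of entries
 * is capped by the caller-supplied conn_num and an internal page-size
 * based limit.
 */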
int hci_get_conn_list(void __user *arg)
{
	struct hci_conn *c;
	struct hci_conn_list_req req, *cl;
	struct hci_conn_info *ci;
	struct hci_dev *hdev;
	int n = 0, size, err;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	if (!req.conn_num || req.conn_num > (PAGE_SIZE * 2) / sizeof(*ci))
		return -EINVAL;

	size = sizeof(req) + req.conn_num * sizeof(*ci);

	cl = kmalloc(size, GFP_KERNEL);
	if (!cl)
		return -ENOMEM;

	hdev = hci_dev_get(req.dev_id);
	if (!hdev) {
		kfree(cl);
		return -ENODEV;
	}

	ci = cl->conn_info;

	hci_dev_lock(hdev);
	list_for_each_entry(c, &hdev->conn_hash.list, list) {
		bacpy(&(ci + n)->bdaddr, &c->dst);
		(ci + n)->handle = c->handle;
		(ci + n)->type  = c->type;
		(ci + n)->out   = c->out;
		(ci + n)->state = c->state;
		(ci + n)->link_mode = get_link_mode(c);
		if (++n >= req.conn_num)
			break;
	}
	hci_dev_unlock(hdev);

	cl->dev_id = hdev->id;
	cl->conn_num = n;
	size = sizeof(req) + n * sizeof(*ci);

	hci_dev_put(hdev);

	err = copy_to_user(arg, cl, size);
	kfree(cl);

	return err ? -EFAULT : 0;
}

int hci_get_conn_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_conn_info_req req;
	struct hci_conn_info ci;
	struct hci_conn *conn;
	char __user *ptr = arg + sizeof(req);

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, req.type, &req.bdaddr);
	if (conn) {
		bacpy(&ci.bdaddr, &conn->dst);
		ci.handle = conn->handle;
		ci.type  = conn->type;
		ci.out   = conn->out;
		ci.state = conn->state;
		ci.link_mode = get_link_mode(conn);
	}
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(ptr, &ci, sizeof(ci)) ? -EFAULT : 0;
}

int hci_get_auth_info(struct hci_dev *hdev, void __user *arg)
{
	struct hci_auth_info_req req;
	struct hci_conn *conn;

	if (copy_from_user(&req, arg, sizeof(req)))
		return -EFAULT;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &req.bdaddr);
	if (conn)
		req.type = conn->auth_type;
	hci_dev_unlock(hdev);

	if (!conn)
		return -ENOENT;

	return copy_to_user(arg, &req, sizeof(req)) ? -EFAULT : 0;
}

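/* Create a new HCI channel on a connection. Each channel holds its own
 * reference on the hci_conn and is added to the connection's channel
 * list under RCU; creation is refused once the connection has been
 * marked with HCI_CONN_DROP.
 */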
struct hci_chan *hci_chan_create(struct hci_conn *conn)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_chan *chan;

	BT_DBG("%s hcon %p", hdev->name, conn);

	if (test_bit(HCI_CONN_DROP, &conn->flags)) {
		BT_DBG("Refusing to create new hci_chan");
		return NULL;
	}

	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return NULL;

	chan->conn = hci_conn_get(conn);
	skb_queue_head_init(&chan->data_q);
	chan->state = BT_CONNECTED;

	list_add_rcu(&chan->list, &conn->chan_list);

	return chan;
}

void hci_chan_del(struct hci_chan *chan)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s hcon %p chan %p", hdev->name, conn, chan);

	list_del_rcu(&chan->list);

	synchronize_rcu();

	/* Prevent new hci_chan's from being created for this hci_conn */
	set_bit(HCI_CONN_DROP, &conn->flags);

	hci_conn_put(conn);

	skb_queue_purge(&chan->data_q);
	kfree(chan);
}

void hci_chan_list_flush(struct hci_conn *conn)
{
	struct hci_chan *chan, *n;

	BT_DBG("hcon %p", conn);

	list_for_each_entry_safe(chan, n, &conn->chan_list, list)
		hci_chan_del(chan);
}

static struct hci_chan *__hci_chan_lookup_handle(struct hci_conn *hcon,
						 __u16 handle)
{
	struct hci_chan *hchan;

	list_for_each_entry(hchan, &hcon->chan_list, list) {
		if (hchan->handle == handle)
			return hchan;
	}

	return NULL;
}

struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *hcon;
	struct hci_chan *hchan = NULL;

	rcu_read_lock();

	list_for_each_entry_rcu(hcon, &h->list, list) {
		hchan = __hci_chan_lookup_handle(hcon, handle);
		if (hchan)
			break;
	}

	rcu_read_unlock();

	return hchan;
}