Diffstat (limited to 'net/bluetooth/hci_sync.c')
-rw-r--r--  net/bluetooth/hci_sync.c  433
1 file changed, 386 insertions(+), 47 deletions(-)
diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c
index 5716345a26df..f6b662369322 100644
--- a/net/bluetooth/hci_sync.c
+++ b/net/bluetooth/hci_sync.c
@@ -32,6 +32,10 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
 	hdev->req_result = result;
 	hdev->req_status = HCI_REQ_DONE;
 
+	/* Free the request command so it is not used as response */
+	kfree_skb(hdev->req_skb);
+	hdev->req_skb = NULL;
+
 	if (skb) {
 		struct sock *sk = hci_skb_sk(skb);
 
@@ -39,7 +43,7 @@ static void hci_cmd_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
 		if (sk)
 			sock_put(sk);
 
-		hdev->req_skb = skb_get(skb);
+		hdev->req_rsp = skb_get(skb);
 	}
 
 	wake_up_interruptible(&hdev->req_wait_q);
@@ -187,8 +191,8 @@ struct sk_buff *__hci_cmd_sync_sk(struct hci_dev *hdev, u16 opcode, u32 plen,
 
 	hdev->req_status = 0;
 	hdev->req_result = 0;
-	skb = hdev->req_skb;
-	hdev->req_skb = NULL;
+	skb = hdev->req_rsp;
+	hdev->req_rsp = NULL;
 
 	bt_dev_dbg(hdev, "end: err %d", err);
 
@@ -566,6 +570,17 @@ void hci_cmd_sync_init(struct hci_dev *hdev)
 	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
 }
 
+static void _hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+				       struct hci_cmd_sync_work_entry *entry,
+				       int err)
+{
+	if (entry->destroy)
+		entry->destroy(hdev, entry->data, err);
+
+	list_del(&entry->list);
+	kfree(entry);
+}
+
 void hci_cmd_sync_clear(struct hci_dev *hdev)
 {
 	struct hci_cmd_sync_work_entry *entry, *tmp;
@@ -574,17 +589,12 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)
 	cancel_work_sync(&hdev->reenable_adv_work);
 
 	mutex_lock(&hdev->cmd_sync_work_lock);
-	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
-		if (entry->destroy)
-			entry->destroy(hdev, entry->data, -ECANCELED);
-
-		list_del(&entry->list);
-		kfree(entry);
-	}
+	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list)
+		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
 	mutex_unlock(&hdev->cmd_sync_work_lock);
 }
 
-void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
 {
 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
 
@@ -592,15 +602,17 @@ void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
 		hdev->req_result = err;
 		hdev->req_status = HCI_REQ_CANCELED;
 
-		cancel_delayed_work_sync(&hdev->cmd_timer);
-		cancel_delayed_work_sync(&hdev->ncmd_timer);
-		atomic_set(&hdev->cmd_cnt, 1);
-
-		wake_up_interruptible(&hdev->req_wait_q);
+		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
 	}
 }
+EXPORT_SYMBOL(hci_cmd_sync_cancel);
 
-void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
+/* Cancel ongoing command request synchronously:
+ *
+ * - Set result and mark status to HCI_REQ_CANCELED
+ * - Wakeup command sync thread
+ */
+void hci_cmd_sync_cancel_sync(struct hci_dev *hdev, int err)
 {
 	bt_dev_dbg(hdev, "err 0x%2.2x", err);
 
@@ -608,10 +620,10 @@ void hci_cmd_sync_cancel(struct hci_dev *hdev, int err)
 		hdev->req_result = err;
 		hdev->req_status = HCI_REQ_CANCELED;
 
-		queue_work(hdev->workqueue, &hdev->cmd_sync_cancel_work);
+		wake_up_interruptible(&hdev->req_wait_q);
 	}
 }
-EXPORT_SYMBOL(hci_cmd_sync_cancel);
+EXPORT_SYMBOL(hci_cmd_sync_cancel_sync);
 
 /* Submit HCI command to be run in as cmd_sync_work:
  *
@@ -667,6 +679,115 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
 }
 EXPORT_SYMBOL(hci_cmd_sync_queue);
 
+static struct hci_cmd_sync_work_entry *
+_hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+			   void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+	struct hci_cmd_sync_work_entry *entry, *tmp;
+
+	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {
+		if (func && entry->func != func)
+			continue;
+
+		if (data && entry->data != data)
+			continue;
+
+		if (destroy && entry->destroy != destroy)
+			continue;
+
+		return entry;
+	}
+
+	return NULL;
+}
+
+/* Queue HCI command entry once:
+ *
+ * - Lookup if an entry already exist and only if it doesn't creates a new entry
+ *   and queue it.
+ */
+int hci_cmd_sync_queue_once(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+			    void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+	if (hci_cmd_sync_lookup_entry(hdev, func, data, destroy))
+		return 0;
+
+	return hci_cmd_sync_queue(hdev, func, data, destroy);
+}
+EXPORT_SYMBOL(hci_cmd_sync_queue_once);
+
+/* Lookup HCI command entry:
+ *
+ * - Return first entry that matches by function callback or data or
+ *   destroy callback.
+ */
+struct hci_cmd_sync_work_entry *
+hci_cmd_sync_lookup_entry(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+			  void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+	struct hci_cmd_sync_work_entry *entry;
+
+	mutex_lock(&hdev->cmd_sync_work_lock);
+	entry = _hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+	mutex_unlock(&hdev->cmd_sync_work_lock);
+
+	return entry;
+}
+EXPORT_SYMBOL(hci_cmd_sync_lookup_entry);
+
+/* Cancel HCI command entry */
+void hci_cmd_sync_cancel_entry(struct hci_dev *hdev,
+			       struct hci_cmd_sync_work_entry *entry)
+{
+	mutex_lock(&hdev->cmd_sync_work_lock);
+	_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+	mutex_unlock(&hdev->cmd_sync_work_lock);
+}
+EXPORT_SYMBOL(hci_cmd_sync_cancel_entry);
+
+/* Dequeue one HCI command entry:
+ *
+ * - Lookup and cancel first entry that matches.
+ */
+bool hci_cmd_sync_dequeue_once(struct hci_dev *hdev,
+			       hci_cmd_sync_work_func_t func,
+			       void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+	struct hci_cmd_sync_work_entry *entry;
+
+	entry = hci_cmd_sync_lookup_entry(hdev, func, data, destroy);
+	if (!entry)
+		return false;
+
+	hci_cmd_sync_cancel_entry(hdev, entry);
+
+	return true;
+}
+EXPORT_SYMBOL(hci_cmd_sync_dequeue_once);
+
+/* Dequeue HCI command entry:
+ *
+ * - Lookup and cancel any entry that matches by function callback or data or
+ *   destroy callback.
+ */
+bool hci_cmd_sync_dequeue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func,
+			  void *data, hci_cmd_sync_work_destroy_t destroy)
+{
+	struct hci_cmd_sync_work_entry *entry;
+	bool ret = false;
+
+	mutex_lock(&hdev->cmd_sync_work_lock);
+	while ((entry = _hci_cmd_sync_lookup_entry(hdev, func, data,
+						   destroy))) {
+		_hci_cmd_sync_cancel_entry(hdev, entry, -ECANCELED);
+		ret = true;
+	}
+	mutex_unlock(&hdev->cmd_sync_work_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL(hci_cmd_sync_dequeue);
+
 int hci_update_eir_sync(struct hci_dev *hdev)
 {
 	struct hci_cp_write_eir cp;
@@ -2445,6 +2566,16 @@ static struct conn_params *conn_params_copy(struct list_head *list, size_t *n)
 	return p;
 }
 
+/* Clear LE Accept List */
+static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
+{
+	if (!(hdev->commands[26] & 0x80))
+		return 0;
+
+	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
+				     HCI_CMD_TIMEOUT);
+}
+
 /* Device must not be scanning when updating the accept list.
  *
  * Update is done using the following sequence:
@@ -2493,6 +2624,31 @@ static u8 hci_update_accept_list_sync(struct hci_dev *hdev)
 		goto done;
 	}
 
+	/* Force address filtering if PA Sync is in progress */
+	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
+		struct hci_cp_le_pa_create_sync *sent;
+
+		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_PA_CREATE_SYNC);
+		if (sent) {
+			struct conn_params pa;
+
+			memset(&pa, 0, sizeof(pa));
+
+			bacpy(&pa.addr, &sent->addr);
+			pa.addr_type = sent->addr_type;
+
+			/* Clear first since there could be addresses left
+			 * behind.
+			 */
+			hci_le_clear_accept_list_sync(hdev);
+
+			num_entries = 1;
+			err = hci_le_add_accept_list_sync(hdev, &pa,
+							  &num_entries);
+			goto done;
+		}
+	}
+
 	/* Go through the current accept list programmed into the
 	 * controller one by one and check if that address is connected or is
 	 * still in the list of pending connections or list of devices to
@@ -2602,6 +2758,14 @@ done:
 	return filter_policy;
 }
 
+static void hci_le_scan_phy_params(struct hci_cp_le_scan_phy_params *cp,
+				   u8 type, u16 interval, u16 window)
+{
+	cp->type = type;
+	cp->interval = cpu_to_le16(interval);
+	cp->window = cpu_to_le16(window);
+}
+
 static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
 					  u16 interval, u16 window,
 					  u8 own_addr_type, u8 filter_policy)
@@ -2609,7 +2773,7 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
 	struct hci_cp_le_set_ext_scan_params *cp;
 	struct hci_cp_le_scan_phy_params *phy;
 	u8 data[sizeof(*cp) + sizeof(*phy) * 2];
-	u8 num_phy = 0;
+	u8 num_phy = 0x00;
 
 	cp = (void *)data;
 	phy = (void *)cp->data;
@@ -2619,28 +2783,64 @@ static int hci_le_set_ext_scan_param_sync(struct hci_dev *hdev, u8 type,
 	cp->own_addr_type = own_addr_type;
 	cp->filter_policy = filter_policy;
 
+	/* Check if PA Sync is in progress then select the PHY based on the
+	 * hci_conn.iso_qos.
+	 */
+	if (hci_dev_test_flag(hdev, HCI_PA_SYNC)) {
+		struct hci_cp_le_add_to_accept_list *sent;
+
+		sent = hci_sent_cmd_data(hdev, HCI_OP_LE_ADD_TO_ACCEPT_LIST);
+		if (sent) {
+			struct hci_conn *conn;
+
+			conn = hci_conn_hash_lookup_ba(hdev, ISO_LINK,
+						       &sent->bdaddr);
+			if (conn) {
+				struct bt_iso_qos *qos = &conn->iso_qos;
+
+				if (qos->bcast.in.phy & BT_ISO_PHY_1M ||
+				    qos->bcast.in.phy & BT_ISO_PHY_2M) {
+					cp->scanning_phys |= LE_SCAN_PHY_1M;
+					hci_le_scan_phy_params(phy, type,
+							       interval,
+							       window);
+					num_phy++;
+					phy++;
+				}
+
+				if (qos->bcast.in.phy & BT_ISO_PHY_CODED) {
+					cp->scanning_phys |= LE_SCAN_PHY_CODED;
+					hci_le_scan_phy_params(phy, type,
+							       interval,
+							       window);
+					num_phy++;
+					phy++;
+				}
+
+				if (num_phy)
+					goto done;
+			}
+		}
+	}
+
 	if (scan_1m(hdev) || scan_2m(hdev)) {
 		cp->scanning_phys |= LE_SCAN_PHY_1M;
-
-		phy->type = type;
-		phy->interval = cpu_to_le16(interval);
-		phy->window = cpu_to_le16(window);
-
+		hci_le_scan_phy_params(phy, type, interval, window);
 		num_phy++;
 		phy++;
 	}
 
 	if (scan_coded(hdev)) {
 		cp->scanning_phys |= LE_SCAN_PHY_CODED;
-
-		phy->type = type;
-		phy->interval = cpu_to_le16(interval);
-		phy->window = cpu_to_le16(window);
-
+		hci_le_scan_phy_params(phy, type, interval, window);
 		num_phy++;
 		phy++;
 	}
 
+done:
+	if (!num_phy)
+		return -EINVAL;
+
 	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
 				     sizeof(*cp) + sizeof(*phy) * num_phy,
 				     data, HCI_CMD_TIMEOUT);
@@ -2879,7 +3079,8 @@ int hci_update_passive_scan(struct hci_dev *hdev)
 	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
 		return 0;
 
-	return hci_cmd_sync_queue(hdev, update_passive_scan_sync, NULL, NULL);
+	return hci_cmd_sync_queue_once(hdev, update_passive_scan_sync, NULL,
+				       NULL);
 }
 
 int hci_write_sc_support_sync(struct hci_dev *hdev, u8 val)
@@ -4098,16 +4299,6 @@ static int hci_le_read_accept_list_size_sync(struct hci_dev *hdev)
 				     0, NULL, HCI_CMD_TIMEOUT);
 }
 
-/* Clear LE Accept List */
-static int hci_le_clear_accept_list_sync(struct hci_dev *hdev)
-{
-	if (!(hdev->commands[26] & 0x80))
-		return 0;
-
-	return __hci_cmd_sync_status(hdev, HCI_OP_LE_CLEAR_ACCEPT_LIST, 0, NULL,
-				     HCI_CMD_TIMEOUT);
-}
-
 /* Read LE Resolving List Size */
 static int hci_le_read_resolv_list_size_sync(struct hci_dev *hdev)
 {
@@ -4834,6 +5025,11 @@ int hci_dev_open_sync(struct hci_dev *hdev)
 			hdev->sent_cmd = NULL;
 		}
 
+		if (hdev->req_skb) {
+			kfree_skb(hdev->req_skb);
+			hdev->req_skb = NULL;
+		}
+
 		clear_bit(HCI_RUNNING, &hdev->flags);
 		hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
 
@@ -4994,6 +5190,12 @@ int hci_dev_close_sync(struct hci_dev *hdev)
 		hdev->sent_cmd = NULL;
 	}
 
+	/* Drop last request */
+	if (hdev->req_skb) {
+		kfree_skb(hdev->req_skb);
+		hdev->req_skb = NULL;
+	}
+
 	clear_bit(HCI_RUNNING, &hdev->flags);
 	hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
 
@@ -5403,27 +5605,33 @@ static int hci_power_off_sync(struct hci_dev *hdev)
 	if (!test_bit(HCI_UP, &hdev->flags))
 		return 0;
 
+	hci_dev_set_flag(hdev, HCI_POWERING_DOWN);
+
 	if (test_bit(HCI_ISCAN, &hdev->flags) ||
 	    test_bit(HCI_PSCAN, &hdev->flags)) {
 		err = hci_write_scan_enable_sync(hdev, 0x00);
 		if (err)
-			return err;
+			goto out;
 	}
 
 	err = hci_clear_adv_sync(hdev, NULL, false);
 	if (err)
-		return err;
+		goto out;
 
 	err = hci_stop_discovery_sync(hdev);
 	if (err)
-		return err;
+		goto out;
 
 	/* Terminated due to Power Off */
 	err = hci_disconnect_all_sync(hdev, HCI_ERROR_REMOTE_POWER_OFF);
 	if (err)
-		return err;
+		goto out;
+
+	err = hci_dev_close_sync(hdev);
 
-	return hci_dev_close_sync(hdev);
+out:
+	hci_dev_clear_flag(hdev, HCI_POWERING_DOWN);
+	return err;
 }
 
 int hci_set_powered_sync(struct hci_dev *hdev, u8 val)
@@ -6161,12 +6369,21 @@ static int hci_le_ext_create_conn_sync(struct hci_dev *hdev,
 					conn->conn_timeout, NULL);
 }
 
-int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn)
+static int hci_le_create_conn_sync(struct hci_dev *hdev, void *data)
 {
 	struct hci_cp_le_create_conn cp;
 	struct hci_conn_params *params;
 	u8 own_addr_type;
 	int err;
+	struct hci_conn *conn = data;
+
+	if (!hci_conn_valid(hdev, conn))
+		return -ECANCELED;
+
+	bt_dev_dbg(hdev, "conn %p", conn);
+
+	clear_bit(HCI_CONN_SCANNING, &conn->flags);
+	conn->state = BT_CONNECT;
 
 	/* If requested to connect as peripheral use directed advertising */
 	if (conn->role == HCI_ROLE_SLAVE) {
@@ -6484,3 +6701,125 @@ int hci_update_adv_data(struct hci_dev *hdev, u8 instance)
 	return hci_cmd_sync_queue(hdev, _update_adv_data_sync,
 				  UINT_PTR(instance), NULL);
 }
+
+static int hci_acl_create_conn_sync(struct hci_dev *hdev, void *data)
+{
+	struct hci_conn *conn = data;
+	struct inquiry_entry *ie;
+	struct hci_cp_create_conn cp;
+	int err;
+
+	if (!hci_conn_valid(hdev, conn))
+		return -ECANCELED;
+
+	/* Many controllers disallow HCI Create Connection while it is doing
+	 * HCI Inquiry. So we cancel the Inquiry first before issuing HCI Create
+	 * Connection. This may cause the MGMT discovering state to become false
+	 * without user space's request but it is okay since the MGMT Discovery
+	 * APIs do not promise that discovery should be done forever. Instead,
+	 * the user space monitors the status of MGMT discovering and it may
+	 * request for discovery again when this flag becomes false.
+	 */
+	if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+		err = __hci_cmd_sync_status(hdev, HCI_OP_INQUIRY_CANCEL, 0,
+					    NULL, HCI_CMD_TIMEOUT);
+		if (err)
+			bt_dev_warn(hdev, "Failed to cancel inquiry %d", err);
+	}
+
+	conn->state = BT_CONNECT;
+	conn->out = true;
+	conn->role = HCI_ROLE_MASTER;
+
+	conn->attempt++;
+
+	conn->link_policy = hdev->link_policy;
+
+	memset(&cp, 0, sizeof(cp));
+	bacpy(&cp.bdaddr, &conn->dst);
+	cp.pscan_rep_mode = 0x02;
+
+	ie = hci_inquiry_cache_lookup(hdev, &conn->dst);
+	if (ie) {
+		if (inquiry_entry_age(ie) <= INQUIRY_ENTRY_AGE_MAX) {
+			cp.pscan_rep_mode = ie->data.pscan_rep_mode;
+			cp.pscan_mode     = ie->data.pscan_mode;
+			cp.clock_offset   = ie->data.clock_offset |
+					    cpu_to_le16(0x8000);
+		}
+
+		memcpy(conn->dev_class, ie->data.dev_class, 3);
+	}
+
+	cp.pkt_type = cpu_to_le16(conn->pkt_type);
+	if (lmp_rswitch_capable(hdev) && !(hdev->link_mode & HCI_LM_MASTER))
+		cp.role_switch = 0x01;
+	else
+		cp.role_switch = 0x00;
+
+	return __hci_cmd_sync_status_sk(hdev, HCI_OP_CREATE_CONN,
+					sizeof(cp), &cp,
+					HCI_EV_CONN_COMPLETE,
+					conn->conn_timeout, NULL);
+}
+
+int hci_connect_acl_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+	return hci_cmd_sync_queue_once(hdev, hci_acl_create_conn_sync, conn,
+				       NULL);
+}
+
+static void create_le_conn_complete(struct hci_dev *hdev, void *data, int err)
+{
+	struct hci_conn *conn = data;
+
+	bt_dev_dbg(hdev, "err %d", err);
+
+	if (err == -ECANCELED)
+		return;
+
+	hci_dev_lock(hdev);
+
+	if (!hci_conn_valid(hdev, conn))
+		goto done;
+
+	if (!err) {
+		hci_connect_le_scan_cleanup(conn, 0x00);
+		goto done;
+	}
+
+	/* Check if connection is still pending */
+	if (conn != hci_lookup_le_connect(hdev))
+		goto done;
+
+	/* Flush to make sure we send create conn cancel command if needed */
+	flush_delayed_work(&conn->le_conn_timeout);
+	hci_conn_failed(conn, bt_status(err));
+
+done:
+	hci_dev_unlock(hdev);
+}
+
+int hci_connect_le_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+	return hci_cmd_sync_queue_once(hdev, hci_le_create_conn_sync, conn,
+				       create_le_conn_complete);
+}
+
+int hci_cancel_connect_sync(struct hci_dev *hdev, struct hci_conn *conn)
+{
+	if (conn->state != BT_OPEN)
+		return -EINVAL;
+
+	switch (conn->type) {
+	case ACL_LINK:
+		return !hci_cmd_sync_dequeue_once(hdev,
+						  hci_acl_create_conn_sync,
+						  conn, NULL);
+	case LE_LINK:
+		return !hci_cmd_sync_dequeue_once(hdev, hci_le_create_conn_sync,
+						  conn, create_le_conn_complete);
+	}
+
+	return -ENOENT;
+}
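A note on the new queue-once API above: callers that repeatedly trigger the same deferred work (e.g. hci_update_passive_scan()) can now rely on hci_cmd_sync_queue_once() to collapse duplicates, and on hci_cmd_sync_dequeue_once() to drop a still-pending entry. A minimal caller sketch, assuming the existing hci_cmd_sync_work_func_t/hci_cmd_sync_work_destroy_t signatures; the my_* names are hypothetical and not part of this patch:

#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sync.h>

/* Runs from the cmd_sync work; may issue __hci_cmd_sync_status() etc. */
static int my_work_sync(struct hci_dev *hdev, void *data)
{
	return 0;
}

/* Called when the entry completes or is cancelled (err == -ECANCELED) */
static void my_work_destroy(struct hci_dev *hdev, void *data, int err)
{
}

static int my_trigger(struct hci_dev *hdev, void *data)
{
	/* No-op (returns 0) if an entry with the same func/data/destroy
	 * is already queued; otherwise queues a new one.
	 */
	return hci_cmd_sync_queue_once(hdev, my_work_sync, data,
				       my_work_destroy);
}

static bool my_abort(struct hci_dev *hdev, void *data)
{
	/* Cancels the first matching pending entry, invoking
	 * my_work_destroy() with -ECANCELED; false if none was queued.
	 */
	return hci_cmd_sync_dequeue_once(hdev, my_work_sync, data,
					 my_work_destroy);
}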
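The cancel path is likewise split in two: __hci_cmd_sync_cancel() becomes hci_cmd_sync_cancel(), which now defers the cleanup by queuing hdev->cmd_sync_cancel_work, while the new hci_cmd_sync_cancel_sync() only records the result, marks the request HCI_REQ_CANCELED and wakes the waiter directly, without bouncing through hdev->workqueue. A sketch of the intended driver-side use; my_transport_error() is a hypothetical hook, not part of this patch:

/* Fail the in-flight synchronous request immediately from the caller's
 * own context; __hci_cmd_sync_sk() sees HCI_REQ_CANCELED and bails out.
 */
static void my_transport_error(struct hci_dev *hdev, int err)
{
	hci_cmd_sync_cancel_sync(hdev, err);
}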
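Finally, note the return-value convention of hci_cancel_connect_sync(): the dequeue helpers return bool, so the negation maps "entry found and dropped" to 0, while a miss (the create-connection work already started or finished) comes back as 1 rather than a negative errno. A hypothetical caller therefore has to treat any non-zero value as "cannot cancel from the queue anymore":

static int my_abort_connect(struct hci_dev *hdev, struct hci_conn *conn)
{
	int err = hci_cancel_connect_sync(hdev, conn);

	/* 0       : queued create-conn work dropped before it ran
	 * 1       : no matching entry (work already running or done)
	 * -EINVAL : conn->state is not BT_OPEN
	 * -ENOENT : link type without a queued create-conn handler
	 */
	if (err)
		bt_dev_dbg(hdev, "cancel from queue failed: %d", err);

	return err;
}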
