Diffstat (limited to 'net/bluetooth/hci_core.c')
-rw-r--r--  net/bluetooth/hci_core.c | 170
1 file changed, 125 insertions, 45 deletions
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 2821a42cefdc..1690ae57a09d 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -908,7 +908,7 @@ int hci_get_dev_info(void __user *arg)
 	else
 		flags = hdev->flags;
 
-	strcpy(di.name, hdev->name);
+	strscpy(di.name, hdev->name, sizeof(di.name));
 	di.bdaddr   = hdev->bdaddr;
 	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
 	di.flags    = flags;
@@ -940,20 +940,51 @@ int hci_get_dev_info(void __user *arg)
 
 /* ---- Interface to HCI drivers ---- */
 
+static int hci_dev_do_poweroff(struct hci_dev *hdev)
+{
+	int err;
+
+	BT_DBG("%s %p", hdev->name, hdev);
+
+	hci_req_sync_lock(hdev);
+
+	err = hci_set_powered_sync(hdev, false);
+
+	hci_req_sync_unlock(hdev);
+
+	return err;
+}
+
 static int hci_rfkill_set_block(void *data, bool blocked)
 {
 	struct hci_dev *hdev = data;
+	int err;
 
 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
 
 	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
 		return -EBUSY;
 
+	if (blocked == hci_dev_test_flag(hdev, HCI_RFKILLED))
+		return 0;
+
 	if (blocked) {
 		hci_dev_set_flag(hdev, HCI_RFKILLED);
+
 		if (!hci_dev_test_flag(hdev, HCI_SETUP) &&
-		    !hci_dev_test_flag(hdev, HCI_CONFIG))
-			hci_dev_do_close(hdev);
+		    !hci_dev_test_flag(hdev, HCI_CONFIG)) {
+			err = hci_dev_do_poweroff(hdev);
+			if (err) {
+				bt_dev_err(hdev, "Error when powering off device on rfkill (%d)",
+					   err);
+
+				/* Make sure the device is still closed even if
+				 * anything during power off sequence (eg.
+				 * disconnecting devices) failed.
+				 */
+				hci_dev_do_close(hdev);
+			}
+		}
 	} else {
 		hci_dev_clear_flag(hdev, HCI_RFKILLED);
 	}
@@ -1491,11 +1522,12 @@ static void hci_cmd_timeout(struct work_struct *work)
 	struct hci_dev *hdev = container_of(work, struct hci_dev,
 					    cmd_timer.work);
 
-	if (hdev->sent_cmd) {
-		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
-		u16 opcode = __le16_to_cpu(sent->opcode);
+	if (hdev->req_skb) {
+		u16 opcode = hci_skb_opcode(hdev->req_skb);
 
 		bt_dev_err(hdev, "command 0x%4.4x tx timeout", opcode);
+
+		hci_cmd_sync_cancel_sync(hdev, ETIMEDOUT);
 	} else {
 		bt_dev_err(hdev, "command tx timeout");
 	}
@@ -2608,10 +2640,11 @@ int hci_register_dev(struct hci_dev *hdev)
 	 */
 	switch (hdev->dev_type) {
 	case HCI_PRIMARY:
-		id = ida_simple_get(&hci_index_ida, 0, HCI_MAX_ID, GFP_KERNEL);
+		id = ida_alloc_max(&hci_index_ida, HCI_MAX_ID - 1, GFP_KERNEL);
 		break;
 	case HCI_AMP:
-		id = ida_simple_get(&hci_index_ida, 1, HCI_MAX_ID, GFP_KERNEL);
+		id = ida_alloc_range(&hci_index_ida, 1, HCI_MAX_ID - 1,
+				     GFP_KERNEL);
 		break;
 	default:
 		return -EINVAL;
@@ -2710,7 +2743,7 @@ err_wqueue:
 	destroy_workqueue(hdev->workqueue);
 	destroy_workqueue(hdev->req_workqueue);
 err:
-	ida_simple_remove(&hci_index_ida, hdev->id);
+	ida_free(&hci_index_ida, hdev->id);
 
 	return error;
 }
@@ -2793,8 +2826,9 @@ void hci_release_dev(struct hci_dev *hdev)
 	hci_dev_unlock(hdev);
 
 	ida_destroy(&hdev->unset_handle_ida);
-	ida_simple_remove(&hci_index_ida, hdev->id);
+	ida_free(&hci_index_ida, hdev->id);
 	kfree_skb(hdev->sent_cmd);
+	kfree_skb(hdev->req_skb);
 	kfree_skb(hdev->recv_event);
 	kfree(hdev);
 }
@@ -2826,6 +2860,23 @@ int hci_unregister_suspend_notifier(struct hci_dev *hdev)
 	return ret;
 }
 
+/* Cancel ongoing command synchronously:
+ *
+ * - Cancel command timer
+ * - Reset command counter
+ * - Cancel command request
+ */
+static void hci_cancel_cmd_sync(struct hci_dev *hdev, int err)
+{
+	bt_dev_dbg(hdev, "err 0x%2.2x", err);
+
+	cancel_delayed_work_sync(&hdev->cmd_timer);
+	cancel_delayed_work_sync(&hdev->ncmd_timer);
+	atomic_set(&hdev->cmd_cnt, 1);
+
+	hci_cmd_sync_cancel_sync(hdev, -err);
+}
+
 /* Suspend HCI device */
 int hci_suspend_dev(struct hci_dev *hdev)
 {
@@ -2843,7 +2894,7 @@ int hci_suspend_dev(struct hci_dev *hdev)
 		return 0;
 
 	/* Cancel potentially blocking sync operation before suspend */
-	__hci_cmd_sync_cancel(hdev, -EHOSTDOWN);
+	hci_cancel_cmd_sync(hdev, -EHOSTDOWN);
 
 	hci_req_sync_lock(hdev);
 	ret = hci_suspend_sync(hdev);
@@ -3107,21 +3158,33 @@ int __hci_cmd_send(struct hci_dev *hdev, u16 opcode, u32 plen,
 EXPORT_SYMBOL(__hci_cmd_send);
 
 /* Get data from the previously sent command */
-void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
+static void *hci_cmd_data(struct sk_buff *skb, __u16 opcode)
 {
 	struct hci_command_hdr *hdr;
 
-	if (!hdev->sent_cmd)
+	if (!skb || skb->len < HCI_COMMAND_HDR_SIZE)
 		return NULL;
 
-	hdr = (void *) hdev->sent_cmd->data;
+	hdr = (void *)skb->data;
 
 	if (hdr->opcode != cpu_to_le16(opcode))
 		return NULL;
 
-	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
+	return skb->data + HCI_COMMAND_HDR_SIZE;
+}
 
-	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
+/* Get data from the previously sent command */
+void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
+{
+	void *data;
+
+	/* Check if opcode matches last sent command */
+	data = hci_cmd_data(hdev->sent_cmd, opcode);
+	if (!data)
+		/* Check if opcode matches last request */
+		data = hci_cmd_data(hdev->req_skb, opcode);
+
+	return data;
 }
 
 /* Get data from last received event */
@@ -4022,17 +4085,19 @@ void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status,
 	if (!status && !hci_req_is_complete(hdev))
 		return;
 
+	skb = hdev->req_skb;
+
 	/* If this was the last command in a request the complete
-	 * callback would be found in hdev->sent_cmd instead of the
+	 * callback would be found in hdev->req_skb instead of the
	 * command queue (hdev->cmd_q).
 	 */
-	if (bt_cb(hdev->sent_cmd)->hci.req_flags & HCI_REQ_SKB) {
-		*req_complete_skb = bt_cb(hdev->sent_cmd)->hci.req_complete_skb;
+	if (skb && bt_cb(skb)->hci.req_flags & HCI_REQ_SKB) {
+		*req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
 		return;
 	}
 
-	if (bt_cb(hdev->sent_cmd)->hci.req_complete) {
-		*req_complete = bt_cb(hdev->sent_cmd)->hci.req_complete;
+	if (skb && bt_cb(skb)->hci.req_complete) {
+		*req_complete = bt_cb(skb)->hci.req_complete;
 		return;
 	}
 
@@ -4128,6 +4193,36 @@ static void hci_rx_work(struct work_struct *work)
 	}
 }
 
+static void hci_send_cmd_sync(struct hci_dev *hdev, struct sk_buff *skb)
+{
+	int err;
+
+	bt_dev_dbg(hdev, "skb %p", skb);
+
+	kfree_skb(hdev->sent_cmd);
+
+	hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
+	if (!hdev->sent_cmd) {
+		skb_queue_head(&hdev->cmd_q, skb);
+		queue_work(hdev->workqueue, &hdev->cmd_work);
+		return;
+	}
+
+	err = hci_send_frame(hdev, skb);
+	if (err < 0) {
+		hci_cmd_sync_cancel_sync(hdev, err);
+		return;
+	}
+
+	if (hci_req_status_pend(hdev) &&
+	    !hci_dev_test_and_set_flag(hdev, HCI_CMD_PENDING)) {
+		kfree_skb(hdev->req_skb);
+		hdev->req_skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
+	}
+
+	atomic_dec(&hdev->cmd_cnt);
+}
+
 static void hci_cmd_work(struct work_struct *work)
 {
 	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
@@ -4142,30 +4237,15 @@ static void hci_cmd_work(struct work_struct *work)
 		if (!skb)
 			return;
 
-		kfree_skb(hdev->sent_cmd);
-
-		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
-		if (hdev->sent_cmd) {
-			int res;
-			if (hci_req_status_pend(hdev))
-				hci_dev_set_flag(hdev, HCI_CMD_PENDING);
-			atomic_dec(&hdev->cmd_cnt);
+		hci_send_cmd_sync(hdev, skb);
 
-			res = hci_send_frame(hdev, skb);
-			if (res < 0)
-				__hci_cmd_sync_cancel(hdev, -res);
-
-			rcu_read_lock();
-			if (test_bit(HCI_RESET, &hdev->flags) ||
-			    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
-				cancel_delayed_work(&hdev->cmd_timer);
-			else
-				queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
-						   HCI_CMD_TIMEOUT);
-			rcu_read_unlock();
-		} else {
-			skb_queue_head(&hdev->cmd_q, skb);
-			queue_work(hdev->workqueue, &hdev->cmd_work);
-		}
+		rcu_read_lock();
+		if (test_bit(HCI_RESET, &hdev->flags) ||
+		    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))
+			cancel_delayed_work(&hdev->cmd_timer);
+		else
+			queue_delayed_work(hdev->workqueue, &hdev->cmd_timer,
+					   HCI_CMD_TIMEOUT);
+		rcu_read_unlock();
 	}
 }
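Reader's note on two of the API migrations above (not part of the commit): ida_simple_get()/ida_simple_remove() are deprecated wrappers around the ida_alloc_*()/ida_free() API, and their end bound is exclusive while ida_alloc_max()/ida_alloc_range() take an inclusive maximum, which is why the new calls pass HCI_MAX_ID - 1. Likewise, strscpy() bounds the copy to the destination size and always NUL-terminates, unlike strcpy(). The sketch below uses made-up names (example_ida, example_alloc_id, example_copy_name) purely to illustrate the mapping:

#include <linux/idr.h>    /* ida_alloc_max(), ida_alloc_range(), ida_free() */
#include <linux/string.h> /* strscpy() */
#include <linux/printk.h> /* pr_debug() */
#include <linux/types.h>

/* Illustrative only -- none of this is hci_core.c code. */
static DEFINE_IDA(example_ida);

static int example_alloc_id(bool skip_zero)
{
	if (skip_zero)
		/* old: ida_simple_get(&example_ida, 1, 128, GFP_KERNEL)
		 * allocated from the half-open range [1, 128)
		 */
		return ida_alloc_range(&example_ida, 1, 128 - 1, GFP_KERNEL);

	/* old: ida_simple_get(&example_ida, 0, 128, GFP_KERNEL) */
	return ida_alloc_max(&example_ida, 128 - 1, GFP_KERNEL);
}

static void example_free_id(int id)
{
	/* old: ida_simple_remove(&example_ida, id) */
	ida_free(&example_ida, id);
}

static void example_copy_name(char *dst, size_t dst_size, const char *src)
{
	/* Unlike strcpy(), strscpy() copies at most dst_size - 1 bytes,
	 * always NUL-terminates, and returns -E2BIG on truncation.
	 */
	if (strscpy(dst, src, dst_size) < 0)
		pr_debug("name truncated\n");
}

Since the deprecated wrappers map one-to-one onto the new calls, the IDA and strscpy hunks in this commit are behaviour-preserving cleanups; the functional changes are in the rfkill power-off path and the sent_cmd/req_skb handling.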
