 -rw-r--r--  drivers/bluetooth/btintel.c        |   20
 -rw-r--r--  drivers/bluetooth/btusb.c          |   38
 -rw-r--r--  drivers/bluetooth/hci_ldisc.c      |    7
 -rw-r--r--  drivers/bluetooth/hci_serdev.c     |   10
 -rw-r--r--  include/net/bluetooth/bluetooth.h  |    1
 -rw-r--r--  include/net/bluetooth/hci.h        |    4
 -rw-r--r--  include/net/bluetooth/hci_core.h   |   17
 -rw-r--r--  include/net/bluetooth/hci_sync.h   |    9
 -rw-r--r--  include/net/bluetooth/mgmt.h       |   52
 -rw-r--r--  net/bluetooth/hci_conn.c           |  162
 -rw-r--r--  net/bluetooth/hci_core.c           |   68
 -rw-r--r--  net/bluetooth/hci_debugfs.c        |    2
 -rw-r--r--  net/bluetooth/hci_event.c          |  175
 -rw-r--r--  net/bluetooth/hci_request.c        | 1534
 -rw-r--r--  net/bluetooth/hci_request.h        |   53
 -rw-r--r--  net/bluetooth/hci_sock.c           |    4
 -rw-r--r--  net/bluetooth/hci_sync.c           |  491
 -rw-r--r--  net/bluetooth/hci_sysfs.c          |    3
 -rw-r--r--  net/bluetooth/l2cap_core.c         |   17
 -rw-r--r--  net/bluetooth/mgmt.c               |  610
 -rw-r--r--  net/bluetooth/mgmt_util.c          |   74
 -rw-r--r--  net/bluetooth/mgmt_util.h          |   18
 -rw-r--r--  net/bluetooth/rfcomm/sock.c        |    3
 23 files changed, 1701 insertions(+), 1671 deletions(-)
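
A note on the recurring pattern in the hunks below: much of this series moves command flows from the old hci_request machinery onto hci_cmd_sync_queue(), where a callback runs on the cmd_sync work and may block waiting for the controller (which is why, per the hci_sync.h comment further down, such callbacks must not be called with hdev->lock held). The following is a minimal sketch of that pattern, modelled on the new hci_setup_sync()/hci_enhanced_setup_sync() code in net/bluetooth/hci_conn.c; struct example_ctx and the example_* names are invented for illustration only, while hci_cmd_sync_queue(), __hci_cmd_sync_status(), HCI_OP_READ_ENC_KEY_SIZE and HCI_CMD_TIMEOUT are the real identifiers used by the patch.

/* Illustrative sketch only -- not part of the patch. */
#include <linux/slab.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/hci_sync.h>

struct example_ctx {
	struct hci_conn *conn;
	__u16 handle;
};

/* Runs on hdev's cmd_sync work; unlike event-context code it is allowed
 * to block until the controller answers.
 */
static int example_work_sync(struct hci_dev *hdev, void *data)
{
	struct example_ctx *ctx = data;
	struct hci_cp_read_enc_key_size cp;

	bt_dev_dbg(hdev, "hcon %p handle %u", ctx->conn, ctx->handle);

	cp.handle = cpu_to_le16(ctx->handle);
	kfree(ctx);

	/* Send the command and wait for its Command Complete/Status. */
	return __hci_cmd_sync_status(hdev, HCI_OP_READ_ENC_KEY_SIZE,
				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);
}

/* Caller side: ownership of ctx passes to the callback only if queueing
 * succeeds, so it must be freed here on failure (no destroy callback is
 * registered, matching the hci_setup_sync() hunk).
 */
static bool example_trigger(struct hci_conn *conn, __u16 handle)
{
	struct example_ctx *ctx;
	int err;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return false;

	ctx->conn = conn;
	ctx->handle = handle;

	err = hci_cmd_sync_queue(conn->hdev, example_work_sync, ctx, NULL);
	if (err < 0)
		kfree(ctx);

	return err == 0;
}

Passing NULL as the destroy callback, as hci_setup_sync() does below, means the queued function owns the context once queueing succeeds; the caller only frees it when hci_cmd_sync_queue() itself fails.
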
diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index 818681c89db8..a657e9a3e96a 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -449,6 +449,7 @@ static int btintel_version_info_tlv(struct hci_dev *hdev,  	case 0x17:	/* TyP */  	case 0x18:	/* Slr */  	case 0x19:	/* Slr-F */ +	case 0x1b:      /* Mgr */  		break;  	default:  		bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)", @@ -2330,6 +2331,7 @@ static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant)  	case 0x17:  	case 0x18:  	case 0x19: +	case 0x1b:  		hci_set_msft_opcode(hdev, 0xFC1E);  		break;  	default: @@ -2439,15 +2441,20 @@ static int btintel_setup_combined(struct hci_dev *hdev)  					       INTEL_ROM_LEGACY_NO_WBS_SUPPORT))  				set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,  					&hdev->quirks); +			if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22) +				set_bit(HCI_QUIRK_VALID_LE_STATES, +					&hdev->quirks);  			err = btintel_legacy_rom_setup(hdev, &ver);  			break;  		case 0x0b:      /* SfP */ -		case 0x0c:      /* WsP */  		case 0x11:      /* JfP */  		case 0x12:      /* ThP */  		case 0x13:      /* HrP */  		case 0x14:      /* CcP */ +			set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); +			fallthrough; +		case 0x0c:	/* WsP */  			/* Apply the device specific HCI quirks  			 *  			 * All Legacy bootloader devices support WBS @@ -2455,11 +2462,6 @@ static int btintel_setup_combined(struct hci_dev *hdev)  			set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,  				&hdev->quirks); -			/* Valid LE States quirk for JfP/ThP familiy */ -			if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12) -				set_bit(HCI_QUIRK_VALID_LE_STATES, -					&hdev->quirks); -  			/* Setup MSFT Extension support */  			btintel_set_msft_opcode(hdev, ver.hw_variant); @@ -2530,9 +2532,8 @@ static int btintel_setup_combined(struct hci_dev *hdev)  		 */  		set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); -		/* Valid LE States quirk for JfP/ThP familiy */ -		if (ver.hw_variant == 0x11 || ver.hw_variant == 0x12) -			set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); +		/* Set Valid LE States quirk */ +		set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);  		/* Setup MSFT Extension support */  		btintel_set_msft_opcode(hdev, ver.hw_variant); @@ -2542,6 +2543,7 @@ static int btintel_setup_combined(struct hci_dev *hdev)  	case 0x17:  	case 0x18:  	case 0x19: +	case 0x1b:  		/* Display version information of TLV type */  		btintel_version_info_tlv(hdev, &ver_tlv); diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 15caa6469538..271963805a38 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -426,6 +426,8 @@ static const struct usb_device_id blacklist_table[] = {  						     BTUSB_WIDEBAND_SPEECH },  	{ USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK |  						     BTUSB_WIDEBAND_SPEECH }, +	{ USB_DEVICE(0x0cb8, 0xc549), .driver_info = BTUSB_REALTEK | +						     BTUSB_WIDEBAND_SPEECH },  	/* Realtek 8852CE Bluetooth devices */  	{ USB_DEVICE(0x04ca, 0x4007), .driver_info = BTUSB_REALTEK | @@ -438,6 +440,8 @@ static const struct usb_device_id blacklist_table[] = {  						     BTUSB_WIDEBAND_SPEECH },  	{ USB_DEVICE(0x13d3, 0x3586), .driver_info = BTUSB_REALTEK |  						     BTUSB_WIDEBAND_SPEECH }, +	{ USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK | +						     BTUSB_WIDEBAND_SPEECH },  	/* Realtek Bluetooth devices */  	{ USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01), @@ -466,6 +470,9 @@ static const 
struct usb_device_id blacklist_table[] = {  	{ USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK |  						     BTUSB_WIDEBAND_SPEECH |  						     BTUSB_VALID_LE_STATES }, +	{ USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK | +						     BTUSB_WIDEBAND_SPEECH | +						     BTUSB_VALID_LE_STATES },  	{ USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK |  						     BTUSB_WIDEBAND_SPEECH |  						     BTUSB_VALID_LE_STATES }, @@ -478,9 +485,18 @@ static const struct usb_device_id blacklist_table[] = {  	{ USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK |  						     BTUSB_WIDEBAND_SPEECH |  						     BTUSB_VALID_LE_STATES }, +	{ USB_DEVICE(0x13d3, 0x3578), .driver_info = BTUSB_MEDIATEK | +						     BTUSB_WIDEBAND_SPEECH | +						     BTUSB_VALID_LE_STATES }, +	{ USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK | +						     BTUSB_WIDEBAND_SPEECH | +						     BTUSB_VALID_LE_STATES },  	{ USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK |  						     BTUSB_WIDEBAND_SPEECH |  						     BTUSB_VALID_LE_STATES }, +	{ USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK | +						     BTUSB_WIDEBAND_SPEECH | +						     BTUSB_VALID_LE_STATES },  	/* MediaTek MT7922A Bluetooth devices */  	{ USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK | @@ -516,19 +532,17 @@ static const struct usb_device_id blacklist_table[] = {  	{ USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },  	{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, -	/* Additional Realtek 8761B Bluetooth devices */ +	/* Additional Realtek 8761BUV Bluetooth devices */  	{ USB_DEVICE(0x2357, 0x0604), .driver_info = BTUSB_REALTEK |  						     BTUSB_WIDEBAND_SPEECH }, - -	/* Additional Realtek 8761BU Bluetooth devices */  	{ USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK |  	  					     BTUSB_WIDEBAND_SPEECH },  	{ USB_DEVICE(0x2550, 0x8761), .driver_info = BTUSB_REALTEK |  						     BTUSB_WIDEBAND_SPEECH }, - -	/* Additional Realtek 8761BUV Bluetooth devices */  	{ USB_DEVICE(0x0bda, 0x8771), .driver_info = BTUSB_REALTEK |  						     BTUSB_WIDEBAND_SPEECH }, +	{ USB_DEVICE(0x7392, 0xc611), .driver_info = BTUSB_REALTEK | +						     BTUSB_WIDEBAND_SPEECH },  	/* Additional Realtek 8821AE Bluetooth devices */  	{ USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, @@ -2477,15 +2491,29 @@ static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,  	set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); +	/* WMT cmd/event doesn't follow up the generic HCI cmd/event handling, +	 * it needs constantly polling control pipe until the host received the +	 * WMT event, thus, we should require to specifically acquire PM counter +	 * on the USB to prevent the interface from entering auto suspended +	 * while WMT cmd/event in progress. 
+	 */ +	err = usb_autopm_get_interface(data->intf); +	if (err < 0) +		goto err_free_wc; +  	err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);  	if (err < 0) {  		clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); +		usb_autopm_put_interface(data->intf);  		goto err_free_wc;  	}  	/* Submit control IN URB on demand to process the WMT event */  	err = btusb_mtk_submit_wmt_recv_urb(hdev); + +	usb_autopm_put_interface(data->intf); +  	if (err < 0)  		goto err_free_wc; diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index f537673ede17..865112e96ff9 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -493,6 +493,11 @@ static int hci_uart_tty_open(struct tty_struct *tty)  		BT_ERR("Can't allocate control structure");  		return -ENFILE;  	} +	if (percpu_init_rwsem(&hu->proto_lock)) { +		BT_ERR("Can't allocate semaphore structure"); +		kfree(hu); +		return -ENOMEM; +	}  	tty->disc_data = hu;  	hu->tty = tty; @@ -505,8 +510,6 @@ static int hci_uart_tty_open(struct tty_struct *tty)  	INIT_WORK(&hu->init_ready, hci_uart_init_work);  	INIT_WORK(&hu->write_work, hci_uart_write_work); -	percpu_init_rwsem(&hu->proto_lock); -  	/* Flush any pending characters in the driver */  	tty_driver_flush_buffer(tty); diff --git a/drivers/bluetooth/hci_serdev.c b/drivers/bluetooth/hci_serdev.c index c0e5f42ec6b7..f16fd79bc02b 100644 --- a/drivers/bluetooth/hci_serdev.c +++ b/drivers/bluetooth/hci_serdev.c @@ -310,11 +310,12 @@ int hci_uart_register_device(struct hci_uart *hu,  	serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops); +	if (percpu_init_rwsem(&hu->proto_lock)) +		return -ENOMEM; +  	err = serdev_device_open(hu->serdev);  	if (err) -		return err; - -	percpu_init_rwsem(&hu->proto_lock); +		goto err_rwsem;  	err = p->open(hu);  	if (err) @@ -389,6 +390,8 @@ err_alloc:  	p->close(hu);  err_open:  	serdev_device_close(hu->serdev); +err_rwsem: +	percpu_free_rwsem(&hu->proto_lock);  	return err;  }  EXPORT_SYMBOL_GPL(hci_uart_register_device); @@ -410,5 +413,6 @@ void hci_uart_unregister_device(struct hci_uart *hu)  		clear_bit(HCI_UART_PROTO_READY, &hu->flags);  		serdev_device_close(hu->serdev);  	} +	percpu_free_rwsem(&hu->proto_lock);  }  EXPORT_SYMBOL_GPL(hci_uart_unregister_device); diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index e72f3b247b5e..bcc5a4cd2c17 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -627,6 +627,7 @@ static inline bool iso_enabled(void)  int mgmt_init(void);  void mgmt_exit(void); +void mgmt_cleanup(struct sock *sk);  void bt_sock_reclassify_lock(struct sock *sk, int proto); diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index cf29511b25a8..e004ba04a9ae 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -354,6 +354,10 @@ enum {  	HCI_LE_SIMULTANEOUS_ROLES,  	HCI_CMD_DRAIN_WORKQUEUE, +	HCI_MESH_EXPERIMENTAL, +	HCI_MESH, +	HCI_MESH_SENDING, +  	__HCI_NUM_FLAGS,  }; diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index e7862903187d..c54bc71254af 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -238,6 +238,7 @@ struct adv_info {  	bool	enabled;  	bool	pending;  	bool	periodic; +	__u8	mesh;  	__u8	instance;  	__u32	flags;  	__u16	timeout; @@ -372,6 +373,8 @@ struct hci_dev {  	__u8		le_resolv_list_size;  	__u8		le_num_of_adv_sets;  	__u8		le_states[8]; +	__u8		mesh_ad_types[16]; +	__u8		mesh_send_ref;  	__u8		commands[64];  	
__u8		hci_ver;  	__u16		hci_rev; @@ -511,6 +514,7 @@ struct hci_dev {  	struct list_head	cmd_sync_work_list;  	struct mutex		cmd_sync_work_lock;  	struct work_struct	cmd_sync_cancel_work; +	struct work_struct	reenable_adv_work;  	__u16			discov_timeout;  	struct delayed_work	discov_off; @@ -561,6 +565,7 @@ struct hci_dev {  	struct hci_conn_hash	conn_hash; +	struct list_head	mesh_pending;  	struct list_head	mgmt_pending;  	struct list_head	reject_list;  	struct list_head	accept_list; @@ -614,6 +619,8 @@ struct hci_dev {  	struct delayed_work	rpa_expired;  	bdaddr_t		rpa; +	struct delayed_work	mesh_send_done; +  	enum {  		INTERLEAVE_SCAN_NONE,  		INTERLEAVE_SCAN_NO_FILTER, @@ -1576,7 +1583,8 @@ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,  				      u32 flags, u16 adv_data_len, u8 *adv_data,  				      u16 scan_rsp_len, u8 *scan_rsp_data,  				      u16 timeout, u16 duration, s8 tx_power, -				      u32 min_interval, u32 max_interval); +				      u32 min_interval, u32 max_interval, +				      u8 mesh_handle);  struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,  				      u32 flags, u8 data_len, u8 *data,  				      u32 min_interval, u32 max_interval); @@ -1997,6 +2005,9 @@ void hci_mgmt_chan_unregister(struct hci_mgmt_chan *c);  #define DISCOV_LE_FAST_ADV_INT_MAX	0x00F0	/* 150 msec */  #define DISCOV_LE_PER_ADV_INT_MIN	0x00A0	/* 200 msec */  #define DISCOV_LE_PER_ADV_INT_MAX	0x00A0	/* 200 msec */ +#define DISCOV_LE_ADV_MESH_MIN		0x00A0  /* 100 msec */ +#define DISCOV_LE_ADV_MESH_MAX		0x00A0  /* 100 msec */ +#define INTERVAL_TO_MS(x)		(((x) * 10) / 0x10)  #define NAME_RESOLVE_DURATION		msecs_to_jiffies(10240)	/* 10.24 sec */ @@ -2048,7 +2059,8 @@ void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status);  void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status);  void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,  		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, -		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len); +		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, +		       u64 instant);  void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,  		      u8 addr_type, s8 rssi, u8 *name, u8 name_len);  void mgmt_discovering(struct hci_dev *hdev, u8 discovering); @@ -2075,6 +2087,7 @@ int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);  void mgmt_adv_monitor_device_lost(struct hci_dev *hdev, u16 handle,  				  bdaddr_t *bdaddr, u8 addr_type); +int hci_abort_conn(struct hci_conn *conn, u8 reason);  u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,  		      u16 to_multiplier);  void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand, diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h index 3843f5060c73..17f5a4c32f36 100644 --- a/include/net/bluetooth/hci_sync.h +++ b/include/net/bluetooth/hci_sync.h @@ -16,6 +16,7 @@ struct hci_cmd_sync_work_entry {  	hci_cmd_sync_work_destroy_t destroy;  }; +struct adv_info;  /* Function with sync suffix shall not be called with hdev->lock held as they   * wait the command to complete and in the meantime an event could be received   * which could attempt to acquire hdev->lock causing a deadlock. 
@@ -51,11 +52,16 @@ int hci_update_class_sync(struct hci_dev *hdev);  int hci_update_name_sync(struct hci_dev *hdev);  int hci_write_ssp_mode_sync(struct hci_dev *hdev, u8 mode); +int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, +			   bool use_rpa, struct adv_info *adv_instance, +			   u8 *own_addr_type, bdaddr_t *rand_addr); +  int hci_update_random_address_sync(struct hci_dev *hdev, bool require_privacy,  				   bool rpa, u8 *own_addr_type);  int hci_update_scan_rsp_data_sync(struct hci_dev *hdev, u8 instance);  int hci_update_adv_data_sync(struct hci_dev *hdev, u8 instance); +int hci_update_adv_data(struct hci_dev *hdev, u8 instance);  int hci_schedule_adv_instance_sync(struct hci_dev *hdev, u8 instance,  				   bool force); @@ -72,7 +78,8 @@ int hci_start_per_adv_sync(struct hci_dev *hdev, u8 instance, u8 data_len,  int hci_remove_advertising_sync(struct hci_dev *hdev, struct sock *sk,  				u8 instance, bool force);  int hci_disable_advertising_sync(struct hci_dev *hdev); - +int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk, +				u8 instance, bool force);  int hci_update_passive_scan_sync(struct hci_dev *hdev);  int hci_update_passive_scan(struct hci_dev *hdev);  int hci_read_rssi_sync(struct hci_dev *hdev, __le16 handle); diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index 7c1ad0f6fcec..743f6f59dff8 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -837,6 +837,42 @@ struct mgmt_cp_add_adv_patterns_monitor_rssi {  	struct mgmt_adv_pattern patterns[];  } __packed;  #define MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE	8 +#define MGMT_OP_SET_MESH_RECEIVER		0x0057 +struct mgmt_cp_set_mesh { +	__u8   enable; +	__le16 window; +	__le16 period; +	__u8   num_ad_types; +	__u8   ad_types[]; +} __packed; +#define MGMT_SET_MESH_RECEIVER_SIZE	6 + +#define MGMT_OP_MESH_READ_FEATURES	0x0058 +#define MGMT_MESH_READ_FEATURES_SIZE	0 +#define MESH_HANDLES_MAX	3 +struct mgmt_rp_mesh_read_features { +	__le16	index; +	__u8   max_handles; +	__u8   used_handles; +	__u8   handles[MESH_HANDLES_MAX]; +} __packed; + +#define MGMT_OP_MESH_SEND		0x0059 +struct mgmt_cp_mesh_send { +	struct mgmt_addr_info addr; +	__le64  instant; +	__le16  delay; +	__u8   cnt; +	__u8   adv_data_len; +	__u8   adv_data[]; +} __packed; +#define MGMT_MESH_SEND_SIZE		19 + +#define MGMT_OP_MESH_SEND_CANCEL	0x005A +struct mgmt_cp_mesh_send_cancel { +	__u8  handle; +} __packed; +#define MGMT_MESH_SEND_CANCEL_SIZE	1  #define MGMT_EV_CMD_COMPLETE		0x0001  struct mgmt_ev_cmd_complete { @@ -1120,3 +1156,19 @@ struct mgmt_ev_adv_monitor_device_lost {  	__le16 monitor_handle;  	struct mgmt_addr_info addr;  } __packed; + +#define MGMT_EV_MESH_DEVICE_FOUND	0x0031 +struct mgmt_ev_mesh_device_found { +	struct mgmt_addr_info addr; +	__s8	rssi; +	__le64	instant; +	__le32	flags; +	__le16	eir_len; +	__u8	eir[]; +} __packed; + + +#define MGMT_EV_MESH_PACKET_CMPLT		0x0032 +struct mgmt_ev_mesh_pkt_cmplt { +	__u8	handle; +} __packed; diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 9777e7b109ee..7a59c4487050 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -44,6 +44,11 @@ struct sco_param {  	u8  retrans_effort;  }; +struct conn_handle_t { +	struct hci_conn *conn; +	__u16 handle; +}; +  static const struct sco_param esco_param_cvsd[] = {  	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x000a,	0x01 }, /* S3 */  	{ EDR_ESCO_MASK & ~ESCO_2EV3, 0x0007,	0x01 }, /* S2 */ @@ -316,17 +321,60 @@ static bool find_next_esco_param(struct hci_conn 
*conn,  	return conn->attempt <= size;  } -static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle) +static int configure_datapath_sync(struct hci_dev *hdev, struct bt_codec *codec)  { -	struct hci_dev *hdev = conn->hdev; +	int err; +	__u8 vnd_len, *vnd_data = NULL; +	struct hci_op_configure_data_path *cmd = NULL; + +	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len, +					  &vnd_data); +	if (err < 0) +		goto error; + +	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL); +	if (!cmd) { +		err = -ENOMEM; +		goto error; +	} + +	err = hdev->get_data_path_id(hdev, &cmd->data_path_id); +	if (err < 0) +		goto error; + +	cmd->vnd_len = vnd_len; +	memcpy(cmd->vnd_data, vnd_data, vnd_len); + +	cmd->direction = 0x00; +	__hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH, +			      sizeof(*cmd) + vnd_len, cmd, HCI_CMD_TIMEOUT); + +	cmd->direction = 0x01; +	err = __hci_cmd_sync_status(hdev, HCI_CONFIGURE_DATA_PATH, +				    sizeof(*cmd) + vnd_len, cmd, +				    HCI_CMD_TIMEOUT); +error: + +	kfree(cmd); +	kfree(vnd_data); +	return err; +} + +static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data) +{ +	struct conn_handle_t *conn_handle = data; +	struct hci_conn *conn = conn_handle->conn; +	__u16 handle = conn_handle->handle;  	struct hci_cp_enhanced_setup_sync_conn cp;  	const struct sco_param *param; +	kfree(conn_handle); +  	bt_dev_dbg(hdev, "hcon %p", conn);  	/* for offload use case, codec needs to configured before opening SCO */  	if (conn->codec.data_path) -		hci_req_configure_datapath(hdev, &conn->codec); +		configure_datapath_sync(hdev, &conn->codec);  	conn->state = BT_CONNECT;  	conn->out = true; @@ -344,7 +392,7 @@ static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle)  	case BT_CODEC_MSBC:  		if (!find_next_esco_param(conn, esco_param_msbc,  					  ARRAY_SIZE(esco_param_msbc))) -			return false; +			return -EINVAL;  		param = &esco_param_msbc[conn->attempt - 1];  		cp.tx_coding_format.id = 0x05; @@ -396,11 +444,11 @@ static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle)  		if (lmp_esco_capable(conn->link)) {  			if (!find_next_esco_param(conn, esco_param_cvsd,  						  ARRAY_SIZE(esco_param_cvsd))) -				return false; +				return -EINVAL;  			param = &esco_param_cvsd[conn->attempt - 1];  		} else {  			if (conn->attempt > ARRAY_SIZE(sco_param_cvsd)) -				return false; +				return -EINVAL;  			param = &sco_param_cvsd[conn->attempt - 1];  		}  		cp.tx_coding_format.id = 2; @@ -423,7 +471,7 @@ static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle)  		cp.out_transport_unit_size = 16;  		break;  	default: -		return false; +		return -EINVAL;  	}  	cp.retrans_effort = param->retrans_effort; @@ -431,9 +479,9 @@ static bool hci_enhanced_setup_sync_conn(struct hci_conn *conn, __u16 handle)  	cp.max_latency = __cpu_to_le16(param->max_latency);  	if (hci_send_cmd(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN, sizeof(cp), &cp) < 0) -		return false; +		return -EIO; -	return true; +	return 0;  }  static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle) @@ -490,8 +538,24 @@ static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle)  bool hci_setup_sync(struct hci_conn *conn, __u16 handle)  { -	if (enhanced_sync_conn_capable(conn->hdev)) -		return hci_enhanced_setup_sync_conn(conn, handle); +	int result; +	struct conn_handle_t *conn_handle; + +	if (enhanced_sync_conn_capable(conn->hdev)) { +		conn_handle = kzalloc(sizeof(*conn_handle), GFP_KERNEL); + +		if (!conn_handle) +			
return false; + +		conn_handle->conn = conn; +		conn_handle->handle = handle; +		result = hci_cmd_sync_queue(conn->hdev, hci_enhanced_setup_sync, +					    conn_handle, NULL); +		if (result < 0) +			kfree(conn_handle); + +		return result == 0; +	}  	return hci_setup_sync_conn(conn, handle);  } @@ -2696,3 +2760,79 @@ u32 hci_conn_get_phy(struct hci_conn *conn)  	return phys;  } + +int hci_abort_conn(struct hci_conn *conn, u8 reason) +{ +	int r = 0; + +	switch (conn->state) { +	case BT_CONNECTED: +	case BT_CONFIG: +		if (conn->type == AMP_LINK) { +			struct hci_cp_disconn_phy_link cp; + +			cp.phy_handle = HCI_PHY_HANDLE(conn->handle); +			cp.reason = reason; +			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK, +					 sizeof(cp), &cp); +		} else { +			struct hci_cp_disconnect dc; + +			dc.handle = cpu_to_le16(conn->handle); +			dc.reason = reason; +			r = hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, +					 sizeof(dc), &dc); +		} + +		conn->state = BT_DISCONN; + +		break; +	case BT_CONNECT: +		if (conn->type == LE_LINK) { +			if (test_bit(HCI_CONN_SCANNING, &conn->flags)) +				break; +			r = hci_send_cmd(conn->hdev, +					 HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL); +		} else if (conn->type == ACL_LINK) { +			if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2) +				break; +			r = hci_send_cmd(conn->hdev, +					 HCI_OP_CREATE_CONN_CANCEL, +					 6, &conn->dst); +		} +		break; +	case BT_CONNECT2: +		if (conn->type == ACL_LINK) { +			struct hci_cp_reject_conn_req rej; + +			bacpy(&rej.bdaddr, &conn->dst); +			rej.reason = reason; + +			r = hci_send_cmd(conn->hdev, +					 HCI_OP_REJECT_CONN_REQ, +					 sizeof(rej), &rej); +		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { +			struct hci_cp_reject_sync_conn_req rej; + +			bacpy(&rej.bdaddr, &conn->dst); + +			/* SCO rejection has its own limited set of +			 * allowed error values (0x0D-0x0F) which isn't +			 * compatible with most values passed to this +			 * function. To be safe hard-code one of the +			 * values that's suitable for SCO. +			 */ +			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; + +			r = hci_send_cmd(conn->hdev, +					 HCI_OP_REJECT_SYNC_CONN_REQ, +					 sizeof(rej), &rej); +		} +		break; +	default: +		conn->state = BT_CLOSED; +		break; +	} + +	return r; +} diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index b3a5a3cc9372..0540555b3704 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -597,6 +597,15 @@ static int hci_dev_do_reset(struct hci_dev *hdev)  	/* Cancel these to avoid queueing non-chained pending work */  	hci_dev_set_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE); +	/* Wait for +	 * +	 *    if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) +	 *        queue_delayed_work(&hdev->{cmd,ncmd}_timer) +	 * +	 * inside RCU section to see the flag or complete scheduling. +	 */ +	synchronize_rcu(); +	/* Explicitly cancel works in case scheduled after setting the flag. 
*/  	cancel_delayed_work(&hdev->cmd_timer);  	cancel_delayed_work(&hdev->ncmd_timer); @@ -714,7 +723,7 @@ static void hci_update_passive_scan_state(struct hci_dev *hdev, u8 scan)  		hci_dev_set_flag(hdev, HCI_BREDR_ENABLED);  		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) -			hci_req_update_adv_data(hdev, hdev->cur_adv_instance); +			hci_update_adv_data(hdev, hdev->cur_adv_instance);  		mgmt_new_settings(hdev);  	} @@ -1706,7 +1715,8 @@ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,  				      u32 flags, u16 adv_data_len, u8 *adv_data,  				      u16 scan_rsp_len, u8 *scan_rsp_data,  				      u16 timeout, u16 duration, s8 tx_power, -				      u32 min_interval, u32 max_interval) +				      u32 min_interval, u32 max_interval, +				      u8 mesh_handle)  {  	struct adv_info *adv; @@ -1717,7 +1727,7 @@ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,  		memset(adv->per_adv_data, 0, sizeof(adv->per_adv_data));  	} else {  		if (hdev->adv_instance_cnt >= hdev->le_num_of_adv_sets || -		    instance < 1 || instance > hdev->le_num_of_adv_sets) +		    instance < 1 || instance > hdev->le_num_of_adv_sets + 1)  			return ERR_PTR(-EOVERFLOW);  		adv = kzalloc(sizeof(*adv), GFP_KERNEL); @@ -1734,6 +1744,11 @@ struct adv_info *hci_add_adv_instance(struct hci_dev *hdev, u8 instance,  	adv->min_interval = min_interval;  	adv->max_interval = max_interval;  	adv->tx_power = tx_power; +	/* Defining a mesh_handle changes the timing units to ms, +	 * rather than seconds, and ties the instance to the requested +	 * mesh_tx queue. +	 */ +	adv->mesh = mesh_handle;  	hci_set_adv_instance_data(hdev, instance, adv_data_len, adv_data,  				  scan_rsp_len, scan_rsp_data); @@ -1762,7 +1777,7 @@ struct adv_info *hci_add_per_instance(struct hci_dev *hdev, u8 instance,  	adv = hci_add_adv_instance(hdev, instance, flags, 0, NULL, 0, NULL,  				   0, 0, HCI_ADV_TX_POWER_NO_PREFERENCE, -				   min_interval, max_interval); +				   min_interval, max_interval, 0);  	if (IS_ERR(adv))  		return adv; @@ -2391,6 +2406,10 @@ static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,  		container_of(nb, struct hci_dev, suspend_notifier);  	int ret = 0; +	/* Userspace has full control of this device. Do nothing. 
*/ +	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) +		return NOTIFY_DONE; +  	if (action == PM_SUSPEND_PREPARE)  		ret = hci_suspend_dev(hdev);  	else if (action == PM_POST_SUSPEND) @@ -2486,6 +2505,7 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)  	mutex_init(&hdev->lock);  	mutex_init(&hdev->req_lock); +	INIT_LIST_HEAD(&hdev->mesh_pending);  	INIT_LIST_HEAD(&hdev->mgmt_pending);  	INIT_LIST_HEAD(&hdev->reject_list);  	INIT_LIST_HEAD(&hdev->accept_list); @@ -3469,15 +3489,27 @@ static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)  	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);  } -static void __check_timeout(struct hci_dev *hdev, unsigned int cnt) +static void __check_timeout(struct hci_dev *hdev, unsigned int cnt, u8 type)  { -	if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { -		/* ACL tx timeout must be longer than maximum -		 * link supervision timeout (40.9 seconds) */ -		if (!cnt && time_after(jiffies, hdev->acl_last_tx + -				       HCI_ACL_TX_TIMEOUT)) -			hci_link_tx_to(hdev, ACL_LINK); +	unsigned long last_tx; + +	if (hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) +		return; + +	switch (type) { +	case LE_LINK: +		last_tx = hdev->le_last_tx; +		break; +	default: +		last_tx = hdev->acl_last_tx; +		break;  	} + +	/* tx timeout must be longer than maximum link supervision timeout +	 * (40.9 seconds) +	 */ +	if (!cnt && time_after(jiffies, last_tx + HCI_ACL_TX_TIMEOUT)) +		hci_link_tx_to(hdev, type);  }  /* Schedule SCO */ @@ -3535,7 +3567,7 @@ static void hci_sched_acl_pkt(struct hci_dev *hdev)  	struct sk_buff *skb;  	int quote; -	__check_timeout(hdev, cnt); +	__check_timeout(hdev, cnt, ACL_LINK);  	while (hdev->acl_cnt &&  	       (chan = hci_chan_sent(hdev, ACL_LINK, "e))) { @@ -3578,8 +3610,6 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)  	int quote;  	u8 type; -	__check_timeout(hdev, cnt); -  	BT_DBG("%s", hdev->name);  	if (hdev->dev_type == HCI_AMP) @@ -3587,6 +3617,8 @@ static void hci_sched_acl_blk(struct hci_dev *hdev)  	else  		type = ACL_LINK; +	__check_timeout(hdev, cnt, type); +  	while (hdev->block_cnt > 0 &&  	       (chan = hci_chan_sent(hdev, type, "e))) {  		u32 priority = (skb_peek(&chan->data_q))->priority; @@ -3660,7 +3692,7 @@ static void hci_sched_le(struct hci_dev *hdev)  	cnt = hdev->le_pkts ? 
hdev->le_cnt : hdev->acl_cnt; -	__check_timeout(hdev, cnt); +	__check_timeout(hdev, cnt, LE_LINK);  	tmp = cnt;  	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) { @@ -4056,12 +4088,14 @@ static void hci_cmd_work(struct work_struct *work)  			if (res < 0)  				__hci_cmd_sync_cancel(hdev, -res); +			rcu_read_lock();  			if (test_bit(HCI_RESET, &hdev->flags) ||  			    hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE))  				cancel_delayed_work(&hdev->cmd_timer);  			else -				schedule_delayed_work(&hdev->cmd_timer, -						      HCI_CMD_TIMEOUT); +				queue_delayed_work(hdev->workqueue, &hdev->cmd_timer, +						   HCI_CMD_TIMEOUT); +			rcu_read_unlock();  		} else {  			skb_queue_head(&hdev->cmd_q, skb);  			queue_work(hdev->workqueue, &hdev->cmd_work); diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c index 902b40a90b91..3f401ec5bb0c 100644 --- a/net/bluetooth/hci_debugfs.c +++ b/net/bluetooth/hci_debugfs.c @@ -1245,7 +1245,7 @@ void hci_debugfs_create_conn(struct hci_conn *conn)  	struct hci_dev *hdev = conn->hdev;  	char name[6]; -	if (IS_ERR_OR_NULL(hdev->debugfs)) +	if (IS_ERR_OR_NULL(hdev->debugfs) || conn->debugfs)  		return;  	snprintf(name, sizeof(name), "%u", conn->handle); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index 6643c9c20fa4..faca701bce2a 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -712,6 +712,47 @@ static u8 hci_cc_read_local_version(struct hci_dev *hdev, void *data,  	return rp->status;  } +static u8 hci_cc_read_enc_key_size(struct hci_dev *hdev, void *data, +				   struct sk_buff *skb) +{ +	struct hci_rp_read_enc_key_size *rp = data; +	struct hci_conn *conn; +	u16 handle; +	u8 status = rp->status; + +	bt_dev_dbg(hdev, "status 0x%2.2x", status); + +	handle = le16_to_cpu(rp->handle); + +	hci_dev_lock(hdev); + +	conn = hci_conn_hash_lookup_handle(hdev, handle); +	if (!conn) { +		status = 0xFF; +		goto done; +	} + +	/* While unexpected, the read_enc_key_size command may fail. The most +	 * secure approach is to then assume the key size is 0 to force a +	 * disconnection. 
+	 */ +	if (status) { +		bt_dev_err(hdev, "failed to read key size for handle %u", +			   handle); +		conn->enc_key_size = 0; +	} else { +		conn->enc_key_size = rp->key_size; +		status = 0; +	} + +	hci_encrypt_cfm(conn, 0); + +done: +	hci_dev_unlock(hdev); + +	return status; +} +  static u8 hci_cc_read_local_commands(struct hci_dev *hdev, void *data,  				     struct sk_buff *skb)  { @@ -1715,6 +1756,8 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)  		hci_dev_set_flag(hdev, HCI_LE_SCAN);  		if (hdev->le_scan_type == LE_SCAN_ACTIVE)  			clear_pending_adv_report(hdev); +		if (hci_dev_test_flag(hdev, HCI_MESH)) +			hci_discovery_set_state(hdev, DISCOVERY_FINDING);  		break;  	case LE_SCAN_DISABLE: @@ -1729,7 +1772,7 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)  					  d->last_adv_addr_type, NULL,  					  d->last_adv_rssi, d->last_adv_flags,  					  d->last_adv_data, -					  d->last_adv_data_len, NULL, 0); +					  d->last_adv_data_len, NULL, 0, 0);  		}  		/* Cancel this timer so that we don't try to disable scanning @@ -1745,6 +1788,9 @@ static void le_set_scan_enable_complete(struct hci_dev *hdev, u8 enable)  		 */  		if (hci_dev_test_and_clear_flag(hdev, HCI_LE_SCAN_INTERRUPTED))  			hci_discovery_set_state(hdev, DISCOVERY_STOPPED); +		else if (!hci_dev_test_flag(hdev, HCI_LE_ADV) && +			 hdev->discovery.state == DISCOVERY_FINDING) +			queue_work(hdev->workqueue, &hdev->reenable_adv_work);  		break; @@ -2152,7 +2198,7 @@ static u8 hci_cc_set_ext_adv_param(struct hci_dev *hdev, void *data,  			adv_instance->tx_power = rp->tx_power;  	}  	/* Update adv data as tx power is known now */ -	hci_req_update_adv_data(hdev, cp->handle); +	hci_update_adv_data(hdev, cp->handle);  	hci_dev_unlock(hdev); @@ -3071,7 +3117,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, void *edata,  		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,  				  info->dev_class, HCI_RSSI_INVALID, -				  flags, NULL, 0, NULL, 0); +				  flags, NULL, 0, NULL, 0, 0);  	}  	hci_dev_unlock(hdev); @@ -3534,47 +3580,6 @@ unlock:  	hci_dev_unlock(hdev);  } -static void read_enc_key_size_complete(struct hci_dev *hdev, u8 status, -				       u16 opcode, struct sk_buff *skb) -{ -	const struct hci_rp_read_enc_key_size *rp; -	struct hci_conn *conn; -	u16 handle; - -	BT_DBG("%s status 0x%02x", hdev->name, status); - -	if (!skb || skb->len < sizeof(*rp)) { -		bt_dev_err(hdev, "invalid read key size response"); -		return; -	} - -	rp = (void *)skb->data; -	handle = le16_to_cpu(rp->handle); - -	hci_dev_lock(hdev); - -	conn = hci_conn_hash_lookup_handle(hdev, handle); -	if (!conn) -		goto unlock; - -	/* While unexpected, the read_enc_key_size command may fail. The most -	 * secure approach is to then assume the key size is 0 to force a -	 * disconnection. 
-	 */ -	if (rp->status) { -		bt_dev_err(hdev, "failed to read key size for handle %u", -			   handle); -		conn->enc_key_size = 0; -	} else { -		conn->enc_key_size = rp->key_size; -	} - -	hci_encrypt_cfm(conn, 0); - -unlock: -	hci_dev_unlock(hdev); -} -  static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,  				   struct sk_buff *skb)  { @@ -3639,7 +3644,6 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,  	/* Try reading the encryption key size for encrypted ACL links */  	if (!ev->status && ev->encrypt && conn->type == ACL_LINK) {  		struct hci_cp_read_enc_key_size cp; -		struct hci_request req;  		/* Only send HCI_Read_Encryption_Key_Size if the  		 * controller really supports it. If it doesn't, assume @@ -3650,12 +3654,9 @@ static void hci_encrypt_change_evt(struct hci_dev *hdev, void *data,  			goto notify;  		} -		hci_req_init(&req, hdev); -  		cp.handle = cpu_to_le16(conn->handle); -		hci_req_add(&req, HCI_OP_READ_ENC_KEY_SIZE, sizeof(cp), &cp); - -		if (hci_req_run_skb(&req, read_enc_key_size_complete)) { +		if (hci_send_cmd(hdev, HCI_OP_READ_ENC_KEY_SIZE, +				 sizeof(cp), &cp)) {  			bt_dev_err(hdev, "sending read key size failed");  			conn->enc_key_size = HCI_LINK_KEY_SIZE;  			goto notify; @@ -3766,16 +3767,18 @@ static inline void handle_cmd_cnt_and_timer(struct hci_dev *hdev, u8 ncmd)  {  	cancel_delayed_work(&hdev->cmd_timer); +	rcu_read_lock();  	if (!test_bit(HCI_RESET, &hdev->flags)) {  		if (ncmd) {  			cancel_delayed_work(&hdev->ncmd_timer);  			atomic_set(&hdev->cmd_cnt, 1);  		} else {  			if (!hci_dev_test_flag(hdev, HCI_CMD_DRAIN_WORKQUEUE)) -				schedule_delayed_work(&hdev->ncmd_timer, -						      HCI_NCMD_TIMEOUT); +				queue_delayed_work(hdev->workqueue, &hdev->ncmd_timer, +						   HCI_NCMD_TIMEOUT);  		}  	} +	rcu_read_unlock();  }  static u8 hci_cc_le_read_buffer_size_v2(struct hci_dev *hdev, void *data, @@ -4037,6 +4040,8 @@ static const struct hci_cc {  	       sizeof(struct hci_rp_read_local_amp_info)),  	HCI_CC(HCI_OP_READ_CLOCK, hci_cc_read_clock,  	       sizeof(struct hci_rp_read_clock)), +	HCI_CC(HCI_OP_READ_ENC_KEY_SIZE, hci_cc_read_enc_key_size, +	       sizeof(struct hci_rp_read_enc_key_size)),  	HCI_CC(HCI_OP_READ_INQ_RSP_TX_POWER, hci_cc_read_inq_rsp_tx_power,  	       sizeof(struct hci_rp_read_inq_rsp_tx_power)),  	HCI_CC(HCI_OP_READ_DEF_ERR_DATA_REPORTING, @@ -4829,7 +4834,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,  			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,  					  info->dev_class, info->rssi, -					  flags, NULL, 0, NULL, 0); +					  flags, NULL, 0, NULL, 0, 0);  		}  	} else if (skb->len == array_size(ev->num,  					  sizeof(struct inquiry_info_rssi))) { @@ -4860,7 +4865,7 @@ static void hci_inquiry_result_with_rssi_evt(struct hci_dev *hdev, void *edata,  			mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,  					  info->dev_class, info->rssi, -					  flags, NULL, 0, NULL, 0); +					  flags, NULL, 0, NULL, 0, 0);  		}  	} else {  		bt_dev_err(hdev, "Malformed HCI Event: 0x%2.2x", @@ -5116,7 +5121,7 @@ static void hci_extended_inquiry_result_evt(struct hci_dev *hdev, void *edata,  		mgmt_device_found(hdev, &info->bdaddr, ACL_LINK, 0x00,  				  info->dev_class, info->rssi, -				  flags, info->data, eir_len, NULL, 0); +				  flags, info->data, eir_len, NULL, 0, 0);  	}  	hci_dev_unlock(hdev); @@ -6172,7 +6177,7 @@ static struct hci_conn *check_pending_le_conn(struct hci_dev *hdev,  static void process_adv_report(struct hci_dev *hdev, u8 type, 
bdaddr_t *bdaddr,  			       u8 bdaddr_type, bdaddr_t *direct_addr,  			       u8 direct_addr_type, s8 rssi, u8 *data, u8 len, -			       bool ext_adv) +			       bool ext_adv, bool ctl_time, u64 instant)  {  	struct discovery_state *d = &hdev->discovery;  	struct smp_irk *irk; @@ -6220,7 +6225,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,  	 * important to see if the address is matching the local  	 * controller address.  	 */ -	if (direct_addr) { +	if (!hci_dev_test_flag(hdev, HCI_MESH) && direct_addr) {  		direct_addr_type = ev_bdaddr_type(hdev, direct_addr_type,  						  &bdaddr_resolved); @@ -6268,6 +6273,18 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,  		conn->le_adv_data_len = len;  	} +	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) +		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; +	else +		flags = 0; + +	/* All scan results should be sent up for Mesh systems */ +	if (hci_dev_test_flag(hdev, HCI_MESH)) { +		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, +				  rssi, flags, data, len, NULL, 0, instant); +		return; +	} +  	/* Passive scanning shouldn't trigger any device found events,  	 * except for devices marked as CONN_REPORT for which we do send  	 * device found events, or advertisement monitoring requested. @@ -6281,12 +6298,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,  		    idr_is_empty(&hdev->adv_monitors_idr))  			return; -		if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND) -			flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; -		else -			flags = 0;  		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, -				  rssi, flags, data, len, NULL, 0); +				  rssi, flags, data, len, NULL, 0, 0);  		return;  	} @@ -6305,11 +6318,8 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,  	 * and just sends a scan response event, then it is marked as  	 * not connectable as well.  	 */ -	if (type == LE_ADV_NONCONN_IND || type == LE_ADV_SCAN_IND || -	    type == LE_ADV_SCAN_RSP) +	if (type == LE_ADV_SCAN_RSP)  		flags = MGMT_DEV_FOUND_NOT_CONNECTABLE; -	else -		flags = 0;  	/* If there's nothing pending either store the data from this  	 * event or send an immediate device found event if the data @@ -6326,7 +6336,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,  		}  		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, -				  rssi, flags, data, len, NULL, 0); +				  rssi, flags, data, len, NULL, 0, 0);  		return;  	} @@ -6345,7 +6355,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,  					  d->last_adv_addr_type, NULL,  					  d->last_adv_rssi, d->last_adv_flags,  					  d->last_adv_data, -					  d->last_adv_data_len, NULL, 0); +					  d->last_adv_data_len, NULL, 0, 0);  		/* If the new report will trigger a SCAN_REQ store it for  		 * later merging. 
@@ -6362,7 +6372,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,  		 */  		clear_pending_adv_report(hdev);  		mgmt_device_found(hdev, bdaddr, LE_LINK, bdaddr_type, NULL, -				  rssi, flags, data, len, NULL, 0); +				  rssi, flags, data, len, NULL, 0, 0);  		return;  	} @@ -6372,7 +6382,7 @@ static void process_adv_report(struct hci_dev *hdev, u8 type, bdaddr_t *bdaddr,  	 */  	mgmt_device_found(hdev, &d->last_adv_addr, LE_LINK,  			  d->last_adv_addr_type, NULL, rssi, d->last_adv_flags, -			  d->last_adv_data, d->last_adv_data_len, data, len); +			  d->last_adv_data, d->last_adv_data_len, data, len, 0);  	clear_pending_adv_report(hdev);  } @@ -6380,6 +6390,7 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,  				  struct sk_buff *skb)  {  	struct hci_ev_le_advertising_report *ev = data; +	u64 instant = jiffies;  	if (!ev->num)  		return; @@ -6404,7 +6415,8 @@ static void hci_le_adv_report_evt(struct hci_dev *hdev, void *data,  			rssi = info->data[info->length];  			process_adv_report(hdev, info->type, &info->bdaddr,  					   info->bdaddr_type, NULL, 0, rssi, -					   info->data, info->length, false); +					   info->data, info->length, false, +					   false, instant);  		} else {  			bt_dev_err(hdev, "Dropping invalid advertising data");  		} @@ -6461,6 +6473,7 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,  				      struct sk_buff *skb)  {  	struct hci_ev_le_ext_adv_report *ev = data; +	u64 instant = jiffies;  	if (!ev->num)  		return; @@ -6487,7 +6500,8 @@ static void hci_le_ext_adv_report_evt(struct hci_dev *hdev, void *data,  			process_adv_report(hdev, legacy_evt_type, &info->bdaddr,  					   info->bdaddr_type, NULL, 0,  					   info->rssi, info->data, info->length, -					   !(evt_type & LE_EXT_ADV_LEGACY_PDU)); +					   !(evt_type & LE_EXT_ADV_LEGACY_PDU), +					   false, instant);  		}  	} @@ -6710,6 +6724,7 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,  					 struct sk_buff *skb)  {  	struct hci_ev_le_direct_adv_report *ev = data; +	u64 instant = jiffies;  	int i;  	if (!hci_le_ev_skb_pull(hdev, skb, HCI_EV_LE_DIRECT_ADV_REPORT, @@ -6727,7 +6742,7 @@ static void hci_le_direct_adv_report_evt(struct hci_dev *hdev, void *data,  		process_adv_report(hdev, info->type, &info->bdaddr,  				   info->bdaddr_type, &info->direct_addr,  				   info->direct_addr_type, info->rssi, NULL, 0, -				   false); +				   false, false, instant);  	}  	hci_dev_unlock(hdev); @@ -6776,6 +6791,13 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data,  		goto unlock;  	} +	if (conn->type != ISO_LINK) { +		bt_dev_err(hdev, +			   "Invalid connection link type handle 0x%4.4x", +			   handle); +		goto unlock; +	} +  	if (conn->role == HCI_ROLE_SLAVE) {  		__le32 interval; @@ -6896,6 +6918,13 @@ static void hci_le_create_big_complete_evt(struct hci_dev *hdev, void *data,  	if (!conn)  		goto unlock; +	if (conn->type != ISO_LINK) { +		bt_dev_err(hdev, +			   "Invalid connection link type handle 0x%2.2x", +			   ev->handle); +		goto unlock; +	} +  	if (ev->num_bis)  		conn->handle = __le16_to_cpu(ev->bis_handle[0]); diff --git a/net/bluetooth/hci_request.c b/net/bluetooth/hci_request.c index e64d558e5d69..5a0296a4352e 100644 --- a/net/bluetooth/hci_request.c +++ b/net/bluetooth/hci_request.c @@ -269,42 +269,10 @@ void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,  void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,  		 const void *param)  { +	
bt_dev_err(req->hdev, "HCI_REQ-0x%4.4x", opcode);  	hci_req_add_ev(req, opcode, plen, param, 0);  } -void __hci_req_write_fast_connectable(struct hci_request *req, bool enable) -{ -	struct hci_dev *hdev = req->hdev; -	struct hci_cp_write_page_scan_activity acp; -	u8 type; - -	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) -		return; - -	if (hdev->hci_ver < BLUETOOTH_VER_1_2) -		return; - -	if (enable) { -		type = PAGE_SCAN_TYPE_INTERLACED; - -		/* 160 msec page scan interval */ -		acp.interval = cpu_to_le16(0x0100); -	} else { -		type = hdev->def_page_scan_type; -		acp.interval = cpu_to_le16(hdev->def_page_scan_int); -	} - -	acp.window = cpu_to_le16(hdev->def_page_scan_window); - -	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval || -	    __cpu_to_le16(hdev->page_scan_window) != acp.window) -		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY, -			    sizeof(acp), &acp); - -	if (hdev->page_scan_type != type) -		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type); -} -  static void start_interleave_scan(struct hci_dev *hdev)  {  	hdev->interleave_scan_state = INTERLEAVE_SCAN_NO_FILTER; @@ -357,45 +325,6 @@ static bool __hci_update_interleaved_scan(struct hci_dev *hdev)  	return false;  } -void __hci_req_update_name(struct hci_request *req) -{ -	struct hci_dev *hdev = req->hdev; -	struct hci_cp_write_local_name cp; - -	memcpy(cp.name, hdev->dev_name, sizeof(cp.name)); - -	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp); -} - -void __hci_req_update_eir(struct hci_request *req) -{ -	struct hci_dev *hdev = req->hdev; -	struct hci_cp_write_eir cp; - -	if (!hdev_is_powered(hdev)) -		return; - -	if (!lmp_ext_inq_capable(hdev)) -		return; - -	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED)) -		return; - -	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) -		return; - -	memset(&cp, 0, sizeof(cp)); - -	eir_create(hdev, cp.data); - -	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0) -		return; - -	memcpy(hdev->eir, cp.data, sizeof(cp.data)); - -	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp); -} -  void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn)  {  	struct hci_dev *hdev = req->hdev; @@ -721,6 +650,96 @@ static inline bool hci_is_le_conn_scanning(struct hci_dev *hdev)  	return false;  } +static void set_random_addr(struct hci_request *req, bdaddr_t *rpa); +static int hci_update_random_address(struct hci_request *req, +				     bool require_privacy, bool use_rpa, +				     u8 *own_addr_type) +{ +	struct hci_dev *hdev = req->hdev; +	int err; + +	/* If privacy is enabled use a resolvable private address. If +	 * current RPA has expired or there is something else than +	 * the current RPA in use, then generate a new one. +	 */ +	if (use_rpa) { +		/* If Controller supports LL Privacy use own address type is +		 * 0x03 +		 */ +		if (use_ll_privacy(hdev)) +			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; +		else +			*own_addr_type = ADDR_LE_DEV_RANDOM; + +		if (rpa_valid(hdev)) +			return 0; + +		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); +		if (err < 0) { +			bt_dev_err(hdev, "failed to generate new RPA"); +			return err; +		} + +		set_random_addr(req, &hdev->rpa); + +		return 0; +	} + +	/* In case of required privacy without resolvable private address, +	 * use an non-resolvable private address. This is useful for active +	 * scanning and non-connectable advertising. 
+	 */ +	if (require_privacy) { +		bdaddr_t nrpa; + +		while (true) { +			/* The non-resolvable private address is generated +			 * from random six bytes with the two most significant +			 * bits cleared. +			 */ +			get_random_bytes(&nrpa, 6); +			nrpa.b[5] &= 0x3f; + +			/* The non-resolvable private address shall not be +			 * equal to the public address. +			 */ +			if (bacmp(&hdev->bdaddr, &nrpa)) +				break; +		} + +		*own_addr_type = ADDR_LE_DEV_RANDOM; +		set_random_addr(req, &nrpa); +		return 0; +	} + +	/* If forcing static address is in use or there is no public +	 * address use the static address as random address (but skip +	 * the HCI command if the current random address is already the +	 * static one. +	 * +	 * In case BR/EDR has been disabled on a dual-mode controller +	 * and a static address has been configured, then use that +	 * address instead of the public BR/EDR address. +	 */ +	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || +	    !bacmp(&hdev->bdaddr, BDADDR_ANY) || +	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && +	     bacmp(&hdev->static_addr, BDADDR_ANY))) { +		*own_addr_type = ADDR_LE_DEV_RANDOM; +		if (bacmp(&hdev->static_addr, &hdev->random_addr)) +			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, +				    &hdev->static_addr); +		return 0; +	} + +	/* Neither privacy nor static address is being used so use a +	 * public address. +	 */ +	*own_addr_type = ADDR_LE_DEV_PUBLIC; + +	return 0; +} +  /* Ensure to call hci_req_add_le_scan_disable() first to disable the   * controller based address resolution to be able to reconfigure   * resolving list. @@ -810,366 +829,6 @@ void hci_req_add_le_passive_scan(struct hci_request *req)  			   addr_resolv);  } -static void cancel_adv_timeout(struct hci_dev *hdev) -{ -	if (hdev->adv_instance_timeout) { -		hdev->adv_instance_timeout = 0; -		cancel_delayed_work(&hdev->adv_instance_expire); -	} -} - -static bool adv_cur_instance_is_scannable(struct hci_dev *hdev) -{ -	return hci_adv_instance_is_scannable(hdev, hdev->cur_adv_instance); -} - -void __hci_req_disable_advertising(struct hci_request *req) -{ -	if (ext_adv_capable(req->hdev)) { -		__hci_req_disable_ext_adv_instance(req, 0x00); -	} else { -		u8 enable = 0x00; - -		hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); -	} -} - -static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags) -{ -	/* If privacy is not enabled don't use RPA */ -	if (!hci_dev_test_flag(hdev, HCI_PRIVACY)) -		return false; - -	/* If basic privacy mode is enabled use RPA */ -	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) -		return true; - -	/* If limited privacy mode is enabled don't use RPA if we're -	 * both discoverable and bondable. -	 */ -	if ((flags & MGMT_ADV_FLAG_DISCOV) && -	    hci_dev_test_flag(hdev, HCI_BONDABLE)) -		return false; - -	/* We're neither bondable nor discoverable in the limited -	 * privacy mode, therefore use RPA. -	 */ -	return true; -} - -static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable) -{ -	/* If there is no connection we are OK to advertise. */ -	if (hci_conn_num(hdev, LE_LINK) == 0) -		return true; - -	/* Check le_states if there is any connection in peripheral role. */ -	if (hdev->conn_hash.le_num_peripheral > 0) { -		/* Peripheral connection state and non connectable mode bit 20. -		 */ -		if (!connectable && !(hdev->le_states[2] & 0x10)) -			return false; - -		/* Peripheral connection state and connectable mode bit 38 -		 * and scannable bit 21. 
-		 */ -		if (connectable && (!(hdev->le_states[4] & 0x40) || -				    !(hdev->le_states[2] & 0x20))) -			return false; -	} - -	/* Check le_states if there is any connection in central role. */ -	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_peripheral) { -		/* Central connection state and non connectable mode bit 18. */ -		if (!connectable && !(hdev->le_states[2] & 0x02)) -			return false; - -		/* Central connection state and connectable mode bit 35 and -		 * scannable 19. -		 */ -		if (connectable && (!(hdev->le_states[4] & 0x08) || -				    !(hdev->le_states[2] & 0x08))) -			return false; -	} - -	return true; -} - -void __hci_req_enable_advertising(struct hci_request *req) -{ -	struct hci_dev *hdev = req->hdev; -	struct adv_info *adv; -	struct hci_cp_le_set_adv_param cp; -	u8 own_addr_type, enable = 0x01; -	bool connectable; -	u16 adv_min_interval, adv_max_interval; -	u32 flags; - -	flags = hci_adv_instance_flags(hdev, hdev->cur_adv_instance); -	adv = hci_find_adv_instance(hdev, hdev->cur_adv_instance); - -	/* If the "connectable" instance flag was not set, then choose between -	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. -	 */ -	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || -		      mgmt_get_connectable(hdev); - -	if (!is_advertising_allowed(hdev, connectable)) -		return; - -	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) -		__hci_req_disable_advertising(req); - -	/* Clear the HCI_LE_ADV bit temporarily so that the -	 * hci_update_random_address knows that it's safe to go ahead -	 * and write a new random address. The flag will be set back on -	 * as soon as the SET_ADV_ENABLE HCI command completes. -	 */ -	hci_dev_clear_flag(hdev, HCI_LE_ADV); - -	/* Set require_privacy to true only when non-connectable -	 * advertising is used. In that case it is fine to use a -	 * non-resolvable private address. 
-	 */ -	if (hci_update_random_address(req, !connectable, -				      adv_use_rpa(hdev, flags), -				      &own_addr_type) < 0) -		return; - -	memset(&cp, 0, sizeof(cp)); - -	if (adv) { -		adv_min_interval = adv->min_interval; -		adv_max_interval = adv->max_interval; -	} else { -		adv_min_interval = hdev->le_adv_min_interval; -		adv_max_interval = hdev->le_adv_max_interval; -	} - -	if (connectable) { -		cp.type = LE_ADV_IND; -	} else { -		if (adv_cur_instance_is_scannable(hdev)) -			cp.type = LE_ADV_SCAN_IND; -		else -			cp.type = LE_ADV_NONCONN_IND; - -		if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE) || -		    hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { -			adv_min_interval = DISCOV_LE_FAST_ADV_INT_MIN; -			adv_max_interval = DISCOV_LE_FAST_ADV_INT_MAX; -		} -	} - -	cp.min_interval = cpu_to_le16(adv_min_interval); -	cp.max_interval = cpu_to_le16(adv_max_interval); -	cp.own_address_type = own_addr_type; -	cp.channel_map = hdev->le_adv_channel_map; - -	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp); - -	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable); -} - -void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance) -{ -	struct hci_dev *hdev = req->hdev; -	u8 len; - -	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) -		return; - -	if (ext_adv_capable(hdev)) { -		struct { -			struct hci_cp_le_set_ext_scan_rsp_data cp; -			u8 data[HCI_MAX_EXT_AD_LENGTH]; -		} pdu; - -		memset(&pdu, 0, sizeof(pdu)); - -		len = eir_create_scan_rsp(hdev, instance, pdu.data); - -		if (hdev->scan_rsp_data_len == len && -		    !memcmp(pdu.data, hdev->scan_rsp_data, len)) -			return; - -		memcpy(hdev->scan_rsp_data, pdu.data, len); -		hdev->scan_rsp_data_len = len; - -		pdu.cp.handle = instance; -		pdu.cp.length = len; -		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; -		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; - -		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, -			    sizeof(pdu.cp) + len, &pdu.cp); -	} else { -		struct hci_cp_le_set_scan_rsp_data cp; - -		memset(&cp, 0, sizeof(cp)); - -		len = eir_create_scan_rsp(hdev, instance, cp.data); - -		if (hdev->scan_rsp_data_len == len && -		    !memcmp(cp.data, hdev->scan_rsp_data, len)) -			return; - -		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data)); -		hdev->scan_rsp_data_len = len; - -		cp.length = len; - -		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp); -	} -} - -void __hci_req_update_adv_data(struct hci_request *req, u8 instance) -{ -	struct hci_dev *hdev = req->hdev; -	u8 len; - -	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) -		return; - -	if (ext_adv_capable(hdev)) { -		struct { -			struct hci_cp_le_set_ext_adv_data cp; -			u8 data[HCI_MAX_EXT_AD_LENGTH]; -		} pdu; - -		memset(&pdu, 0, sizeof(pdu)); - -		len = eir_create_adv_data(hdev, instance, pdu.data); - -		/* There's nothing to do if the data hasn't changed */ -		if (hdev->adv_data_len == len && -		    memcmp(pdu.data, hdev->adv_data, len) == 0) -			return; - -		memcpy(hdev->adv_data, pdu.data, len); -		hdev->adv_data_len = len; - -		pdu.cp.length = len; -		pdu.cp.handle = instance; -		pdu.cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; -		pdu.cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; - -		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, -			    sizeof(pdu.cp) + len, &pdu.cp); -	} else { -		struct hci_cp_le_set_adv_data cp; - -		memset(&cp, 0, sizeof(cp)); - -		len = eir_create_adv_data(hdev, instance, cp.data); - -		/* There's nothing to do if the data hasn't changed */ -		if (hdev->adv_data_len == len && -		    memcmp(cp.data, 
hdev->adv_data, len) == 0) -			return; - -		memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); -		hdev->adv_data_len = len; - -		cp.length = len; - -		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); -	} -} - -int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance) -{ -	struct hci_request req; - -	hci_req_init(&req, hdev); -	__hci_req_update_adv_data(&req, instance); - -	return hci_req_run(&req, NULL); -} - -static void enable_addr_resolution_complete(struct hci_dev *hdev, u8 status, -					    u16 opcode) -{ -	BT_DBG("%s status %u", hdev->name, status); -} - -void hci_req_disable_address_resolution(struct hci_dev *hdev) -{ -	struct hci_request req; -	__u8 enable = 0x00; - -	if (!hci_dev_test_flag(hdev, HCI_LL_RPA_RESOLUTION)) -		return; - -	hci_req_init(&req, hdev); - -	hci_req_add(&req, HCI_OP_LE_SET_ADDR_RESOLV_ENABLE, 1, &enable); - -	hci_req_run(&req, enable_addr_resolution_complete); -} - -static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) -{ -	bt_dev_dbg(hdev, "status %u", status); -} - -void hci_req_reenable_advertising(struct hci_dev *hdev) -{ -	struct hci_request req; - -	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && -	    list_empty(&hdev->adv_instances)) -		return; - -	hci_req_init(&req, hdev); - -	if (hdev->cur_adv_instance) { -		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance, -						true); -	} else { -		if (ext_adv_capable(hdev)) { -			__hci_req_start_ext_adv(&req, 0x00); -		} else { -			__hci_req_update_adv_data(&req, 0x00); -			__hci_req_update_scan_rsp_data(&req, 0x00); -			__hci_req_enable_advertising(&req); -		} -	} - -	hci_req_run(&req, adv_enable_complete); -} - -static void adv_timeout_expire(struct work_struct *work) -{ -	struct hci_dev *hdev = container_of(work, struct hci_dev, -					    adv_instance_expire.work); - -	struct hci_request req; -	u8 instance; - -	bt_dev_dbg(hdev, ""); - -	hci_dev_lock(hdev); - -	hdev->adv_instance_timeout = 0; - -	instance = hdev->cur_adv_instance; -	if (instance == 0x00) -		goto unlock; - -	hci_req_init(&req, hdev); - -	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false); - -	if (list_empty(&hdev->adv_instances)) -		__hci_req_disable_advertising(&req); - -	hci_req_run(&req, NULL); - -unlock: -	hci_dev_unlock(hdev); -} -  static int hci_req_add_le_interleaved_scan(struct hci_request *req,  					   unsigned long opt)  { @@ -1226,84 +885,6 @@ static void interleave_scan_work(struct work_struct *work)  				   &hdev->interleave_scan, timeout);  } -int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, -			   bool use_rpa, struct adv_info *adv_instance, -			   u8 *own_addr_type, bdaddr_t *rand_addr) -{ -	int err; - -	bacpy(rand_addr, BDADDR_ANY); - -	/* If privacy is enabled use a resolvable private address. If -	 * current RPA has expired then generate a new one. -	 */ -	if (use_rpa) { -		/* If Controller supports LL Privacy use own address type is -		 * 0x03 -		 */ -		if (use_ll_privacy(hdev)) -			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; -		else -			*own_addr_type = ADDR_LE_DEV_RANDOM; - -		if (adv_instance) { -			if (adv_rpa_valid(adv_instance)) -				return 0; -		} else { -			if (rpa_valid(hdev)) -				return 0; -		} - -		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); -		if (err < 0) { -			bt_dev_err(hdev, "failed to generate new RPA"); -			return err; -		} - -		bacpy(rand_addr, &hdev->rpa); - -		return 0; -	} - -	/* In case of required privacy without resolvable private address, -	 * use an non-resolvable private address. 
This is useful for -	 * non-connectable advertising. -	 */ -	if (require_privacy) { -		bdaddr_t nrpa; - -		while (true) { -			/* The non-resolvable private address is generated -			 * from random six bytes with the two most significant -			 * bits cleared. -			 */ -			get_random_bytes(&nrpa, 6); -			nrpa.b[5] &= 0x3f; - -			/* The non-resolvable private address shall not be -			 * equal to the public address. -			 */ -			if (bacmp(&hdev->bdaddr, &nrpa)) -				break; -		} - -		*own_addr_type = ADDR_LE_DEV_RANDOM; -		bacpy(rand_addr, &nrpa); - -		return 0; -	} - -	/* No privacy so use a public address. */ -	*own_addr_type = ADDR_LE_DEV_PUBLIC; - -	return 0; -} - -void __hci_req_clear_ext_adv_sets(struct hci_request *req) -{ -	hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL); -} -  static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)  {  	struct hci_dev *hdev = req->hdev; @@ -1328,933 +909,8 @@ static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)  	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);  } -int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) -{ -	struct hci_cp_le_set_ext_adv_params cp; -	struct hci_dev *hdev = req->hdev; -	bool connectable; -	u32 flags; -	bdaddr_t random_addr; -	u8 own_addr_type; -	int err; -	struct adv_info *adv; -	bool secondary_adv, require_privacy; - -	if (instance > 0) { -		adv = hci_find_adv_instance(hdev, instance); -		if (!adv) -			return -EINVAL; -	} else { -		adv = NULL; -	} - -	flags = hci_adv_instance_flags(hdev, instance); - -	/* If the "connectable" instance flag was not set, then choose between -	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. -	 */ -	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || -		      mgmt_get_connectable(hdev); - -	if (!is_advertising_allowed(hdev, connectable)) -		return -EPERM; - -	/* Set require_privacy to true only when non-connectable -	 * advertising is used. In that case it is fine to use a -	 * non-resolvable private address. -	 */ -	require_privacy = !connectable; - -	/* Don't require privacy for periodic adv? 
*/ -	if (adv && adv->periodic) -		require_privacy = false; - -	err = hci_get_random_address(hdev, require_privacy, -				     adv_use_rpa(hdev, flags), adv, -				     &own_addr_type, &random_addr); -	if (err < 0) -		return err; - -	memset(&cp, 0, sizeof(cp)); - -	if (adv) { -		hci_cpu_to_le24(adv->min_interval, cp.min_interval); -		hci_cpu_to_le24(adv->max_interval, cp.max_interval); -		cp.tx_power = adv->tx_power; -	} else { -		hci_cpu_to_le24(hdev->le_adv_min_interval, cp.min_interval); -		hci_cpu_to_le24(hdev->le_adv_max_interval, cp.max_interval); -		cp.tx_power = HCI_ADV_TX_POWER_NO_PREFERENCE; -	} - -	secondary_adv = (flags & MGMT_ADV_FLAG_SEC_MASK); - -	if (connectable) { -		if (secondary_adv) -			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_CONN_IND); -		else -			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); -	} else if (hci_adv_instance_is_scannable(hdev, instance) || -		   (flags & MGMT_ADV_PARAM_SCAN_RSP)) { -		if (secondary_adv) -			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_SCAN_IND); -		else -			cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); -	} else { -		/* Secondary and periodic cannot use legacy PDUs */ -		if (secondary_adv || (adv && adv->periodic)) -			cp.evt_properties = cpu_to_le16(LE_EXT_ADV_NON_CONN_IND); -		else -			cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); -	} - -	cp.own_addr_type = own_addr_type; -	cp.channel_map = hdev->le_adv_channel_map; -	cp.handle = instance; - -	if (flags & MGMT_ADV_FLAG_SEC_2M) { -		cp.primary_phy = HCI_ADV_PHY_1M; -		cp.secondary_phy = HCI_ADV_PHY_2M; -	} else if (flags & MGMT_ADV_FLAG_SEC_CODED) { -		cp.primary_phy = HCI_ADV_PHY_CODED; -		cp.secondary_phy = HCI_ADV_PHY_CODED; -	} else { -		/* In all other cases use 1M */ -		cp.primary_phy = HCI_ADV_PHY_1M; -		cp.secondary_phy = HCI_ADV_PHY_1M; -	} - -	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); - -	if ((own_addr_type == ADDR_LE_DEV_RANDOM || -	     own_addr_type == ADDR_LE_DEV_RANDOM_RESOLVED) && -	    bacmp(&random_addr, BDADDR_ANY)) { -		struct hci_cp_le_set_adv_set_rand_addr cp; - -		/* Check if random address need to be updated */ -		if (adv) { -			if (!bacmp(&random_addr, &adv->random_addr)) -				return 0; -		} else { -			if (!bacmp(&random_addr, &hdev->random_addr)) -				return 0; -			/* Instance 0x00 doesn't have an adv_info, instead it -			 * uses hdev->random_addr to track its address so -			 * whenever it needs to be updated this also set the -			 * random address since hdev->random_addr is shared with -			 * scan state machine. -			 */ -			set_random_addr(req, &random_addr); -		} - -		memset(&cp, 0, sizeof(cp)); - -		cp.handle = instance; -		bacpy(&cp.bdaddr, &random_addr); - -		hci_req_add(req, -			    HCI_OP_LE_SET_ADV_SET_RAND_ADDR, -			    sizeof(cp), &cp); -	} - -	return 0; -} - -int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance) -{ -	struct hci_dev *hdev = req->hdev; -	struct hci_cp_le_set_ext_adv_enable *cp; -	struct hci_cp_ext_adv_set *adv_set; -	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1]; -	struct adv_info *adv_instance; - -	if (instance > 0) { -		adv_instance = hci_find_adv_instance(hdev, instance); -		if (!adv_instance) -			return -EINVAL; -	} else { -		adv_instance = NULL; -	} - -	cp = (void *) data; -	adv_set = (void *) cp->data; - -	memset(cp, 0, sizeof(*cp)); - -	cp->enable = 0x01; -	cp->num_of_sets = 0x01; - -	memset(adv_set, 0, sizeof(*adv_set)); - -	adv_set->handle = instance; - -	/* Set duration per instance since controller is responsible for -	 * scheduling it. 
-	 */ -	if (adv_instance && adv_instance->duration) { -		u16 duration = adv_instance->timeout * MSEC_PER_SEC; - -		/* Time = N * 10 ms */ -		adv_set->duration = cpu_to_le16(duration / 10); -	} - -	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, -		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets, -		    data); - -	return 0; -} - -int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance) -{ -	struct hci_dev *hdev = req->hdev; -	struct hci_cp_le_set_ext_adv_enable *cp; -	struct hci_cp_ext_adv_set *adv_set; -	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1]; -	u8 req_size; - -	/* If request specifies an instance that doesn't exist, fail */ -	if (instance > 0 && !hci_find_adv_instance(hdev, instance)) -		return -EINVAL; - -	memset(data, 0, sizeof(data)); - -	cp = (void *)data; -	adv_set = (void *)cp->data; - -	/* Instance 0x00 indicates all advertising instances will be disabled */ -	cp->num_of_sets = !!instance; -	cp->enable = 0x00; - -	adv_set->handle = instance; - -	req_size = sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets; -	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, req_size, data); - -	return 0; -} - -int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance) -{ -	struct hci_dev *hdev = req->hdev; - -	/* If request specifies an instance that doesn't exist, fail */ -	if (instance > 0 && !hci_find_adv_instance(hdev, instance)) -		return -EINVAL; - -	hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(instance), &instance); - -	return 0; -} - -int __hci_req_start_ext_adv(struct hci_request *req, u8 instance) -{ -	struct hci_dev *hdev = req->hdev; -	struct adv_info *adv_instance = hci_find_adv_instance(hdev, instance); -	int err; - -	/* If instance isn't pending, the chip knows about it, and it's safe to -	 * disable -	 */ -	if (adv_instance && !adv_instance->pending) -		__hci_req_disable_ext_adv_instance(req, instance); - -	err = __hci_req_setup_ext_adv_instance(req, instance); -	if (err < 0) -		return err; - -	__hci_req_update_scan_rsp_data(req, instance); -	__hci_req_enable_ext_advertising(req, instance); - -	return 0; -} - -int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance, -				    bool force) -{ -	struct hci_dev *hdev = req->hdev; -	struct adv_info *adv_instance = NULL; -	u16 timeout; - -	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || -	    list_empty(&hdev->adv_instances)) -		return -EPERM; - -	if (hdev->adv_instance_timeout) -		return -EBUSY; - -	adv_instance = hci_find_adv_instance(hdev, instance); -	if (!adv_instance) -		return -ENOENT; - -	/* A zero timeout means unlimited advertising. As long as there is -	 * only one instance, duration should be ignored. We still set a timeout -	 * in case further instances are being added later on. -	 * -	 * If the remaining lifetime of the instance is more than the duration -	 * then the timeout corresponds to the duration, otherwise it will be -	 * reduced to the remaining instance lifetime. -	 */ -	if (adv_instance->timeout == 0 || -	    adv_instance->duration <= adv_instance->remaining_time) -		timeout = adv_instance->duration; -	else -		timeout = adv_instance->remaining_time; - -	/* The remaining time is being reduced unless the instance is being -	 * advertised without time limit. 
-	 */ -	if (adv_instance->timeout) -		adv_instance->remaining_time = -				adv_instance->remaining_time - timeout; - -	/* Only use work for scheduling instances with legacy advertising */ -	if (!ext_adv_capable(hdev)) { -		hdev->adv_instance_timeout = timeout; -		queue_delayed_work(hdev->req_workqueue, -			   &hdev->adv_instance_expire, -			   msecs_to_jiffies(timeout * 1000)); -	} - -	/* If we're just re-scheduling the same instance again then do not -	 * execute any HCI commands. This happens when a single instance is -	 * being advertised. -	 */ -	if (!force && hdev->cur_adv_instance == instance && -	    hci_dev_test_flag(hdev, HCI_LE_ADV)) -		return 0; - -	hdev->cur_adv_instance = instance; -	if (ext_adv_capable(hdev)) { -		__hci_req_start_ext_adv(req, instance); -	} else { -		__hci_req_update_adv_data(req, instance); -		__hci_req_update_scan_rsp_data(req, instance); -		__hci_req_enable_advertising(req); -	} - -	return 0; -} - -/* For a single instance: - * - force == true: The instance will be removed even when its remaining - *   lifetime is not zero. - * - force == false: the instance will be deactivated but kept stored unless - *   the remaining lifetime is zero. - * - * For instance == 0x00: - * - force == true: All instances will be removed regardless of their timeout - *   setting. - * - force == false: Only instances that have a timeout will be removed. - */ -void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk, -				struct hci_request *req, u8 instance, -				bool force) -{ -	struct adv_info *adv_instance, *n, *next_instance = NULL; -	int err; -	u8 rem_inst; - -	/* Cancel any timeout concerning the removed instance(s). */ -	if (!instance || hdev->cur_adv_instance == instance) -		cancel_adv_timeout(hdev); - -	/* Get the next instance to advertise BEFORE we remove -	 * the current one. This can be the same instance again -	 * if there is only one instance. -	 */ -	if (instance && hdev->cur_adv_instance == instance) -		next_instance = hci_get_next_instance(hdev, instance); - -	if (instance == 0x00) { -		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, -					 list) { -			if (!(force || adv_instance->timeout)) -				continue; - -			rem_inst = adv_instance->instance; -			err = hci_remove_adv_instance(hdev, rem_inst); -			if (!err) -				mgmt_advertising_removed(sk, hdev, rem_inst); -		} -	} else { -		adv_instance = hci_find_adv_instance(hdev, instance); - -		if (force || (adv_instance && adv_instance->timeout && -			      !adv_instance->remaining_time)) { -			/* Don't advertise a removed instance. */ -			if (next_instance && -			    next_instance->instance == instance) -				next_instance = NULL; - -			err = hci_remove_adv_instance(hdev, instance); -			if (!err) -				mgmt_advertising_removed(sk, hdev, instance); -		} -	} - -	if (!req || !hdev_is_powered(hdev) || -	    hci_dev_test_flag(hdev, HCI_ADVERTISING)) -		return; - -	if (next_instance && !ext_adv_capable(hdev)) -		__hci_req_schedule_adv_instance(req, next_instance->instance, -						false); -} - -int hci_update_random_address(struct hci_request *req, bool require_privacy, -			      bool use_rpa, u8 *own_addr_type) -{ -	struct hci_dev *hdev = req->hdev; -	int err; - -	/* If privacy is enabled use a resolvable private address. If -	 * current RPA has expired or there is something else than -	 * the current RPA in use, then generate a new one. 
-	 */ -	if (use_rpa) { -		/* If Controller supports LL Privacy use own address type is -		 * 0x03 -		 */ -		if (use_ll_privacy(hdev)) -			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; -		else -			*own_addr_type = ADDR_LE_DEV_RANDOM; - -		if (rpa_valid(hdev)) -			return 0; - -		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); -		if (err < 0) { -			bt_dev_err(hdev, "failed to generate new RPA"); -			return err; -		} - -		set_random_addr(req, &hdev->rpa); - -		return 0; -	} - -	/* In case of required privacy without resolvable private address, -	 * use an non-resolvable private address. This is useful for active -	 * scanning and non-connectable advertising. -	 */ -	if (require_privacy) { -		bdaddr_t nrpa; - -		while (true) { -			/* The non-resolvable private address is generated -			 * from random six bytes with the two most significant -			 * bits cleared. -			 */ -			get_random_bytes(&nrpa, 6); -			nrpa.b[5] &= 0x3f; - -			/* The non-resolvable private address shall not be -			 * equal to the public address. -			 */ -			if (bacmp(&hdev->bdaddr, &nrpa)) -				break; -		} - -		*own_addr_type = ADDR_LE_DEV_RANDOM; -		set_random_addr(req, &nrpa); -		return 0; -	} - -	/* If forcing static address is in use or there is no public -	 * address use the static address as random address (but skip -	 * the HCI command if the current random address is already the -	 * static one. -	 * -	 * In case BR/EDR has been disabled on a dual-mode controller -	 * and a static address has been configured, then use that -	 * address instead of the public BR/EDR address. -	 */ -	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || -	    !bacmp(&hdev->bdaddr, BDADDR_ANY) || -	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && -	     bacmp(&hdev->static_addr, BDADDR_ANY))) { -		*own_addr_type = ADDR_LE_DEV_RANDOM; -		if (bacmp(&hdev->static_addr, &hdev->random_addr)) -			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, -				    &hdev->static_addr); -		return 0; -	} - -	/* Neither privacy nor static address is being used so use a -	 * public address. 
-	 */ -	*own_addr_type = ADDR_LE_DEV_PUBLIC; - -	return 0; -} - -static bool disconnected_accept_list_entries(struct hci_dev *hdev) -{ -	struct bdaddr_list *b; - -	list_for_each_entry(b, &hdev->accept_list, list) { -		struct hci_conn *conn; - -		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); -		if (!conn) -			return true; - -		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) -			return true; -	} - -	return false; -} - -void __hci_req_update_scan(struct hci_request *req) -{ -	struct hci_dev *hdev = req->hdev; -	u8 scan; - -	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) -		return; - -	if (!hdev_is_powered(hdev)) -		return; - -	if (mgmt_powering_down(hdev)) -		return; - -	if (hdev->scanning_paused) -		return; - -	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || -	    disconnected_accept_list_entries(hdev)) -		scan = SCAN_PAGE; -	else -		scan = SCAN_DISABLED; - -	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) -		scan |= SCAN_INQUIRY; - -	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && -	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) -		return; - -	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); -} - -static u8 get_service_classes(struct hci_dev *hdev) -{ -	struct bt_uuid *uuid; -	u8 val = 0; - -	list_for_each_entry(uuid, &hdev->uuids, list) -		val |= uuid->svc_hint; - -	return val; -} - -void __hci_req_update_class(struct hci_request *req) -{ -	struct hci_dev *hdev = req->hdev; -	u8 cod[3]; - -	bt_dev_dbg(hdev, ""); - -	if (!hdev_is_powered(hdev)) -		return; - -	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) -		return; - -	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) -		return; - -	cod[0] = hdev->minor_class; -	cod[1] = hdev->major_class; -	cod[2] = get_service_classes(hdev); - -	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) -		cod[1] |= 0x20; - -	if (memcmp(cod, hdev->dev_class, 3) == 0) -		return; - -	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); -} - -void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, -		      u8 reason) -{ -	switch (conn->state) { -	case BT_CONNECTED: -	case BT_CONFIG: -		if (conn->type == AMP_LINK) { -			struct hci_cp_disconn_phy_link cp; - -			cp.phy_handle = HCI_PHY_HANDLE(conn->handle); -			cp.reason = reason; -			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp), -				    &cp); -		} else { -			struct hci_cp_disconnect dc; - -			dc.handle = cpu_to_le16(conn->handle); -			dc.reason = reason; -			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc); -		} - -		conn->state = BT_DISCONN; - -		break; -	case BT_CONNECT: -		if (conn->type == LE_LINK) { -			if (test_bit(HCI_CONN_SCANNING, &conn->flags)) -				break; -			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL, -				    0, NULL); -		} else if (conn->type == ACL_LINK) { -			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2) -				break; -			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL, -				    6, &conn->dst); -		} -		break; -	case BT_CONNECT2: -		if (conn->type == ACL_LINK) { -			struct hci_cp_reject_conn_req rej; - -			bacpy(&rej.bdaddr, &conn->dst); -			rej.reason = reason; - -			hci_req_add(req, HCI_OP_REJECT_CONN_REQ, -				    sizeof(rej), &rej); -		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { -			struct hci_cp_reject_sync_conn_req rej; - -			bacpy(&rej.bdaddr, &conn->dst); - -			/* SCO rejection has its own limited set of -			 * allowed error values (0x0D-0x0F) which isn't -			 * compatible with most values passed to this -			 * function. 
To be safe hard-code one of the -			 * values that's suitable for SCO. -			 */ -			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; - -			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ, -				    sizeof(rej), &rej); -		} -		break; -	default: -		conn->state = BT_CLOSED; -		break; -	} -} - -static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode) -{ -	if (status) -		bt_dev_dbg(hdev, "Failed to abort connection: status 0x%2.2x", status); -} - -int hci_abort_conn(struct hci_conn *conn, u8 reason) -{ -	struct hci_request req; -	int err; - -	hci_req_init(&req, conn->hdev); - -	__hci_abort_conn(&req, conn, reason); - -	err = hci_req_run(&req, abort_conn_complete); -	if (err && err != -ENODATA) { -		bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err); -		return err; -	} - -	return 0; -} - -static int le_scan_disable(struct hci_request *req, unsigned long opt) -{ -	hci_req_add_le_scan_disable(req, false); -	return 0; -} - -static int bredr_inquiry(struct hci_request *req, unsigned long opt) -{ -	u8 length = opt; -	const u8 giac[3] = { 0x33, 0x8b, 0x9e }; -	const u8 liac[3] = { 0x00, 0x8b, 0x9e }; -	struct hci_cp_inquiry cp; - -	if (test_bit(HCI_INQUIRY, &req->hdev->flags)) -		return 0; - -	bt_dev_dbg(req->hdev, ""); - -	hci_dev_lock(req->hdev); -	hci_inquiry_cache_flush(req->hdev); -	hci_dev_unlock(req->hdev); - -	memset(&cp, 0, sizeof(cp)); - -	if (req->hdev->discovery.limited) -		memcpy(&cp.lap, liac, sizeof(cp.lap)); -	else -		memcpy(&cp.lap, giac, sizeof(cp.lap)); - -	cp.length = length; - -	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); - -	return 0; -} - -static void le_scan_disable_work(struct work_struct *work) -{ -	struct hci_dev *hdev = container_of(work, struct hci_dev, -					    le_scan_disable.work); -	u8 status; - -	bt_dev_dbg(hdev, ""); - -	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) -		return; - -	cancel_delayed_work(&hdev->le_scan_restart); - -	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status); -	if (status) { -		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x", -			   status); -		return; -	} - -	hdev->discovery.scan_start = 0; - -	/* If we were running LE only scan, change discovery state. If -	 * we were running both LE and BR/EDR inquiry simultaneously, -	 * and BR/EDR inquiry is already finished, stop discovery, -	 * otherwise BR/EDR inquiry will stop discovery when finished. -	 * If we will resolve remote device name, do not change -	 * discovery state. -	 */ - -	if (hdev->discovery.type == DISCOV_TYPE_LE) -		goto discov_stopped; - -	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) -		return; - -	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { -		if (!test_bit(HCI_INQUIRY, &hdev->flags) && -		    hdev->discovery.state != DISCOVERY_RESOLVING) -			goto discov_stopped; - -		return; -	} - -	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN, -		     HCI_CMD_TIMEOUT, &status); -	if (status) { -		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status); -		goto discov_stopped; -	} - -	return; - -discov_stopped: -	hci_dev_lock(hdev); -	hci_discovery_set_state(hdev, DISCOVERY_STOPPED); -	hci_dev_unlock(hdev); -} - -static int le_scan_restart(struct hci_request *req, unsigned long opt) -{ -	struct hci_dev *hdev = req->hdev; - -	/* If controller is not scanning we are done. 
*/ -	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) -		return 0; - -	if (hdev->scanning_paused) { -		bt_dev_dbg(hdev, "Scanning is paused for suspend"); -		return 0; -	} - -	hci_req_add_le_scan_disable(req, false); - -	if (use_ext_scan(hdev)) { -		struct hci_cp_le_set_ext_scan_enable ext_enable_cp; - -		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp)); -		ext_enable_cp.enable = LE_SCAN_ENABLE; -		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; - -		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, -			    sizeof(ext_enable_cp), &ext_enable_cp); -	} else { -		struct hci_cp_le_set_scan_enable cp; - -		memset(&cp, 0, sizeof(cp)); -		cp.enable = LE_SCAN_ENABLE; -		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; -		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); -	} - -	return 0; -} - -static void le_scan_restart_work(struct work_struct *work) -{ -	struct hci_dev *hdev = container_of(work, struct hci_dev, -					    le_scan_restart.work); -	unsigned long timeout, duration, scan_start, now; -	u8 status; - -	bt_dev_dbg(hdev, ""); - -	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status); -	if (status) { -		bt_dev_err(hdev, "failed to restart LE scan: status %d", -			   status); -		return; -	} - -	hci_dev_lock(hdev); - -	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || -	    !hdev->discovery.scan_start) -		goto unlock; - -	/* When the scan was started, hdev->le_scan_disable has been queued -	 * after duration from scan_start. During scan restart this job -	 * has been canceled, and we need to queue it again after proper -	 * timeout, to make sure that scan does not run indefinitely. -	 */ -	duration = hdev->discovery.scan_duration; -	scan_start = hdev->discovery.scan_start; -	now = jiffies; -	if (now - scan_start <= duration) { -		int elapsed; - -		if (now >= scan_start) -			elapsed = now - scan_start; -		else -			elapsed = ULONG_MAX - scan_start + now; - -		timeout = duration - elapsed; -	} else { -		timeout = 0; -	} - -	queue_delayed_work(hdev->req_workqueue, -			   &hdev->le_scan_disable, timeout); - -unlock: -	hci_dev_unlock(hdev); -} - -bool hci_req_stop_discovery(struct hci_request *req) -{ -	struct hci_dev *hdev = req->hdev; -	struct discovery_state *d = &hdev->discovery; -	struct hci_cp_remote_name_req_cancel cp; -	struct inquiry_entry *e; -	bool ret = false; - -	bt_dev_dbg(hdev, "state %u", hdev->discovery.state); - -	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { -		if (test_bit(HCI_INQUIRY, &hdev->flags)) -			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL); - -		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { -			cancel_delayed_work(&hdev->le_scan_disable); -			cancel_delayed_work(&hdev->le_scan_restart); -			hci_req_add_le_scan_disable(req, false); -		} - -		ret = true; -	} else { -		/* Passive scanning */ -		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { -			hci_req_add_le_scan_disable(req, false); -			ret = true; -		} -	} - -	/* No further actions needed for LE-only discovery */ -	if (d->type == DISCOV_TYPE_LE) -		return ret; - -	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { -		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, -						     NAME_PENDING); -		if (!e) -			return ret; - -		bacpy(&cp.bdaddr, &e->data.bdaddr); -		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), -			    &cp); -		ret = true; -	} - -	return ret; -} - -static void config_data_path_complete(struct hci_dev *hdev, u8 status, -				      u16 opcode) -{ -	bt_dev_dbg(hdev, "status %u", status); -} - -int 
hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec) -{ -	struct hci_request req; -	int err; -	__u8 vnd_len, *vnd_data = NULL; -	struct hci_op_configure_data_path *cmd = NULL; - -	hci_req_init(&req, hdev); - -	err = hdev->get_codec_config_data(hdev, ESCO_LINK, codec, &vnd_len, -					  &vnd_data); -	if (err < 0) -		goto error; - -	cmd = kzalloc(sizeof(*cmd) + vnd_len, GFP_KERNEL); -	if (!cmd) { -		err = -ENOMEM; -		goto error; -	} - -	err = hdev->get_data_path_id(hdev, &cmd->data_path_id); -	if (err < 0) -		goto error; - -	cmd->vnd_len = vnd_len; -	memcpy(cmd->vnd_data, vnd_data, vnd_len); - -	cmd->direction = 0x00; -	hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd); - -	cmd->direction = 0x01; -	hci_req_add(&req, HCI_CONFIGURE_DATA_PATH, sizeof(*cmd) + vnd_len, cmd); - -	err = hci_req_run(&req, config_data_path_complete); -error: - -	kfree(cmd); -	kfree(vnd_data); -	return err; -} -  void hci_request_setup(struct hci_dev *hdev)  { -	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); -	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work); -	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);  	INIT_DELAYED_WORK(&hdev->interleave_scan, interleave_scan_work);  } @@ -2262,13 +918,5 @@ void hci_request_cancel_all(struct hci_dev *hdev)  {  	__hci_cmd_sync_cancel(hdev, ENODEV); -	cancel_delayed_work_sync(&hdev->le_scan_disable); -	cancel_delayed_work_sync(&hdev->le_scan_restart); - -	if (hdev->adv_instance_timeout) { -		cancel_delayed_work_sync(&hdev->adv_instance_expire); -		hdev->adv_instance_timeout = 0; -	} -  	cancel_interleave_scan(hdev);  } diff --git a/net/bluetooth/hci_request.h b/net/bluetooth/hci_request.h index 39d001fa3acf..b9c5a9823837 100644 --- a/net/bluetooth/hci_request.h +++ b/net/bluetooth/hci_request.h @@ -68,63 +68,10 @@ int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,  struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,  				const void *param); -void __hci_req_write_fast_connectable(struct hci_request *req, bool enable); -void __hci_req_update_name(struct hci_request *req); -void __hci_req_update_eir(struct hci_request *req); -  void hci_req_add_le_scan_disable(struct hci_request *req, bool rpa_le_conn);  void hci_req_add_le_passive_scan(struct hci_request *req);  void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next); -void hci_req_disable_address_resolution(struct hci_dev *hdev); -void hci_req_reenable_advertising(struct hci_dev *hdev); -void __hci_req_enable_advertising(struct hci_request *req); -void __hci_req_disable_advertising(struct hci_request *req); -void __hci_req_update_adv_data(struct hci_request *req, u8 instance); -int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance); -int hci_req_start_per_adv(struct hci_dev *hdev, u8 instance, u32 flags, -			  u16 min_interval, u16 max_interval, -			  u16 sync_interval); -void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance); - -int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance, -				    bool force); -void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk, -				struct hci_request *req, u8 instance, -				bool force); - -int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance); -int __hci_req_setup_per_adv_instance(struct hci_request *req, u8 instance, -				     u16 min_interval, u16 max_interval); -int __hci_req_start_ext_adv(struct hci_request *req, u8 instance); -int 
__hci_req_start_per_adv(struct hci_request *req, u8 instance, u32 flags, -			    u16 min_interval, u16 max_interval, -			    u16 sync_interval); -int __hci_req_enable_ext_advertising(struct hci_request *req, u8 instance); -int __hci_req_enable_per_advertising(struct hci_request *req, u8 instance); -int __hci_req_disable_ext_adv_instance(struct hci_request *req, u8 instance); -int __hci_req_remove_ext_adv_instance(struct hci_request *req, u8 instance); -void __hci_req_clear_ext_adv_sets(struct hci_request *req); -int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, -			   bool use_rpa, struct adv_info *adv_instance, -			   u8 *own_addr_type, bdaddr_t *rand_addr); - -void __hci_req_update_class(struct hci_request *req); - -/* Returns true if HCI commands were queued */ -bool hci_req_stop_discovery(struct hci_request *req); - -int hci_req_configure_datapath(struct hci_dev *hdev, struct bt_codec *codec); - -void __hci_req_update_scan(struct hci_request *req); - -int hci_update_random_address(struct hci_request *req, bool require_privacy, -			      bool use_rpa, u8 *own_addr_type); - -int hci_abort_conn(struct hci_conn *conn, u8 reason); -void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, -		      u8 reason); -  void hci_request_setup(struct hci_dev *hdev);  void hci_request_cancel_all(struct hci_dev *hdev); diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 0d015d4a8e41..06581223238c 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -887,7 +887,6 @@ static int hci_sock_release(struct socket *sock)  			 */  			hci_dev_do_close(hdev);  			hci_dev_clear_flag(hdev, HCI_USER_CHANNEL); -			hci_register_suspend_notifier(hdev);  			mgmt_index_added(hdev);  		} @@ -1216,7 +1215,6 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,  		}  		mgmt_index_removed(hdev); -		hci_unregister_suspend_notifier(hdev);  		err = hci_dev_open(hdev->id);  		if (err) { @@ -1231,7 +1229,6 @@ static int hci_sock_bind(struct socket *sock, struct sockaddr *addr,  				err = 0;  			} else {  				hci_dev_clear_flag(hdev, HCI_USER_CHANNEL); -				hci_register_suspend_notifier(hdev);  				mgmt_index_added(hdev);  				hci_dev_put(hdev);  				goto done; @@ -2065,6 +2062,7 @@ static int hci_sock_getsockopt(struct socket *sock, int level, int optname,  static void hci_sock_destruct(struct sock *sk)  { +	mgmt_cleanup(sk);  	skb_queue_purge(&sk->sk_receive_queue);  	skb_queue_purge(&sk->sk_write_queue);  } diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index fbd5613eebfc..76c3107c9f91 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -246,7 +246,7 @@ int __hci_cmd_sync_status_sk(struct hci_dev *hdev, u16 opcode, u32 plen,  	skb = __hci_cmd_sync_sk(hdev, opcode, plen, param, event, timeout, sk);  	if (IS_ERR(skb)) {  		bt_dev_err(hdev, "Opcode 0x%4x failed: %ld", opcode, -			   PTR_ERR(skb)); +				PTR_ERR(skb));  		return PTR_ERR(skb);  	} @@ -321,6 +321,307 @@ static void hci_cmd_sync_cancel_work(struct work_struct *work)  	wake_up_interruptible(&hdev->req_wait_q);  } +static int hci_scan_disable_sync(struct hci_dev *hdev); +static int scan_disable_sync(struct hci_dev *hdev, void *data) +{ +	return hci_scan_disable_sync(hdev); +} + +static int hci_inquiry_sync(struct hci_dev *hdev, u8 length); +static int interleaved_inquiry_sync(struct hci_dev *hdev, void *data) +{ +	return hci_inquiry_sync(hdev, DISCOV_INTERLEAVED_INQUIRY_LEN); +} + +static void le_scan_disable(struct work_struct *work) +{ +	
struct hci_dev *hdev = container_of(work, struct hci_dev, +					    le_scan_disable.work); +	int status; + +	bt_dev_dbg(hdev, ""); +	hci_dev_lock(hdev); + +	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) +		goto _return; + +	cancel_delayed_work(&hdev->le_scan_restart); + +	status = hci_cmd_sync_queue(hdev, scan_disable_sync, NULL, NULL); +	if (status) { +		bt_dev_err(hdev, "failed to disable LE scan: %d", status); +		goto _return; +	} + +	hdev->discovery.scan_start = 0; + +	/* If we were running LE only scan, change discovery state. If +	 * we were running both LE and BR/EDR inquiry simultaneously, +	 * and BR/EDR inquiry is already finished, stop discovery, +	 * otherwise BR/EDR inquiry will stop discovery when finished. +	 * If we will resolve remote device name, do not change +	 * discovery state. +	 */ + +	if (hdev->discovery.type == DISCOV_TYPE_LE) +		goto discov_stopped; + +	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) +		goto _return; + +	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { +		if (!test_bit(HCI_INQUIRY, &hdev->flags) && +		    hdev->discovery.state != DISCOVERY_RESOLVING) +			goto discov_stopped; + +		goto _return; +	} + +	status = hci_cmd_sync_queue(hdev, interleaved_inquiry_sync, NULL, NULL); +	if (status) { +		bt_dev_err(hdev, "inquiry failed: status %d", status); +		goto discov_stopped; +	} + +	goto _return; + +discov_stopped: +	hci_discovery_set_state(hdev, DISCOVERY_STOPPED); + +_return: +	hci_dev_unlock(hdev); +} + +static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val, +				       u8 filter_dup); +static int hci_le_scan_restart_sync(struct hci_dev *hdev) +{ +	/* If controller is not scanning we are done. */ +	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) +		return 0; + +	if (hdev->scanning_paused) { +		bt_dev_dbg(hdev, "Scanning is paused for suspend"); +		return 0; +	} + +	hci_le_set_scan_enable_sync(hdev, LE_SCAN_DISABLE, 0x00); +	return hci_le_set_scan_enable_sync(hdev, LE_SCAN_ENABLE, +					   LE_SCAN_FILTER_DUP_ENABLE); +} + +static int le_scan_restart_sync(struct hci_dev *hdev, void *data) +{ +	return hci_le_scan_restart_sync(hdev); +} + +static void le_scan_restart(struct work_struct *work) +{ +	struct hci_dev *hdev = container_of(work, struct hci_dev, +					    le_scan_restart.work); +	unsigned long timeout, duration, scan_start, now; +	int status; + +	bt_dev_dbg(hdev, ""); + +	hci_dev_lock(hdev); + +	status = hci_cmd_sync_queue(hdev, le_scan_restart_sync, NULL, NULL); +	if (status) { +		bt_dev_err(hdev, "failed to restart LE scan: status %d", +			   status); +		goto unlock; +	} + +	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || +	    !hdev->discovery.scan_start) +		goto unlock; + +	/* When the scan was started, hdev->le_scan_disable has been queued +	 * after duration from scan_start. During scan restart this job +	 * has been canceled, and we need to queue it again after proper +	 * timeout, to make sure that scan does not run indefinitely. 
+	 */ +	duration = hdev->discovery.scan_duration; +	scan_start = hdev->discovery.scan_start; +	now = jiffies; +	if (now - scan_start <= duration) { +		int elapsed; + +		if (now >= scan_start) +			elapsed = now - scan_start; +		else +			elapsed = ULONG_MAX - scan_start + now; + +		timeout = duration - elapsed; +	} else { +		timeout = 0; +	} + +	queue_delayed_work(hdev->req_workqueue, +			   &hdev->le_scan_disable, timeout); + +unlock: +	hci_dev_unlock(hdev); +} + +static int reenable_adv_sync(struct hci_dev *hdev, void *data) +{ +	bt_dev_dbg(hdev, ""); + +	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && +	    list_empty(&hdev->adv_instances)) +		return 0; + +	if (hdev->cur_adv_instance) { +		return hci_schedule_adv_instance_sync(hdev, +						      hdev->cur_adv_instance, +						      true); +	} else { +		if (ext_adv_capable(hdev)) { +			hci_start_ext_adv_sync(hdev, 0x00); +		} else { +			hci_update_adv_data_sync(hdev, 0x00); +			hci_update_scan_rsp_data_sync(hdev, 0x00); +			hci_enable_advertising_sync(hdev); +		} +	} + +	return 0; +} + +static void reenable_adv(struct work_struct *work) +{ +	struct hci_dev *hdev = container_of(work, struct hci_dev, +					    reenable_adv_work); +	int status; + +	bt_dev_dbg(hdev, ""); + +	hci_dev_lock(hdev); + +	status = hci_cmd_sync_queue(hdev, reenable_adv_sync, NULL, NULL); +	if (status) +		bt_dev_err(hdev, "failed to reenable ADV: %d", status); + +	hci_dev_unlock(hdev); +} + +static void cancel_adv_timeout(struct hci_dev *hdev) +{ +	if (hdev->adv_instance_timeout) { +		hdev->adv_instance_timeout = 0; +		cancel_delayed_work(&hdev->adv_instance_expire); +	} +} + +/* For a single instance: + * - force == true: The instance will be removed even when its remaining + *   lifetime is not zero. + * - force == false: the instance will be deactivated but kept stored unless + *   the remaining lifetime is zero. + * + * For instance == 0x00: + * - force == true: All instances will be removed regardless of their timeout + *   setting. + * - force == false: Only instances that have a timeout will be removed. + */ +int hci_clear_adv_instance_sync(struct hci_dev *hdev, struct sock *sk, +				u8 instance, bool force) +{ +	struct adv_info *adv_instance, *n, *next_instance = NULL; +	int err; +	u8 rem_inst; + +	/* Cancel any timeout concerning the removed instance(s). */ +	if (!instance || hdev->cur_adv_instance == instance) +		cancel_adv_timeout(hdev); + +	/* Get the next instance to advertise BEFORE we remove +	 * the current one. This can be the same instance again +	 * if there is only one instance. +	 */ +	if (instance && hdev->cur_adv_instance == instance) +		next_instance = hci_get_next_instance(hdev, instance); + +	if (instance == 0x00) { +		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, +					 list) { +			if (!(force || adv_instance->timeout)) +				continue; + +			rem_inst = adv_instance->instance; +			err = hci_remove_adv_instance(hdev, rem_inst); +			if (!err) +				mgmt_advertising_removed(sk, hdev, rem_inst); +		} +	} else { +		adv_instance = hci_find_adv_instance(hdev, instance); + +		if (force || (adv_instance && adv_instance->timeout && +			      !adv_instance->remaining_time)) { +			/* Don't advertise a removed instance. 
*/ +			if (next_instance && +			    next_instance->instance == instance) +				next_instance = NULL; + +			err = hci_remove_adv_instance(hdev, instance); +			if (!err) +				mgmt_advertising_removed(sk, hdev, instance); +		} +	} + +	if (!hdev_is_powered(hdev) || hci_dev_test_flag(hdev, HCI_ADVERTISING)) +		return 0; + +	if (next_instance && !ext_adv_capable(hdev)) +		return hci_schedule_adv_instance_sync(hdev, +						      next_instance->instance, +						      false); + +	return 0; +} + +static int adv_timeout_expire_sync(struct hci_dev *hdev, void *data) +{ +	u8 instance = *(u8 *)data; + +	kfree(data); + +	hci_clear_adv_instance_sync(hdev, NULL, instance, false); + +	if (list_empty(&hdev->adv_instances)) +		return hci_disable_advertising_sync(hdev); + +	return 0; +} + +static void adv_timeout_expire(struct work_struct *work) +{ +	u8 *inst_ptr; +	struct hci_dev *hdev = container_of(work, struct hci_dev, +					    adv_instance_expire.work); + +	bt_dev_dbg(hdev, ""); + +	hci_dev_lock(hdev); + +	hdev->adv_instance_timeout = 0; + +	if (hdev->cur_adv_instance == 0x00) +		goto unlock; + +	inst_ptr = kmalloc(1, GFP_KERNEL); +	if (!inst_ptr) +		goto unlock; + +	*inst_ptr = hdev->cur_adv_instance; +	hci_cmd_sync_queue(hdev, adv_timeout_expire_sync, inst_ptr, NULL); + +unlock: +	hci_dev_unlock(hdev); +} +  void hci_cmd_sync_init(struct hci_dev *hdev)  {  	INIT_WORK(&hdev->cmd_sync_work, hci_cmd_sync_work); @@ -328,6 +629,10 @@ void hci_cmd_sync_init(struct hci_dev *hdev)  	mutex_init(&hdev->cmd_sync_work_lock);  	INIT_WORK(&hdev->cmd_sync_cancel_work, hci_cmd_sync_cancel_work); +	INIT_WORK(&hdev->reenable_adv_work, reenable_adv); +	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable); +	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart); +	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);  }  void hci_cmd_sync_clear(struct hci_dev *hdev) @@ -335,6 +640,7 @@ void hci_cmd_sync_clear(struct hci_dev *hdev)  	struct hci_cmd_sync_work_entry *entry, *tmp;  	cancel_work_sync(&hdev->cmd_sync_work); +	cancel_work_sync(&hdev->reenable_adv_work);  	list_for_each_entry_safe(entry, tmp, &hdev->cmd_sync_work_list, list) {  		if (entry->destroy) @@ -1333,14 +1639,6 @@ int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason)  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);  } -static void cancel_adv_timeout(struct hci_dev *hdev) -{ -	if (hdev->adv_instance_timeout) { -		hdev->adv_instance_timeout = 0; -		cancel_delayed_work(&hdev->adv_instance_expire); -	} -} -  static int hci_set_ext_adv_data_sync(struct hci_dev *hdev, u8 instance)  {  	struct { @@ -1492,10 +1790,13 @@ static int hci_clear_adv_sets_sync(struct hci_dev *hdev, struct sock *sk)  static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)  {  	struct adv_info *adv, *n; +	int err = 0;  	if (ext_adv_capable(hdev))  		/* Remove all existing sets */ -		return hci_clear_adv_sets_sync(hdev, sk); +		err = hci_clear_adv_sets_sync(hdev, sk); +	if (ext_adv_capable(hdev)) +		return err;  	/* This is safe as long as there is no command send while the lock is  	 * held. @@ -1523,11 +1824,13 @@ static int hci_clear_adv_sync(struct hci_dev *hdev, struct sock *sk, bool force)  static int hci_remove_adv_sync(struct hci_dev *hdev, u8 instance,  			       struct sock *sk)  { -	int err; +	int err = 0;  	/* If we use extended advertising, instance has to be removed first. 
*/  	if (ext_adv_capable(hdev)) -		return hci_remove_ext_adv_instance_sync(hdev, instance, sk); +		err = hci_remove_ext_adv_instance_sync(hdev, instance, sk); +	if (ext_adv_capable(hdev)) +		return err;  	/* This is safe as long as there is no command send while the lock is  	 * held. @@ -1626,13 +1929,16 @@ int hci_read_tx_power_sync(struct hci_dev *hdev, __le16 handle, u8 type)  int hci_disable_advertising_sync(struct hci_dev *hdev)  {  	u8 enable = 0x00; +	int err = 0;  	/* If controller is not advertising we are done. */  	if (!hci_dev_test_flag(hdev, HCI_LE_ADV))  		return 0;  	if (ext_adv_capable(hdev)) -		return hci_disable_ext_adv_instance_sync(hdev, 0x00); +		err = hci_disable_ext_adv_instance_sync(hdev, 0x00); +	if (ext_adv_capable(hdev)) +		return err;  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_ADV_ENABLE,  				     sizeof(enable), &enable, HCI_CMD_TIMEOUT); @@ -1645,7 +1951,11 @@ static int hci_le_set_ext_scan_enable_sync(struct hci_dev *hdev, u8 val,  	memset(&cp, 0, sizeof(cp));  	cp.enable = val; -	cp.filter_dup = filter_dup; + +	if (hci_dev_test_flag(hdev, HCI_MESH)) +		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; +	else +		cp.filter_dup = filter_dup;  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_EXT_SCAN_ENABLE,  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT); @@ -1661,7 +1971,11 @@ static int hci_le_set_scan_enable_sync(struct hci_dev *hdev, u8 val,  	memset(&cp, 0, sizeof(cp));  	cp.enable = val; -	cp.filter_dup = filter_dup; + +	if (val && hci_dev_test_flag(hdev, HCI_MESH)) +		cp.filter_dup = LE_SCAN_FILTER_DUP_DISABLE; +	else +		cp.filter_dup = filter_dup;  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_SCAN_ENABLE,  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT); @@ -2300,6 +2614,7 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)  	u8 own_addr_type;  	u8 filter_policy;  	u16 window, interval; +	u8 filter_dups = LE_SCAN_FILTER_DUP_ENABLE;  	int err;  	if (hdev->scanning_paused) { @@ -2362,11 +2677,16 @@ static int hci_passive_scan_sync(struct hci_dev *hdev)  		interval = hdev->le_scan_interval;  	} +	/* Disable all filtering for Mesh */ +	if (hci_dev_test_flag(hdev, HCI_MESH)) { +		filter_policy = 0; +		filter_dups = LE_SCAN_FILTER_DUP_DISABLE; +	} +  	bt_dev_dbg(hdev, "LE passive scan with acceptlist = %d", filter_policy);  	return hci_start_scan_sync(hdev, LE_SCAN_PASSIVE, interval, window, -				   own_addr_type, filter_policy, -				   LE_SCAN_FILTER_DUP_ENABLE); +				   own_addr_type, filter_policy, filter_dups);  }  /* This function controls the passive scanning based on hdev->pend_le_conns @@ -2416,7 +2736,8 @@ int hci_update_passive_scan_sync(struct hci_dev *hdev)  	bt_dev_dbg(hdev, "ADV monitoring is %s",  		   hci_is_adv_monitoring(hdev) ? 
"on" : "off"); -	if (list_empty(&hdev->pend_le_conns) && +	if (!hci_dev_test_flag(hdev, HCI_MESH) && +	    list_empty(&hdev->pend_le_conns) &&  	    list_empty(&hdev->pend_le_reports) &&  	    !hci_is_adv_monitoring(hdev) &&  	    !hci_dev_test_flag(hdev, HCI_PA_SYNC)) { @@ -4355,6 +4676,7 @@ int hci_dev_open_sync(struct hci_dev *hdev)  		    hci_dev_test_flag(hdev, HCI_MGMT) &&  		    hdev->dev_type == HCI_PRIMARY) {  			ret = hci_powered_update_sync(hdev); +			mgmt_power_on(hdev, ret);  		}  	} else {  		/* Init failed, cleanup */ @@ -4406,6 +4728,31 @@ static void hci_pend_le_actions_clear(struct hci_dev *hdev)  	BT_DBG("All LE pending actions cleared");  } +static int hci_dev_shutdown(struct hci_dev *hdev) +{ +	int err = 0; +	/* Similar to how we first do setup and then set the exclusive access +	 * bit for userspace, we must first unset userchannel and then clean up. +	 * Otherwise, the kernel can't properly use the hci channel to clean up +	 * the controller (some shutdown routines require sending additional +	 * commands to the controller for example). +	 */ +	bool was_userchannel = +		hci_dev_test_and_clear_flag(hdev, HCI_USER_CHANNEL); + +	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && +	    test_bit(HCI_UP, &hdev->flags)) { +		/* Execute vendor specific shutdown routine */ +		if (hdev->shutdown) +			err = hdev->shutdown(hdev); +	} + +	if (was_userchannel) +		hci_dev_set_flag(hdev, HCI_USER_CHANNEL); + +	return err; +} +  int hci_dev_close_sync(struct hci_dev *hdev)  {  	bool auto_off; @@ -4415,17 +4762,18 @@ int hci_dev_close_sync(struct hci_dev *hdev)  	cancel_delayed_work(&hdev->power_off);  	cancel_delayed_work(&hdev->ncmd_timer); +	cancel_delayed_work(&hdev->le_scan_disable); +	cancel_delayed_work(&hdev->le_scan_restart);  	hci_request_cancel_all(hdev); -	if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && -	    !hci_dev_test_flag(hdev, HCI_USER_CHANNEL) && -	    test_bit(HCI_UP, &hdev->flags)) { -		/* Execute vendor specific shutdown routine */ -		if (hdev->shutdown) -			err = hdev->shutdown(hdev); +	if (hdev->adv_instance_timeout) { +		cancel_delayed_work_sync(&hdev->adv_instance_expire); +		hdev->adv_instance_timeout = 0;  	} +	err = hci_dev_shutdown(hdev); +  	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {  		cancel_delayed_work_sync(&hdev->cmd_timer);  		return err; @@ -5023,7 +5371,7 @@ static int hci_active_scan_sync(struct hci_dev *hdev, uint16_t interval)  	/* Pause advertising since active scanning disables address resolution  	 * which advertising depend on in order to generate its RPAs.  	 */ -	if (use_ll_privacy(hdev)) { +	if (use_ll_privacy(hdev) && hci_dev_test_flag(hdev, HCI_PRIVACY)) {  		err = hci_pause_advertising_sync(hdev);  		if (err) {  			bt_dev_err(hdev, "pause advertising failed: %d", err); @@ -5737,3 +6085,96 @@ int hci_le_pa_terminate_sync(struct hci_dev *hdev, u16 handle)  	return __hci_cmd_sync_status(hdev, HCI_OP_LE_PA_TERM_SYNC,  				     sizeof(cp), &cp, HCI_CMD_TIMEOUT);  } + +int hci_get_random_address(struct hci_dev *hdev, bool require_privacy, +			   bool use_rpa, struct adv_info *adv_instance, +			   u8 *own_addr_type, bdaddr_t *rand_addr) +{ +	int err; + +	bacpy(rand_addr, BDADDR_ANY); + +	/* If privacy is enabled use a resolvable private address. If +	 * current RPA has expired then generate a new one. 
+	 */ +	if (use_rpa) { +		/* If Controller supports LL Privacy use own address type is +		 * 0x03 +		 */ +		if (use_ll_privacy(hdev)) +			*own_addr_type = ADDR_LE_DEV_RANDOM_RESOLVED; +		else +			*own_addr_type = ADDR_LE_DEV_RANDOM; + +		if (adv_instance) { +			if (adv_rpa_valid(adv_instance)) +				return 0; +		} else { +			if (rpa_valid(hdev)) +				return 0; +		} + +		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); +		if (err < 0) { +			bt_dev_err(hdev, "failed to generate new RPA"); +			return err; +		} + +		bacpy(rand_addr, &hdev->rpa); + +		return 0; +	} + +	/* In case of required privacy without resolvable private address, +	 * use an non-resolvable private address. This is useful for +	 * non-connectable advertising. +	 */ +	if (require_privacy) { +		bdaddr_t nrpa; + +		while (true) { +			/* The non-resolvable private address is generated +			 * from random six bytes with the two most significant +			 * bits cleared. +			 */ +			get_random_bytes(&nrpa, 6); +			nrpa.b[5] &= 0x3f; + +			/* The non-resolvable private address shall not be +			 * equal to the public address. +			 */ +			if (bacmp(&hdev->bdaddr, &nrpa)) +				break; +		} + +		*own_addr_type = ADDR_LE_DEV_RANDOM; +		bacpy(rand_addr, &nrpa); + +		return 0; +	} + +	/* No privacy so use a public address. */ +	*own_addr_type = ADDR_LE_DEV_PUBLIC; + +	return 0; +} + +static int _update_adv_data_sync(struct hci_dev *hdev, void *data) +{ +	u8 instance = *(u8 *)data; + +	kfree(data); + +	return hci_update_adv_data_sync(hdev, instance); +} + +int hci_update_adv_data(struct hci_dev *hdev, u8 instance) +{ +	u8 *inst_ptr = kmalloc(1, GFP_KERNEL); + +	if (!inst_ptr) +		return -ENOMEM; + +	*inst_ptr = instance; +	return hci_cmd_sync_queue(hdev, _update_adv_data_sync, inst_ptr, NULL); +} diff --git a/net/bluetooth/hci_sysfs.c b/net/bluetooth/hci_sysfs.c index 4e3e0451b08c..08542dfc2dc5 100644 --- a/net/bluetooth/hci_sysfs.c +++ b/net/bluetooth/hci_sysfs.c @@ -48,6 +48,9 @@ void hci_conn_add_sysfs(struct hci_conn *conn)  	BT_DBG("conn %p", conn); +	if (device_is_registered(&conn->dev)) +		return; +  	dev_set_name(&conn->dev, "%s:%d", hdev->name, conn->handle);  	if (device_add(&conn->dev) < 0) { diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 2c9de67daadc..1f34b82ca0ec 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -61,6 +61,9 @@ static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);  static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,  		     struct sk_buff_head *skbs, u8 event); +static void l2cap_retrans_timeout(struct work_struct *work); +static void l2cap_monitor_timeout(struct work_struct *work); +static void l2cap_ack_timeout(struct work_struct *work);  static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)  { @@ -476,6 +479,9 @@ struct l2cap_chan *l2cap_chan_create(void)  	write_unlock(&chan_list_lock);  	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout); +	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); +	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); +	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);  	chan->state = BT_OPEN; @@ -3320,10 +3326,6 @@ int l2cap_ertm_init(struct l2cap_chan *chan)  	chan->rx_state = L2CAP_RX_STATE_RECV;  	chan->tx_state = L2CAP_TX_STATE_XMIT; -	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout); -	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout); -	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout); -  	
skb_queue_head_init(&chan->srej_q);  	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win); @@ -4307,6 +4309,12 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,  		}  	} +	chan = l2cap_chan_hold_unless_zero(chan); +	if (!chan) { +		err = -EBADSLT; +		goto unlock; +	} +  	err = 0;  	l2cap_chan_lock(chan); @@ -4336,6 +4344,7 @@ static int l2cap_connect_create_rsp(struct l2cap_conn *conn,  	}  	l2cap_chan_unlock(chan); +	l2cap_chan_put(chan);  unlock:  	mutex_unlock(&conn->chan_lock); diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 72e6595a71cc..a92e7e485feb 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -129,6 +129,10 @@ static const u16 mgmt_commands[] = {  	MGMT_OP_ADD_EXT_ADV_PARAMS,  	MGMT_OP_ADD_EXT_ADV_DATA,  	MGMT_OP_ADD_ADV_PATTERNS_MONITOR_RSSI, +	MGMT_OP_SET_MESH_RECEIVER, +	MGMT_OP_MESH_READ_FEATURES, +	MGMT_OP_MESH_SEND, +	MGMT_OP_MESH_SEND_CANCEL,  };  static const u16 mgmt_events[] = { @@ -1048,9 +1052,66 @@ static void discov_off(struct work_struct *work)  	hci_dev_unlock(hdev);  } +static int send_settings_rsp(struct sock *sk, u16 opcode, struct hci_dev *hdev); + +static void mesh_send_complete(struct hci_dev *hdev, +			       struct mgmt_mesh_tx *mesh_tx, bool silent) +{ +	u8 handle = mesh_tx->handle; + +	if (!silent) +		mgmt_event(MGMT_EV_MESH_PACKET_CMPLT, hdev, &handle, +			   sizeof(handle), NULL); + +	mgmt_mesh_remove(mesh_tx); +} + +static int mesh_send_done_sync(struct hci_dev *hdev, void *data) +{ +	struct mgmt_mesh_tx *mesh_tx; + +	hci_dev_clear_flag(hdev, HCI_MESH_SENDING); +	hci_disable_advertising_sync(hdev); +	mesh_tx = mgmt_mesh_next(hdev, NULL); + +	if (mesh_tx) +		mesh_send_complete(hdev, mesh_tx, false); + +	return 0; +} + +static int mesh_send_sync(struct hci_dev *hdev, void *data); +static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err); +static void mesh_next(struct hci_dev *hdev, void *data, int err) +{ +	struct mgmt_mesh_tx *mesh_tx = mgmt_mesh_next(hdev, NULL); + +	if (!mesh_tx) +		return; + +	err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx, +				 mesh_send_start_complete); + +	if (err < 0) +		mesh_send_complete(hdev, mesh_tx, false); +	else +		hci_dev_set_flag(hdev, HCI_MESH_SENDING); +} + +static void mesh_send_done(struct work_struct *work) +{ +	struct hci_dev *hdev = container_of(work, struct hci_dev, +					    mesh_send_done.work); + +	if (!hci_dev_test_flag(hdev, HCI_MESH_SENDING)) +		return; + +	hci_cmd_sync_queue(hdev, mesh_send_done_sync, NULL, mesh_next); +} +  static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)  { -	if (hci_dev_test_and_set_flag(hdev, HCI_MGMT)) +	if (hci_dev_test_flag(hdev, HCI_MGMT))  		return;  	BT_INFO("MGMT ver %d.%d", MGMT_VERSION, MGMT_REVISION); @@ -1058,6 +1119,7 @@ static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)  	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);  	INIT_DELAYED_WORK(&hdev->service_cache, service_cache_off);  	INIT_DELAYED_WORK(&hdev->rpa_expired, rpa_expired); +	INIT_DELAYED_WORK(&hdev->mesh_send_done, mesh_send_done);  	/* Non-mgmt controlled devices get this bit set  	 * implicitly so that pairing works for them, however @@ -1065,6 +1127,8 @@ static void mgmt_init_hdev(struct sock *sk, struct hci_dev *hdev)  	 * it  	 */  	hci_dev_clear_flag(hdev, HCI_BONDABLE); + +	hci_dev_set_flag(hdev, HCI_MGMT);  }  static int read_controller_info(struct sock *sk, struct hci_dev *hdev, @@ -2058,6 +2122,8 @@ static int set_le_sync(struct hci_dev *hdev, void *data)  	int err;  	if (!val) { +		
hci_clear_adv_instance_sync(hdev, NULL, 0x00, true); +  		if (hci_dev_test_flag(hdev, HCI_LE_ADV))  			hci_disable_advertising_sync(hdev); @@ -2092,6 +2158,317 @@ static int set_le_sync(struct hci_dev *hdev, void *data)  	return err;  } +static void set_mesh_complete(struct hci_dev *hdev, void *data, int err) +{ +	struct mgmt_pending_cmd *cmd = data; +	u8 status = mgmt_status(err); +	struct sock *sk = cmd->sk; + +	if (status) { +		mgmt_pending_foreach(MGMT_OP_SET_MESH_RECEIVER, hdev, +				     cmd_status_rsp, &status); +		return; +	} + +	mgmt_pending_remove(cmd); +	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, 0, NULL, 0); +} + +static int set_mesh_sync(struct hci_dev *hdev, void *data) +{ +	struct mgmt_pending_cmd *cmd = data; +	struct mgmt_cp_set_mesh *cp = cmd->param; +	size_t len = cmd->param_len; + +	memset(hdev->mesh_ad_types, 0, sizeof(hdev->mesh_ad_types)); + +	if (cp->enable) +		hci_dev_set_flag(hdev, HCI_MESH); +	else +		hci_dev_clear_flag(hdev, HCI_MESH); + +	len -= sizeof(*cp); + +	/* If filters don't fit, forward all adv pkts */ +	if (len <= sizeof(hdev->mesh_ad_types)) +		memcpy(hdev->mesh_ad_types, cp->ad_types, len); + +	hci_update_passive_scan_sync(hdev); +	return 0; +} + +static int set_mesh(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) +{ +	struct mgmt_cp_set_mesh *cp = data; +	struct mgmt_pending_cmd *cmd; +	int err = 0; + +	bt_dev_dbg(hdev, "sock %p", sk); + +	if (!lmp_le_capable(hdev) || +	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) +		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, +				       MGMT_STATUS_NOT_SUPPORTED); + +	if (cp->enable != 0x00 && cp->enable != 0x01) +		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, +				       MGMT_STATUS_INVALID_PARAMS); + +	hci_dev_lock(hdev); + +	cmd = mgmt_pending_add(sk, MGMT_OP_SET_MESH_RECEIVER, hdev, data, len); +	if (!cmd) +		err = -ENOMEM; +	else +		err = hci_cmd_sync_queue(hdev, set_mesh_sync, cmd, +					 set_mesh_complete); + +	if (err < 0) { +		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_MESH_RECEIVER, +				      MGMT_STATUS_FAILED); + +		if (cmd) +			mgmt_pending_remove(cmd); +	} + +	hci_dev_unlock(hdev); +	return err; +} + +static void mesh_send_start_complete(struct hci_dev *hdev, void *data, int err) +{ +	struct mgmt_mesh_tx *mesh_tx = data; +	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param; +	unsigned long mesh_send_interval; +	u8 mgmt_err = mgmt_status(err); + +	/* Report any errors here, but don't report completion */ + +	if (mgmt_err) { +		hci_dev_clear_flag(hdev, HCI_MESH_SENDING); +		/* Send Complete Error Code for handle */ +		mesh_send_complete(hdev, mesh_tx, false); +		return; +	} + +	mesh_send_interval = msecs_to_jiffies((send->cnt) * 25); +	queue_delayed_work(hdev->req_workqueue, &hdev->mesh_send_done, +			   mesh_send_interval); +} + +static int mesh_send_sync(struct hci_dev *hdev, void *data) +{ +	struct mgmt_mesh_tx *mesh_tx = data; +	struct mgmt_cp_mesh_send *send = (void *)mesh_tx->param; +	struct adv_info *adv, *next_instance; +	u8 instance = hdev->le_num_of_adv_sets + 1; +	u16 timeout, duration; +	int err = 0; + +	if (hdev->le_num_of_adv_sets <= hdev->adv_instance_cnt) +		return MGMT_STATUS_BUSY; + +	timeout = 1000; +	duration = send->cnt * INTERVAL_TO_MS(hdev->le_adv_max_interval); +	adv = hci_add_adv_instance(hdev, instance, 0, +				   send->adv_data_len, send->adv_data, +				   0, NULL, +				   timeout, duration, +				   HCI_ADV_TX_POWER_NO_PREFERENCE, +				   hdev->le_adv_min_interval, +				   
hdev->le_adv_max_interval, +				   mesh_tx->handle); + +	if (!IS_ERR(adv)) +		mesh_tx->instance = instance; +	else +		err = PTR_ERR(adv); + +	if (hdev->cur_adv_instance == instance) { +		/* If the currently advertised instance is being changed then +		 * cancel the current advertising and schedule the next +		 * instance. If there is only one instance then the overridden +		 * advertising data will be visible right away. +		 */ +		cancel_adv_timeout(hdev); + +		next_instance = hci_get_next_instance(hdev, instance); +		if (next_instance) +			instance = next_instance->instance; +		else +			instance = 0; +	} else if (hdev->adv_instance_timeout) { +		/* Immediately advertise the new instance if no other, or +		 * let it go naturally from queue if ADV is already happening +		 */ +		instance = 0; +	} + +	if (instance) +		return hci_schedule_adv_instance_sync(hdev, instance, true); + +	return err; +} + +static void send_count(struct mgmt_mesh_tx *mesh_tx, void *data) +{ +	struct mgmt_rp_mesh_read_features *rp = data; + +	if (rp->used_handles >= rp->max_handles) +		return; + +	rp->handles[rp->used_handles++] = mesh_tx->handle; +} + +static int mesh_features(struct sock *sk, struct hci_dev *hdev, +			 void *data, u16 len) +{ +	struct mgmt_rp_mesh_read_features rp; + +	if (!lmp_le_capable(hdev) || +	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) +		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, +				       MGMT_STATUS_NOT_SUPPORTED); + +	memset(&rp, 0, sizeof(rp)); +	rp.index = cpu_to_le16(hdev->id); +	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) +		rp.max_handles = MESH_HANDLES_MAX; + +	hci_dev_lock(hdev); + +	if (rp.max_handles) +		mgmt_mesh_foreach(hdev, send_count, &rp, sk); + +	mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_READ_FEATURES, 0, &rp, +			  rp.used_handles + sizeof(rp) - MESH_HANDLES_MAX); + +	hci_dev_unlock(hdev); +	return 0; +} + +static int send_cancel(struct hci_dev *hdev, void *data) +{ +	struct mgmt_pending_cmd *cmd = data; +	struct mgmt_cp_mesh_send_cancel *cancel = (void *)cmd->param; +	struct mgmt_mesh_tx *mesh_tx; + +	if (!cancel->handle) { +		do { +			mesh_tx = mgmt_mesh_next(hdev, cmd->sk); + +			if (mesh_tx) +				mesh_send_complete(hdev, mesh_tx, false); +		} while (mesh_tx); +	} else { +		mesh_tx = mgmt_mesh_find(hdev, cancel->handle); + +		if (mesh_tx && mesh_tx->sk == cmd->sk) +			mesh_send_complete(hdev, mesh_tx, false); +	} + +	mgmt_cmd_complete(cmd->sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, +			  0, NULL, 0); +	mgmt_pending_free(cmd); + +	return 0; +} + +static int mesh_send_cancel(struct sock *sk, struct hci_dev *hdev, +			    void *data, u16 len) +{ +	struct mgmt_pending_cmd *cmd; +	int err; + +	if (!lmp_le_capable(hdev) || +	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) +		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, +				       MGMT_STATUS_NOT_SUPPORTED); + +	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) +		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, +				       MGMT_STATUS_REJECTED); + +	hci_dev_lock(hdev); +	cmd = mgmt_pending_new(sk, MGMT_OP_MESH_SEND_CANCEL, hdev, data, len); +	if (!cmd) +		err = -ENOMEM; +	else +		err = hci_cmd_sync_queue(hdev, send_cancel, cmd, NULL); + +	if (err < 0) { +		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND_CANCEL, +				      MGMT_STATUS_FAILED); + +		if (cmd) +			mgmt_pending_free(cmd); +	} + +	hci_dev_unlock(hdev); +	return err; +} + +static int mesh_send(struct sock *sk, struct hci_dev *hdev, void *data, u16 len) +{ +	struct mgmt_mesh_tx *mesh_tx; +	
struct mgmt_cp_mesh_send *send = data; +	struct mgmt_rp_mesh_read_features rp; +	bool sending; +	int err = 0; + +	if (!lmp_le_capable(hdev) || +	    !hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) +		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, +				       MGMT_STATUS_NOT_SUPPORTED); +	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED) || +	    len <= MGMT_MESH_SEND_SIZE || +	    len > (MGMT_MESH_SEND_SIZE + 31)) +		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, +				       MGMT_STATUS_REJECTED); + +	hci_dev_lock(hdev); + +	memset(&rp, 0, sizeof(rp)); +	rp.max_handles = MESH_HANDLES_MAX; + +	mgmt_mesh_foreach(hdev, send_count, &rp, sk); + +	if (rp.max_handles <= rp.used_handles) { +		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, +				      MGMT_STATUS_BUSY); +		goto done; +	} + +	sending = hci_dev_test_flag(hdev, HCI_MESH_SENDING); +	mesh_tx = mgmt_mesh_add(sk, hdev, send, len); + +	if (!mesh_tx) +		err = -ENOMEM; +	else if (!sending) +		err = hci_cmd_sync_queue(hdev, mesh_send_sync, mesh_tx, +					 mesh_send_start_complete); + +	if (err < 0) { +		bt_dev_err(hdev, "Send Mesh Failed %d", err); +		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_MESH_SEND, +				      MGMT_STATUS_FAILED); + +		if (mesh_tx) { +			if (sending) +				mgmt_mesh_remove(mesh_tx); +		} +	} else { +		hci_dev_set_flag(hdev, HCI_MESH_SENDING); + +		mgmt_cmd_complete(sk, hdev->id, MGMT_OP_MESH_SEND, 0, +				  &mesh_tx->handle, 1); +	} + +done: +	hci_dev_unlock(hdev); +	return err; +} +  static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)  {  	struct mgmt_mode *cp = data; @@ -2131,9 +2508,6 @@ static int set_le(struct sock *sk, struct hci_dev *hdev, void *data, u16 len)  	val = !!cp->val;  	enabled = lmp_host_le_capable(hdev); -	if (!val) -		hci_req_clear_adv_instance(hdev, NULL, NULL, 0x00, true); -  	if (!hdev_is_powered(hdev) || val == enabled) {  		bool changed = false; @@ -3186,6 +3560,18 @@ unlock:  	return err;  } +static int abort_conn_sync(struct hci_dev *hdev, void *data) +{ +	struct hci_conn *conn; +	u16 handle = PTR_ERR(data); + +	conn = hci_conn_hash_lookup_handle(hdev, handle); +	if (!conn) +		return 0; + +	return hci_abort_conn_sync(hdev, conn, HCI_ERROR_REMOTE_USER_TERM); +} +  static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,  			      u16 len)  { @@ -3236,7 +3622,8 @@ static int cancel_pair_device(struct sock *sk, struct hci_dev *hdev, void *data,  					      le_addr_type(addr->type));  	if (conn->conn_reason == CONN_REASON_PAIR_DEVICE) -		hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM); +		hci_cmd_sync_queue(hdev, abort_conn_sync, ERR_PTR(conn->handle), +				   NULL);  unlock:  	hci_dev_unlock(hdev); @@ -3991,17 +4378,28 @@ static const u8 iso_socket_uuid[16] = {  	0x6a, 0x49, 0xe0, 0x05, 0x88, 0xf1, 0xba, 0x6f,  }; +/* 2ce463d7-7a03-4d8d-bf05-5f24e8f36e76 */ +static const u8 mgmt_mesh_uuid[16] = { +	0x76, 0x6e, 0xf3, 0xe8, 0x24, 0x5f, 0x05, 0xbf, +	0x8d, 0x4d, 0x03, 0x7a, 0xd7, 0x63, 0xe4, 0x2c, +}; +  static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,  				  void *data, u16 data_len)  { -	char buf[122];   /* Enough space for 6 features: 2 + 20 * 6 */ -	struct mgmt_rp_read_exp_features_info *rp = (void *)buf; +	struct mgmt_rp_read_exp_features_info *rp; +	size_t len;  	u16 idx = 0;  	u32 flags; +	int status;  	bt_dev_dbg(hdev, "sock %p", sk); -	memset(&buf, 0, sizeof(buf)); +	/* Enough space for 7 features */ +	len = sizeof(*rp) + (sizeof(rp->features[0]) * 7); +	rp = kzalloc(len, GFP_KERNEL); +	if (!rp) 
+		return -ENOMEM;  #ifdef CONFIG_BT_FEATURE_DEBUG  	if (!hdev) { @@ -4065,6 +4463,17 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,  		idx++;  	} +	if (hdev && lmp_le_capable(hdev)) { +		if (hci_dev_test_flag(hdev, HCI_MESH_EXPERIMENTAL)) +			flags = BIT(0); +		else +			flags = 0; + +		memcpy(rp->features[idx].uuid, mgmt_mesh_uuid, 16); +		rp->features[idx].flags = cpu_to_le32(flags); +		idx++; +	} +  	rp->feature_count = cpu_to_le16(idx);  	/* After reading the experimental features information, enable @@ -4072,9 +4481,12 @@ static int read_exp_features_info(struct sock *sk, struct hci_dev *hdev,  	 */  	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); -	return mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, -				 MGMT_OP_READ_EXP_FEATURES_INFO, -				 0, rp, sizeof(*rp) + (20 * idx)); +	status = mgmt_cmd_complete(sk, hdev ? hdev->id : MGMT_INDEX_NONE, +				   MGMT_OP_READ_EXP_FEATURES_INFO, +				   0, rp, sizeof(*rp) + (20 * idx)); + +	kfree(rp); +	return status;  }  static int exp_ll_privacy_feature_changed(bool enabled, struct hci_dev *hdev, @@ -4202,6 +4614,63 @@ static int set_debug_func(struct sock *sk, struct hci_dev *hdev,  }  #endif +static int set_mgmt_mesh_func(struct sock *sk, struct hci_dev *hdev, +			      struct mgmt_cp_set_exp_feature *cp, u16 data_len) +{ +	struct mgmt_rp_set_exp_feature rp; +	bool val, changed; +	int err; + +	/* Command requires to use the controller index */ +	if (!hdev) +		return mgmt_cmd_status(sk, MGMT_INDEX_NONE, +				       MGMT_OP_SET_EXP_FEATURE, +				       MGMT_STATUS_INVALID_INDEX); + +	/* Changes can only be made when controller is powered down */ +	if (hdev_is_powered(hdev)) +		return mgmt_cmd_status(sk, hdev->id, +				       MGMT_OP_SET_EXP_FEATURE, +				       MGMT_STATUS_REJECTED); + +	/* Parameters are limited to a single octet */ +	if (data_len != MGMT_SET_EXP_FEATURE_SIZE + 1) +		return mgmt_cmd_status(sk, hdev->id, +				       MGMT_OP_SET_EXP_FEATURE, +				       MGMT_STATUS_INVALID_PARAMS); + +	/* Only boolean on/off is supported */ +	if (cp->param[0] != 0x00 && cp->param[0] != 0x01) +		return mgmt_cmd_status(sk, hdev->id, +				       MGMT_OP_SET_EXP_FEATURE, +				       MGMT_STATUS_INVALID_PARAMS); + +	val = !!cp->param[0]; + +	if (val) { +		changed = !hci_dev_test_and_set_flag(hdev, +						     HCI_MESH_EXPERIMENTAL); +	} else { +		hci_dev_clear_flag(hdev, HCI_MESH); +		changed = hci_dev_test_and_clear_flag(hdev, +						      HCI_MESH_EXPERIMENTAL); +	} + +	memcpy(rp.uuid, mgmt_mesh_uuid, 16); +	rp.flags = cpu_to_le32(val ? 
BIT(0) : 0); + +	hci_sock_set_flag(sk, HCI_MGMT_EXP_FEATURE_EVENTS); + +	err = mgmt_cmd_complete(sk, hdev->id, +				MGMT_OP_SET_EXP_FEATURE, 0, +				&rp, sizeof(rp)); + +	if (changed) +		exp_feature_changed(hdev, mgmt_mesh_uuid, val, sk); + +	return err; +} +  static int set_rpa_resolution_func(struct sock *sk, struct hci_dev *hdev,  				   struct mgmt_cp_set_exp_feature *cp,  				   u16 data_len) @@ -4517,6 +4986,7 @@ static const struct mgmt_exp_feature {  #ifdef CONFIG_BT_FEATURE_DEBUG  	EXP_FEAT(debug_uuid, set_debug_func),  #endif +	EXP_FEAT(mgmt_mesh_uuid, set_mgmt_mesh_func),  	EXP_FEAT(rpa_resolution_uuid, set_rpa_resolution_func),  	EXP_FEAT(quality_report_uuid, set_quality_report_func),  	EXP_FEAT(offload_codecs_uuid, set_offload_codec_func), @@ -5981,6 +6451,7 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,  	if (!hdev_is_powered(hdev) ||  	    (val == hci_dev_test_flag(hdev, HCI_ADVERTISING) &&  	     (cp->val == 0x02) == hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE)) || +	    hci_dev_test_flag(hdev, HCI_MESH) ||  	    hci_conn_num(hdev, LE_LINK) > 0 ||  	    (hci_dev_test_flag(hdev, HCI_LE_SCAN) &&  	     hdev->le_scan_type == LE_SCAN_ACTIVE)) { @@ -7909,8 +8380,7 @@ static u32 get_supported_adv_flags(struct hci_dev *hdev)  	/* In extended adv TX_POWER returned from Set Adv Param  	 * will be always valid.  	 */ -	if ((hdev->adv_tx_power != HCI_TX_POWER_INVALID) || -	    ext_adv_capable(hdev)) +	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID || ext_adv_capable(hdev))  		flags |= MGMT_ADV_FLAG_TX_POWER;  	if (ext_adv_capable(hdev)) { @@ -7963,8 +8433,14 @@ static int read_adv_features(struct sock *sk, struct hci_dev *hdev,  	instance = rp->instance;  	list_for_each_entry(adv_instance, &hdev->adv_instances, list) { -		*instance = adv_instance->instance; -		instance++; +		/* Only instances 1-le_num_of_adv_sets are externally visible */ +		if (adv_instance->instance <= hdev->adv_instance_cnt) { +			*instance = adv_instance->instance; +			instance++; +		} else { +			rp->num_instances--; +			rp_len--; +		}  	}  	hci_dev_unlock(hdev); @@ -8226,7 +8702,7 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,  				   timeout, duration,  				   HCI_ADV_TX_POWER_NO_PREFERENCE,  				   hdev->le_adv_min_interval, -				   hdev->le_adv_max_interval); +				   hdev->le_adv_max_interval, 0);  	if (IS_ERR(adv)) {  		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,  				      MGMT_STATUS_FAILED); @@ -8430,7 +8906,7 @@ static int add_ext_adv_params(struct sock *sk, struct hci_dev *hdev,  	/* Create advertising instance with no advertising or response data */  	adv = hci_add_adv_instance(hdev, cp->instance, flags, 0, NULL, 0, NULL,  				   timeout, duration, tx_power, min_interval, -				   max_interval); +				   max_interval, 0);  	if (IS_ERR(adv)) {  		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_EXT_ADV_PARAMS, @@ -8876,8 +9352,13 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {  	{ add_ext_adv_data,        MGMT_ADD_EXT_ADV_DATA_SIZE,  						HCI_MGMT_VAR_LEN },  	{ add_adv_patterns_monitor_rssi, -				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE, +				   MGMT_ADD_ADV_PATTERNS_MONITOR_RSSI_SIZE }, +	{ set_mesh,                MGMT_SET_MESH_RECEIVER_SIZE, +						HCI_MGMT_VAR_LEN }, +	{ mesh_features,           MGMT_MESH_READ_FEATURES_SIZE }, +	{ mesh_send,               MGMT_MESH_SEND_SIZE,  						HCI_MGMT_VAR_LEN }, +	{ mesh_send_cancel,        MGMT_MESH_SEND_CANCEL_SIZE },  };  void mgmt_index_added(struct hci_dev *hdev) 
@@ -9817,14 +10298,86 @@ static void mgmt_adv_monitor_device_found(struct hci_dev *hdev,  		kfree_skb(skb);  } +static void mesh_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, +			      u8 addr_type, s8 rssi, u32 flags, u8 *eir, +			      u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, +			      u64 instant) +{ +	struct sk_buff *skb; +	struct mgmt_ev_mesh_device_found *ev; +	int i, j; + +	if (!hdev->mesh_ad_types[0]) +		goto accepted; + +	/* Scan for requested AD types */ +	if (eir_len > 0) { +		for (i = 0; i + 1 < eir_len; i += eir[i] + 1) { +			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) { +				if (!hdev->mesh_ad_types[j]) +					break; + +				if (hdev->mesh_ad_types[j] == eir[i + 1]) +					goto accepted; +			} +		} +	} + +	if (scan_rsp_len > 0) { +		for (i = 0; i + 1 < scan_rsp_len; i += scan_rsp[i] + 1) { +			for (j = 0; j < sizeof(hdev->mesh_ad_types); j++) { +				if (!hdev->mesh_ad_types[j]) +					break; + +				if (hdev->mesh_ad_types[j] == scan_rsp[i + 1]) +					goto accepted; +			} +		} +	} + +	return; + +accepted: +	skb = mgmt_alloc_skb(hdev, MGMT_EV_MESH_DEVICE_FOUND, +			     sizeof(*ev) + eir_len + scan_rsp_len); +	if (!skb) +		return; + +	ev = skb_put(skb, sizeof(*ev)); + +	bacpy(&ev->addr.bdaddr, bdaddr); +	ev->addr.type = link_to_bdaddr(LE_LINK, addr_type); +	ev->rssi = rssi; +	ev->flags = cpu_to_le32(flags); +	ev->instant = cpu_to_le64(instant); + +	if (eir_len > 0) +		/* Copy EIR or advertising data into event */ +		skb_put_data(skb, eir, eir_len); + +	if (scan_rsp_len > 0) +		/* Append scan response data to event */ +		skb_put_data(skb, scan_rsp, scan_rsp_len); + +	ev->eir_len = cpu_to_le16(eir_len + scan_rsp_len); + +	mgmt_event_skb(skb, NULL); +} +  void mgmt_device_found(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,  		       u8 addr_type, u8 *dev_class, s8 rssi, u32 flags, -		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len) +		       u8 *eir, u16 eir_len, u8 *scan_rsp, u8 scan_rsp_len, +		       u64 instant)  {  	struct sk_buff *skb;  	struct mgmt_ev_device_found *ev;  	bool report_device = hci_discovery_active(hdev); +	if (hci_dev_test_flag(hdev, HCI_MESH) && link_type == LE_LINK) +		mesh_device_found(hdev, bdaddr, addr_type, rssi, flags, +				  eir, eir_len, scan_rsp, scan_rsp_len, +				  instant); +  	/* Don't send events for a non-kernel initiated discovery. With  	 * LE one exception is if we have pend_le_reports > 0 in which  	 * case we're doing passive scanning and want these events. 
@@ -9983,3 +10536,22 @@ void mgmt_exit(void)  {  	hci_mgmt_chan_unregister(&chan);  } + +void mgmt_cleanup(struct sock *sk) +{ +	struct mgmt_mesh_tx *mesh_tx; +	struct hci_dev *hdev; + +	read_lock(&hci_dev_list_lock); + +	list_for_each_entry(hdev, &hci_dev_list, list) { +		do { +			mesh_tx = mgmt_mesh_next(hdev, sk); + +			if (mesh_tx) +				mesh_send_complete(hdev, mesh_tx, true); +		} while (mesh_tx); +	} + +	read_unlock(&hci_dev_list_lock); +} diff --git a/net/bluetooth/mgmt_util.c b/net/bluetooth/mgmt_util.c index b69cfed62088..0115f783bde8 100644 --- a/net/bluetooth/mgmt_util.c +++ b/net/bluetooth/mgmt_util.c @@ -314,3 +314,77 @@ void mgmt_pending_remove(struct mgmt_pending_cmd *cmd)  	list_del(&cmd->list);  	mgmt_pending_free(cmd);  } + +void mgmt_mesh_foreach(struct hci_dev *hdev, +		       void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data), +		       void *data, struct sock *sk) +{ +	struct mgmt_mesh_tx *mesh_tx, *tmp; + +	list_for_each_entry_safe(mesh_tx, tmp, &hdev->mgmt_pending, list) { +		if (!sk || mesh_tx->sk == sk) +			cb(mesh_tx, data); +	} +} + +struct mgmt_mesh_tx *mgmt_mesh_next(struct hci_dev *hdev, struct sock *sk) +{ +	struct mgmt_mesh_tx *mesh_tx; + +	if (list_empty(&hdev->mesh_pending)) +		return NULL; + +	list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) { +		if (!sk || mesh_tx->sk == sk) +			return mesh_tx; +	} + +	return NULL; +} + +struct mgmt_mesh_tx *mgmt_mesh_find(struct hci_dev *hdev, u8 handle) +{ +	struct mgmt_mesh_tx *mesh_tx; + +	if (list_empty(&hdev->mesh_pending)) +		return NULL; + +	list_for_each_entry(mesh_tx, &hdev->mesh_pending, list) { +		if (mesh_tx->handle == handle) +			return mesh_tx; +	} + +	return NULL; +} + +struct mgmt_mesh_tx *mgmt_mesh_add(struct sock *sk, struct hci_dev *hdev, +				   void *data, u16 len) +{ +	struct mgmt_mesh_tx *mesh_tx; + +	mesh_tx = kzalloc(sizeof(*mesh_tx), GFP_KERNEL); +	if (!mesh_tx) +		return NULL; + +	hdev->mesh_send_ref++; +	if (!hdev->mesh_send_ref) +		hdev->mesh_send_ref++; + +	mesh_tx->handle = hdev->mesh_send_ref; +	mesh_tx->index = hdev->id; +	memcpy(mesh_tx->param, data, len); +	mesh_tx->param_len = len; +	mesh_tx->sk = sk; +	sock_hold(sk); + +	list_add_tail(&mesh_tx->list, &hdev->mesh_pending); + +	return mesh_tx; +} + +void mgmt_mesh_remove(struct mgmt_mesh_tx *mesh_tx) +{ +	list_del(&mesh_tx->list); +	sock_put(mesh_tx->sk); +	kfree(mesh_tx); +} diff --git a/net/bluetooth/mgmt_util.h b/net/bluetooth/mgmt_util.h index 98e40395a383..6a8b7e84293d 100644 --- a/net/bluetooth/mgmt_util.h +++ b/net/bluetooth/mgmt_util.h @@ -20,6 +20,16 @@     SOFTWARE IS DISCLAIMED.  
*/ +struct mgmt_mesh_tx { +	struct list_head list; +	int index; +	size_t param_len; +	struct sock *sk; +	u8 handle; +	u8 instance; +	u8 param[sizeof(struct mgmt_cp_mesh_send) + 29]; +}; +  struct mgmt_pending_cmd {  	struct list_head list;  	u16 opcode; @@ -59,3 +69,11 @@ struct mgmt_pending_cmd *mgmt_pending_new(struct sock *sk, u16 opcode,  					  void *data, u16 len);  void mgmt_pending_free(struct mgmt_pending_cmd *cmd);  void mgmt_pending_remove(struct mgmt_pending_cmd *cmd); +void mgmt_mesh_foreach(struct hci_dev *hdev, +		       void (*cb)(struct mgmt_mesh_tx *mesh_tx, void *data), +		       void *data, struct sock *sk); +struct mgmt_mesh_tx *mgmt_mesh_find(struct hci_dev *hdev, u8 handle); +struct mgmt_mesh_tx *mgmt_mesh_next(struct hci_dev *hdev, struct sock *sk); +struct mgmt_mesh_tx *mgmt_mesh_add(struct sock *sk, struct hci_dev *hdev, +				   void *data, u16 len); +void mgmt_mesh_remove(struct mgmt_mesh_tx *mesh_tx); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 4bf4ea6cbb5e..21e24da4847f 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -902,7 +902,10 @@ static int rfcomm_sock_shutdown(struct socket *sock, int how)  	lock_sock(sk);  	if (!sk->sk_shutdown) {  		sk->sk_shutdown = SHUTDOWN_MASK; + +		release_sock(sk);  		__rfcomm_sock_close(sk); +		lock_sock(sk);  		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&  		    !(current->flags & PF_EXITING))  | 
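
For reference, the AD-type filter walk added in mesh_device_found() above follows the standard length/type/value layout of LE advertising data: each structure starts with a length octet, followed by the AD type and its payload, and a report is forwarded as soon as one structure carries a requested type (an empty filter accepts everything). Below is a minimal stand-alone C sketch of that matching loop; the names ad_matches_filter and FILTER_SLOTS are illustrative only and not part of the patch — in the kernel the filter array is hdev->mesh_ad_types, filled in by set_mesh_sync().

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define FILTER_SLOTS 16	/* illustrative size; the real array lives in struct hci_dev */

/* Walk advertising (or scan response) data as length/type/value structures
 * and report whether any of them carries one of the requested AD types.
 * A zero entry terminates the filter list, and an empty filter means
 * "forward all advertising packets", mirroring mesh_device_found().
 */
static bool ad_matches_filter(const uint8_t *data, size_t len,
			      const uint8_t filter[FILTER_SLOTS])
{
	size_t i;
	int j;

	if (!filter[0])
		return true;	/* no filter configured: accept everything */

	for (i = 0; i + 1 < len; i += data[i] + 1) {
		for (j = 0; j < FILTER_SLOTS; j++) {
			if (!filter[j])
				break;	/* end of requested AD types */

			if (filter[j] == data[i + 1])
				return true;	/* data[i + 1] is the AD type */
		}
	}

	return false;
}

In the patch the same check is applied twice, once to the advertising data and once to the scan response, and only if neither matches is the MGMT_EV_MESH_DEVICE_FOUND event suppressed.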
