Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next
Johan Hedberg says:

====================
pull request: bluetooth-next 2020-03-19

Here's the main bluetooth-next pull request for the 5.7 kernel.

 - Added wideband speech support to mgmt and the ability for HCI drivers to declare support for it.
 - Added initial support for L2CAP Enhanced Credit Based Mode
 - Fixed suspend handling for several use cases
 - Fixed Extended Advertising related issues
 - Added support for Realtek 8822CE device
 - Added DT bindings for QTI chip WCN3991
 - Cleanups to replace zero-length arrays with flexible-array members
 - Several other smaller cleanups & fixes
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit 43861da75e
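The wideband speech item in the pull message reduces to a driver-visible quirk plus a new mgmt setting. A minimal sketch of the driver side, taken from the btusb hunks below (only what this series itself adds, nothing invented):

	/* In the transport driver's probe path: declare that the controller
	 * supports wideband speech, since the hardware has no reliable way
	 * to report it; mgmt then exposes MGMT_OP_SET_WIDEBAND_SPEECH and
	 * the MGMT_SETTING_WIDEBAND_SPEECH bit for this controller.
	 */
	if (id->driver_info & BTUSB_WIDEBAND_SPEECH)
		set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);

Per the comment added to hci.h below, the quirk must be set before hci_register_dev() is called.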
@@ -31,6 +31,7 @@ Optional properties for compatible string qcom,wcn399x-bt:

- max-speed: see Documentation/devicetree/bindings/serial/slave-device.txt
- firmware-name: specify the name of nvm firmware to load
- clocks: clock provided to the controller

Examples:

@@ -57,5 +58,6 @@ serial@898000 {
vddch0-supply = <&vreg_l25a_3p3>;
max-speed = <3200000>;
firmware-name = "crnv21.bin";
clocks = <&rpmhcc RPMH_RF_CLK2>;
};
};
@@ -1,37 +1,40 @@
.. SPDX-License-Identifier: GPL-2.0

Netdev private dataroom for 6lowpan interfaces:
==============================================
Netdev private dataroom for 6lowpan interfaces
==============================================

All 6lowpan able net devices, means all interfaces with ARPHRD_6LOWPAN,
must have "struct lowpan_priv" placed at beginning of netdev_priv.

The priv_size of each interface should be calculate by:
The priv_size of each interface should be calculate by::

dev->priv_size = LOWPAN_PRIV_SIZE(LL_6LOWPAN_PRIV_DATA);

Where LL_PRIV_6LOWPAN_DATA is sizeof linklayer 6lowpan private data struct.
To access the LL_PRIV_6LOWPAN_DATA structure you can cast:
To access the LL_PRIV_6LOWPAN_DATA structure you can cast::

lowpan_priv(dev)-priv;

to your LL_6LOWPAN_PRIV_DATA structure.

Before registering the lowpan netdev interface you must run:
Before registering the lowpan netdev interface you must run::

lowpan_netdev_setup(dev, LOWPAN_LLTYPE_FOOBAR);

wheres LOWPAN_LLTYPE_FOOBAR is a define for your 6LoWPAN linklayer type of
enum lowpan_lltypes.

Example to evaluate the private usually you can do:
Example to evaluate the private usually you can do::

static inline struct lowpan_priv_foobar *
lowpan_foobar_priv(struct net_device *dev)
{
static inline struct lowpan_priv_foobar *
lowpan_foobar_priv(struct net_device *dev)
{
return (struct lowpan_priv_foobar *)lowpan_priv(dev)->priv;
}
}

switch (dev->type) {
case ARPHRD_6LOWPAN:
switch (dev->type) {
case ARPHRD_6LOWPAN:
lowpan_priv = lowpan_priv(dev);
/* do great stuff which is ARPHRD_6LOWPAN related */
switch (lowpan_priv->lltype) {
@@ -42,8 +45,8 @@ case ARPHRD_6LOWPAN:
...
}
break;
...
}
...
}

In case of generic 6lowpan branch ("net/6lowpan") you can remove the check
on ARPHRD_6LOWPAN, because you can be sure that these function are called
@@ -34,6 +34,7 @@ Contents:
tls
tls-offload
nfc
6lowpan

.. only:: subproject and html
@@ -176,7 +176,7 @@ L: linux-wpan@vger.kernel.org
S: Maintained
F: net/6lowpan/
F: include/net/6lowpan.h
F: Documentation/networking/6lowpan.txt
F: Documentation/networking/6lowpan.rst

6PACK NETWORK DRIVER FOR AX.25
M: Andreas Koensgen <ajk@comnets.uni-bremen.de>
@@ -211,12 +211,12 @@ config BT_HCIUART_RTL
depends on BT_HCIUART
depends on BT_HCIUART_SERDEV
depends on GPIOLIB
depends on ACPI
depends on (ACPI || SERIAL_DEV_CTRL_TTYPORT)
select BT_HCIUART_3WIRE
select BT_RTL
help
The Realtek protocol support enables Bluetooth HCI over 3-Wire
serial port internface for Realtek Bluetooth controllers.
serial port interface for Realtek Bluetooth controllers.

Say Y here to compile support for Realtek protocol.
@@ -133,8 +133,8 @@ static int bfusb_send_bulk(struct bfusb_data *data, struct sk_buff *skb)

err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
BT_ERR("%s bulk tx submit failed urb %p err %d",
data->hdev->name, urb, err);
bt_dev_err(data->hdev, "bulk tx submit failed urb %p err %d",
urb, err);
skb_unlink(skb, &data->pending_q);
usb_free_urb(urb);
} else
@@ -232,8 +232,8 @@ static int bfusb_rx_submit(struct bfusb_data *data, struct urb *urb)

err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
BT_ERR("%s bulk rx submit failed urb %p err %d",
data->hdev->name, urb, err);
bt_dev_err(data->hdev, "bulk rx submit failed urb %p err %d",
urb, err);
skb_unlink(skb, &data->pending_q);
kfree_skb(skb);
usb_free_urb(urb);
@@ -247,7 +247,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
BT_DBG("bfusb %p hdr 0x%02x data %p len %d", data, hdr, buf, len);

if (hdr & 0x10) {
BT_ERR("%s error in block", data->hdev->name);
bt_dev_err(data->hdev, "error in block");
kfree_skb(data->reassembly);
data->reassembly = NULL;
return -EIO;
@@ -259,13 +259,13 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
int pkt_len = 0;

if (data->reassembly) {
BT_ERR("%s unexpected start block", data->hdev->name);
bt_dev_err(data->hdev, "unexpected start block");
kfree_skb(data->reassembly);
data->reassembly = NULL;
}

if (len < 1) {
BT_ERR("%s no packet type found", data->hdev->name);
bt_dev_err(data->hdev, "no packet type found");
return -EPROTO;
}

@@ -277,7 +277,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
struct hci_event_hdr *hdr = (struct hci_event_hdr *) buf;
pkt_len = HCI_EVENT_HDR_SIZE + hdr->plen;
} else {
BT_ERR("%s event block is too short", data->hdev->name);
bt_dev_err(data->hdev, "event block is too short");
return -EILSEQ;
}
break;
@@ -287,7 +287,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
struct hci_acl_hdr *hdr = (struct hci_acl_hdr *) buf;
pkt_len = HCI_ACL_HDR_SIZE + __le16_to_cpu(hdr->dlen);
} else {
BT_ERR("%s data block is too short", data->hdev->name);
bt_dev_err(data->hdev, "data block is too short");
return -EILSEQ;
}
break;
@@ -297,7 +297,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
struct hci_sco_hdr *hdr = (struct hci_sco_hdr *) buf;
pkt_len = HCI_SCO_HDR_SIZE + hdr->dlen;
} else {
BT_ERR("%s audio block is too short", data->hdev->name);
bt_dev_err(data->hdev, "audio block is too short");
return -EILSEQ;
}
break;
@@ -305,7 +305,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch

skb = bt_skb_alloc(pkt_len, GFP_ATOMIC);
if (!skb) {
BT_ERR("%s no memory for the packet", data->hdev->name);
bt_dev_err(data->hdev, "no memory for the packet");
return -ENOMEM;
}

@@ -314,7 +314,7 @@ static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned ch
data->reassembly = skb;
} else {
if (!data->reassembly) {
BT_ERR("%s unexpected continuation block", data->hdev->name);
bt_dev_err(data->hdev, "unexpected continuation block");
return -EIO;
}
}
@@ -366,8 +366,7 @@ static void bfusb_rx_complete(struct urb *urb)
}

if (count < len) {
BT_ERR("%s block extends over URB buffer ranges",
data->hdev->name);
bt_dev_err(data->hdev, "block extends over URB buffer ranges");
}

if ((hdr & 0xe1) == 0xc1)
@@ -391,8 +390,8 @@ resubmit:

err = usb_submit_urb(urb, GFP_ATOMIC);
if (err) {
BT_ERR("%s bulk resubmit failed urb %p err %d",
data->hdev->name, urb, err);
bt_dev_err(data->hdev, "bulk resubmit failed urb %p err %d",
urb, err);
}

unlock:
@@ -477,7 +476,7 @@ static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
/* Max HCI frame size seems to be 1511 + 1 */
nskb = bt_skb_alloc(count + 32, GFP_KERNEL);
if (!nskb) {
BT_ERR("Can't allocate memory for new packet");
bt_dev_err(hdev, "Can't allocate memory for new packet");
return -ENOMEM;
}
@@ -376,13 +376,13 @@ struct ibt_cp_reg_access {
__le32 addr;
__u8 mode;
__u8 len;
__u8 data[0];
__u8 data[];
} __packed;

struct ibt_rp_reg_access {
__u8 status;
__le32 addr;
__u8 data[0];
__u8 data[];
} __packed;

static int regmap_ibt_read(void *context, const void *addr, size_t reg_size,
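The data[0] to data[] conversions in this header (and repeated in btqca.h, btrtl.h, the HCI headers and elsewhere below) are the zero-length-array to flexible-array-member cleanup called out in the pull message: the C99 flexible array member is the standard spelling of a trailing variable-length payload, while the old [0] form is a GNU extension that compilers and bounds checkers have to treat as a real zero-sized array. A small self-contained sketch of the allocation pattern in plain userspace C (struct and function names here are made up for illustration, not kernel code):

	#include <stdlib.h>
	#include <string.h>

	struct reg_access {
		unsigned int addr;
		unsigned char len;
		unsigned char data[];	/* flexible array member, contributes no size */
	};

	static struct reg_access *reg_access_alloc(const void *payload,
						   unsigned char len)
	{
		/* one allocation for the header plus the trailing payload;
		 * sizeof(*ra) does not include the flexible array
		 */
		struct reg_access *ra = malloc(sizeof(*ra) + len);

		if (!ra)
			return NULL;
		ra->addr = 0;
		ra->len = len;
		memcpy(ra->data, payload, len);
		return ra;
	}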
@@ -139,7 +139,7 @@ int qca_send_pre_shutdown_cmd(struct hci_dev *hdev)
EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd);

static void qca_tlv_check_data(struct qca_fw_config *config,
const struct firmware *fw)
const struct firmware *fw, enum qca_btsoc_type soc_type)
{
const u8 *data;
u32 type_len;
@@ -148,6 +148,7 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
struct tlv_type_hdr *tlv;
struct tlv_type_patch *tlv_patch;
struct tlv_type_nvm *tlv_nvm;
uint8_t nvm_baud_rate = config->user_baud_rate;

tlv = (struct tlv_type_hdr *)fw->data;

@@ -216,7 +217,10 @@ static void qca_tlv_check_data(struct qca_fw_config *config,
tlv_nvm->data[0] |= 0x80;

/* UART Baud Rate */
tlv_nvm->data[2] = config->user_baud_rate;
if (soc_type == QCA_WCN3991)
tlv_nvm->data[1] = nvm_baud_rate;
else
tlv_nvm->data[2] = nvm_baud_rate;

break;

@@ -354,7 +358,7 @@ static int qca_download_firmware(struct hci_dev *hdev,
return ret;
}

qca_tlv_check_data(config, fw);
qca_tlv_check_data(config, fw, soc_type);

segment = fw->data;
remain = fw->size;
@@ -79,7 +79,7 @@ struct qca_fw_config {
struct edl_event_hdr {
__u8 cresp;
__u8 rtype;
__u8 data[0];
__u8 data[];
} __packed;

struct qca_btsoc_version {
@@ -112,12 +112,12 @@ struct tlv_type_nvm {
__le16 tag_len;
__le32 reserve1;
__le32 reserve2;
__u8 data[0];
__u8 data[];
} __packed;

struct tlv_type_hdr {
__le32 type_len;
__u8 data[0];
__u8 data[];
} __packed;

enum qca_btsoc_type {
@@ -136,6 +136,18 @@ static const struct id_table ic_id_table[] = {
.fw_name = "rtl_bt/rtl8761a_fw.bin",
.cfg_name = "rtl_bt/rtl8761a_config" },

/* 8822C with UART interface */
{ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV |
IC_MATCH_FL_HCIBUS,
.lmp_subver = RTL_ROM_LMP_8822B,
.hci_rev = 0x000c,
.hci_ver = 0x0a,
.hci_bus = HCI_UART,
.config_needed = true,
.has_rom_version = true,
.fw_name = "rtl_bt/rtl8822cs_fw.bin",
.cfg_name = "rtl_bt/rtl8822cs_config" },

/* 8822C with USB interface */
{ IC_INFO(RTL_ROM_LMP_8822B, 0xc),
.config_needed = false,
@@ -38,13 +38,13 @@ struct rtl_epatch_header {
struct rtl_vendor_config_entry {
__le16 offset;
__u8 len;
__u8 data[0];
__u8 data[];
} __packed;

struct rtl_vendor_config {
__le32 signature;
__le16 total_len;
struct rtl_vendor_config_entry entry[0];
struct rtl_vendor_config_entry entry[];
} __packed;

#if IS_ENABLED(CONFIG_BT_RTL)
@@ -57,6 +57,7 @@ static struct usb_driver btusb_driver;
#define BTUSB_IFNUM_2 0x80000
#define BTUSB_CW6622 0x100000
#define BTUSB_MEDIATEK 0x200000
#define BTUSB_WIDEBAND_SPEECH 0x400000

static const struct usb_device_id btusb_table[] = {
/* Generic Bluetooth USB device */
@@ -333,15 +334,21 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x1286, 0x204e), .driver_info = BTUSB_MARVELL },

/* Intel Bluetooth devices */
{ USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_NEW |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_NEW |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_NEW |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR },
{ USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL },
{ USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW },
{ USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_NEW |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL |
BTUSB_WIDEBAND_SPEECH },
{ USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_NEW |
BTUSB_WIDEBAND_SPEECH },

/* Other Intel Bluetooth devices */
{ USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
@@ -387,6 +394,7 @@ static const struct usb_device_id blacklist_table[] = {

/* Additional Realtek 8822CE Bluetooth devices */
{ USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x13d3, 0x3548), .driver_info = BTUSB_REALTEK },

/* Silicon Wave based devices */
{ USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE },
@@ -1930,7 +1938,14 @@ static int btusb_setup_intel(struct hci_dev *hdev)
if (err)
return err;

bt_dev_info(hdev, "Intel firmware patch completed and activated");
/* Need build number for downloaded fw patches in
* every power-on boot
*/
err = btintel_read_version(hdev, &ver);
if (err)
return err;
bt_dev_info(hdev, "Intel BT fw patch 0x%02x completed & activated",
ver.fw_patch_num);

goto complete;

@@ -3859,6 +3874,9 @@ static int btusb_probe(struct usb_interface *intf,
if (id->driver_info & BTUSB_BROKEN_ISOC)
data->isoc = NULL;

if (id->driver_info & BTUSB_WIDEBAND_SPEECH)
set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks);

if (id->driver_info & BTUSB_DIGIANSWER) {
data->cmdreq_type = USB_TYPE_VENDOR;
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
@@ -27,7 +27,7 @@ struct ag6xx_data {
struct pbn_entry {
__le32 addr;
__le32 plen;
__u8 data[0];
__u8 data[];
} __packed;

static int ag6xx_open(struct hci_uart *hu)
@@ -71,8 +71,6 @@ static int h4_close(struct hci_uart *hu)
{
struct h4_struct *h4 = hu->priv;

hu->priv = NULL;

BT_DBG("hu %p", hu);

skb_queue_purge(&h4->txq);
@@ -85,7 +83,7 @@ static int h4_close(struct hci_uart *hu)
return 0;
}

/* Enqueue frame for transmittion (padding, crc, etc) */
/* Enqueue frame for transmission (padding, crc, etc) */
static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb)
{
struct h4_struct *h4 = hu->priv;
@ -11,6 +11,7 @@
|
||||
#include <linux/gpio/consumer.h>
|
||||
#include <linux/kernel.h>
|
||||
#include <linux/mod_devicetable.h>
|
||||
#include <linux/of_device.h>
|
||||
#include <linux/serdev.h>
|
||||
#include <linux/skbuff.h>
|
||||
|
||||
@ -177,7 +178,7 @@ static void h5_peer_reset(struct hci_uart *hu)
|
||||
{
|
||||
struct h5 *h5 = hu->priv;
|
||||
|
||||
BT_ERR("Peer device has reset");
|
||||
bt_dev_err(hu->hdev, "Peer device has reset");
|
||||
|
||||
h5->state = H5_UNINITIALIZED;
|
||||
|
||||
@ -437,21 +438,21 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
|
||||
H5_HDR_LEN(hdr));
|
||||
|
||||
if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) {
|
||||
BT_ERR("Invalid header checksum");
|
||||
bt_dev_err(hu->hdev, "Invalid header checksum");
|
||||
h5_reset_rx(h5);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) {
|
||||
BT_ERR("Out-of-order packet arrived (%u != %u)",
|
||||
H5_HDR_SEQ(hdr), h5->tx_ack);
|
||||
bt_dev_err(hu->hdev, "Out-of-order packet arrived (%u != %u)",
|
||||
H5_HDR_SEQ(hdr), h5->tx_ack);
|
||||
h5_reset_rx(h5);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (h5->state != H5_ACTIVE &&
|
||||
H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
|
||||
BT_ERR("Non-link packet received in non-active state");
|
||||
bt_dev_err(hu->hdev, "Non-link packet received in non-active state");
|
||||
h5_reset_rx(h5);
|
||||
return 0;
|
||||
}
|
||||
@ -474,7 +475,7 @@ static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c)
|
||||
|
||||
h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC);
|
||||
if (!h5->rx_skb) {
|
||||
BT_ERR("Can't allocate mem for new packet");
|
||||
bt_dev_err(hu->hdev, "Can't allocate mem for new packet");
|
||||
h5_reset_rx(h5);
|
||||
return -ENOMEM;
|
||||
}
|
||||
@ -550,7 +551,7 @@ static int h5_recv(struct hci_uart *hu, const void *data, int count)
|
||||
|
||||
if (h5->rx_pending > 0) {
|
||||
if (*ptr == SLIP_DELIMITER) {
|
||||
BT_ERR("Too short H5 packet");
|
||||
bt_dev_err(hu->hdev, "Too short H5 packet");
|
||||
h5_reset_rx(h5);
|
||||
continue;
|
||||
}
|
||||
@ -577,13 +578,13 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
|
||||
struct h5 *h5 = hu->priv;
|
||||
|
||||
if (skb->len > 0xfff) {
|
||||
BT_ERR("Packet too long (%u bytes)", skb->len);
|
||||
bt_dev_err(hu->hdev, "Packet too long (%u bytes)", skb->len);
|
||||
kfree_skb(skb);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (h5->state != H5_ACTIVE) {
|
||||
BT_ERR("Ignoring HCI data in non-active state");
|
||||
bt_dev_err(hu->hdev, "Ignoring HCI data in non-active state");
|
||||
kfree_skb(skb);
|
||||
return 0;
|
||||
}
|
||||
@ -600,7 +601,7 @@ static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb)
|
||||
break;
|
||||
|
||||
default:
|
||||
BT_ERR("Unknown packet type %u", hci_skb_pkt_type(skb));
|
||||
bt_dev_err(hu->hdev, "Unknown packet type %u", hci_skb_pkt_type(skb));
|
||||
kfree_skb(skb);
|
||||
break;
|
||||
}
|
||||
@ -656,7 +657,7 @@ static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type,
|
||||
int i;
|
||||
|
||||
if (!valid_packet_type(pkt_type)) {
|
||||
BT_ERR("Unknown packet type %u", pkt_type);
|
||||
bt_dev_err(hu->hdev, "Unknown packet type %u", pkt_type);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@ -733,7 +734,7 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
|
||||
}
|
||||
|
||||
skb_queue_head(&h5->unrel, skb);
|
||||
BT_ERR("Could not dequeue pkt because alloc_skb failed");
|
||||
bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed");
|
||||
}
|
||||
|
||||
spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING);
|
||||
@ -753,7 +754,7 @@ static struct sk_buff *h5_dequeue(struct hci_uart *hu)
|
||||
}
|
||||
|
||||
skb_queue_head(&h5->rel, skb);
|
||||
BT_ERR("Could not dequeue pkt because alloc_skb failed");
|
||||
bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed");
|
||||
}
|
||||
|
||||
unlock:
|
||||
@ -785,7 +786,6 @@ static const struct hci_uart_proto h5p = {
|
||||
|
||||
static int h5_serdev_probe(struct serdev_device *serdev)
|
||||
{
|
||||
const struct acpi_device_id *match;
|
||||
struct device *dev = &serdev->dev;
|
||||
struct h5 *h5;
|
||||
|
||||
@ -800,6 +800,8 @@ static int h5_serdev_probe(struct serdev_device *serdev)
|
||||
serdev_device_set_drvdata(serdev, h5);
|
||||
|
||||
if (has_acpi_companion(dev)) {
|
||||
const struct acpi_device_id *match;
|
||||
|
||||
match = acpi_match_device(dev->driver->acpi_match_table, dev);
|
||||
if (!match)
|
||||
return -ENODEV;
|
||||
@ -810,8 +812,17 @@ static int h5_serdev_probe(struct serdev_device *serdev)
|
||||
if (h5->vnd->acpi_gpio_map)
|
||||
devm_acpi_dev_add_driver_gpios(dev,
|
||||
h5->vnd->acpi_gpio_map);
|
||||
} else {
|
||||
const void *data;
|
||||
|
||||
data = of_device_get_match_data(dev);
|
||||
if (!data)
|
||||
return -ENODEV;
|
||||
|
||||
h5->vnd = (const struct h5_vnd *)data;
|
||||
}
|
||||
|
||||
|
||||
h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW);
|
||||
if (IS_ERR(h5->enable_gpio))
|
||||
return PTR_ERR(h5->enable_gpio);
|
||||
@ -1003,6 +1014,15 @@ static const struct dev_pm_ops h5_serdev_pm_ops = {
|
||||
SET_SYSTEM_SLEEP_PM_OPS(h5_serdev_suspend, h5_serdev_resume)
|
||||
};
|
||||
|
||||
static const struct of_device_id rtl_bluetooth_of_match[] = {
|
||||
#ifdef CONFIG_BT_HCIUART_RTL
|
||||
{ .compatible = "realtek,rtl8822cs-bt",
|
||||
.data = (const void *)&rtl_vnd },
|
||||
#endif
|
||||
{ },
|
||||
};
|
||||
MODULE_DEVICE_TABLE(of, rtl_bluetooth_of_match);
|
||||
|
||||
static struct serdev_device_driver h5_serdev_driver = {
|
||||
.probe = h5_serdev_probe,
|
||||
.remove = h5_serdev_remove,
|
||||
@ -1010,6 +1030,7 @@ static struct serdev_device_driver h5_serdev_driver = {
|
||||
.name = "hci_uart_h5",
|
||||
.acpi_match_table = ACPI_PTR(h5_acpi_match),
|
||||
.pm = &h5_serdev_pm_ops,
|
||||
.of_match_table = rtl_bluetooth_of_match,
|
||||
},
|
||||
};
|
||||
|
||||
|
@ -49,7 +49,7 @@
|
||||
struct hci_lpm_pkt {
|
||||
__u8 opcode;
|
||||
__u8 dlen;
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
} __packed;
|
||||
|
||||
struct intel_device {
|
||||
|
@ -29,6 +29,7 @@
|
||||
#include <linux/platform_device.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/serdev.h>
|
||||
#include <linux/mutex.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include <net/bluetooth/bluetooth.h>
|
||||
@ -69,7 +70,8 @@ enum qca_flags {
|
||||
QCA_IBS_ENABLED,
|
||||
QCA_DROP_VENDOR_EVENT,
|
||||
QCA_SUSPENDING,
|
||||
QCA_MEMDUMP_COLLECTION
|
||||
QCA_MEMDUMP_COLLECTION,
|
||||
QCA_HW_ERROR_EVENT
|
||||
};
|
||||
|
||||
|
||||
@ -138,18 +140,19 @@ struct qca_data {
|
||||
u32 tx_idle_delay;
|
||||
struct timer_list wake_retrans_timer;
|
||||
u32 wake_retrans;
|
||||
struct timer_list memdump_timer;
|
||||
struct workqueue_struct *workqueue;
|
||||
struct work_struct ws_awake_rx;
|
||||
struct work_struct ws_awake_device;
|
||||
struct work_struct ws_rx_vote_off;
|
||||
struct work_struct ws_tx_vote_off;
|
||||
struct work_struct ctrl_memdump_evt;
|
||||
struct delayed_work ctrl_memdump_timeout;
|
||||
struct qca_memdump_data *qca_memdump;
|
||||
unsigned long flags;
|
||||
struct completion drop_ev_comp;
|
||||
wait_queue_head_t suspend_wait_q;
|
||||
enum qca_memdump_states memdump_state;
|
||||
struct mutex hci_memdump_lock;
|
||||
|
||||
/* For debugging purpose */
|
||||
u64 ibs_sent_wacks;
|
||||
@ -522,23 +525,28 @@ static void hci_ibs_wake_retrans_timeout(struct timer_list *t)
|
||||
hci_uart_tx_wakeup(hu);
|
||||
}
|
||||
|
||||
static void hci_memdump_timeout(struct timer_list *t)
|
||||
{
|
||||
struct qca_data *qca = from_timer(qca, t, tx_idle_timer);
|
||||
struct hci_uart *hu = qca->hu;
|
||||
struct qca_memdump_data *qca_memdump = qca->qca_memdump;
|
||||
char *memdump_buf = qca_memdump->memdump_buf_tail;
|
||||
|
||||
bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
|
||||
/* Inject hw error event to reset the device and driver. */
|
||||
hci_reset_dev(hu->hdev);
|
||||
vfree(memdump_buf);
|
||||
kfree(qca_memdump);
|
||||
qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
|
||||
del_timer(&qca->memdump_timer);
|
||||
cancel_work_sync(&qca->ctrl_memdump_evt);
|
||||
static void qca_controller_memdump_timeout(struct work_struct *work)
|
||||
{
|
||||
struct qca_data *qca = container_of(work, struct qca_data,
|
||||
ctrl_memdump_timeout.work);
|
||||
struct hci_uart *hu = qca->hu;
|
||||
|
||||
mutex_lock(&qca->hci_memdump_lock);
|
||||
if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) {
|
||||
qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
|
||||
if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) {
|
||||
/* Inject hw error event to reset the device
|
||||
* and driver.
|
||||
*/
|
||||
hci_reset_dev(hu->hdev);
|
||||
}
|
||||
}
|
||||
|
||||
mutex_unlock(&qca->hci_memdump_lock);
|
||||
}
|
||||
|
||||
|
||||
/* Initialize protocol */
|
||||
static int qca_open(struct hci_uart *hu)
|
||||
{
|
||||
@ -558,6 +566,7 @@ static int qca_open(struct hci_uart *hu)
|
||||
skb_queue_head_init(&qca->tx_wait_q);
|
||||
skb_queue_head_init(&qca->rx_memdump_q);
|
||||
spin_lock_init(&qca->hci_ibs_lock);
|
||||
mutex_init(&qca->hci_memdump_lock);
|
||||
qca->workqueue = alloc_ordered_workqueue("qca_wq", 0);
|
||||
if (!qca->workqueue) {
|
||||
BT_ERR("QCA Workqueue not initialized properly");
|
||||
@ -570,6 +579,8 @@ static int qca_open(struct hci_uart *hu)
|
||||
INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off);
|
||||
INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off);
|
||||
INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump);
|
||||
INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout,
|
||||
qca_controller_memdump_timeout);
|
||||
init_waitqueue_head(&qca->suspend_wait_q);
|
||||
|
||||
qca->hu = hu;
|
||||
@ -596,7 +607,6 @@ static int qca_open(struct hci_uart *hu)
|
||||
|
||||
timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0);
|
||||
qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS;
|
||||
timer_setup(&qca->memdump_timer, hci_memdump_timeout, 0);
|
||||
|
||||
BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u",
|
||||
qca->tx_idle_delay, qca->wake_retrans);
|
||||
@ -677,7 +687,6 @@ static int qca_close(struct hci_uart *hu)
|
||||
skb_queue_purge(&qca->rx_memdump_q);
|
||||
del_timer(&qca->tx_idle_timer);
|
||||
del_timer(&qca->wake_retrans_timer);
|
||||
del_timer(&qca->memdump_timer);
|
||||
destroy_workqueue(qca->workqueue);
|
||||
qca->hu = NULL;
|
||||
|
||||
@ -963,11 +972,20 @@ static void qca_controller_memdump(struct work_struct *work)
|
||||
|
||||
while ((skb = skb_dequeue(&qca->rx_memdump_q))) {
|
||||
|
||||
mutex_lock(&qca->hci_memdump_lock);
|
||||
/* Skip processing the received packets if timeout detected. */
|
||||
if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT) {
|
||||
mutex_unlock(&qca->hci_memdump_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!qca_memdump) {
|
||||
qca_memdump = kzalloc(sizeof(struct qca_memdump_data),
|
||||
GFP_ATOMIC);
|
||||
if (!qca_memdump)
|
||||
if (!qca_memdump) {
|
||||
mutex_unlock(&qca->hci_memdump_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
qca->qca_memdump = qca_memdump;
|
||||
}
|
||||
@ -992,13 +1010,15 @@ static void qca_controller_memdump(struct work_struct *work)
|
||||
if (!(dump_size)) {
|
||||
bt_dev_err(hu->hdev, "Rx invalid memdump size");
|
||||
kfree_skb(skb);
|
||||
mutex_unlock(&qca->hci_memdump_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
bt_dev_info(hu->hdev, "QCA collecting dump of size:%u",
|
||||
dump_size);
|
||||
mod_timer(&qca->memdump_timer, (jiffies +
|
||||
msecs_to_jiffies(MEMDUMP_TIMEOUT_MS)));
|
||||
queue_delayed_work(qca->workqueue,
|
||||
&qca->ctrl_memdump_timeout,
|
||||
msecs_to_jiffies(MEMDUMP_TIMEOUT_MS));
|
||||
|
||||
skb_pull(skb, sizeof(dump_size));
|
||||
memdump_buf = vmalloc(dump_size);
|
||||
@ -1016,6 +1036,7 @@ static void qca_controller_memdump(struct work_struct *work)
|
||||
kfree(qca_memdump);
|
||||
kfree_skb(skb);
|
||||
qca->qca_memdump = NULL;
|
||||
mutex_unlock(&qca->hci_memdump_lock);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -1046,16 +1067,20 @@ static void qca_controller_memdump(struct work_struct *work)
|
||||
memdump_buf = qca_memdump->memdump_buf_head;
|
||||
dev_coredumpv(&hu->serdev->dev, memdump_buf,
|
||||
qca_memdump->received_dump, GFP_KERNEL);
|
||||
del_timer(&qca->memdump_timer);
|
||||
cancel_delayed_work(&qca->ctrl_memdump_timeout);
|
||||
kfree(qca->qca_memdump);
|
||||
qca->qca_memdump = NULL;
|
||||
qca->memdump_state = QCA_MEMDUMP_COLLECTED;
|
||||
clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
|
||||
}
|
||||
|
||||
mutex_unlock(&qca->hci_memdump_lock);
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
int qca_controller_memdump_event(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
static int qca_controller_memdump_event(struct hci_dev *hdev,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct hci_uart *hu = hci_get_drvdata(hdev);
|
||||
struct qca_data *qca = hu->priv;
|
||||
@ -1406,30 +1431,21 @@ static void qca_wait_for_dump_collection(struct hci_dev *hdev)
|
||||
{
|
||||
struct hci_uart *hu = hci_get_drvdata(hdev);
|
||||
struct qca_data *qca = hu->priv;
|
||||
struct qca_memdump_data *qca_memdump = qca->qca_memdump;
|
||||
char *memdump_buf = NULL;
|
||||
|
||||
wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION,
|
||||
TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS);
|
||||
|
||||
clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags);
|
||||
if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
|
||||
bt_dev_err(hu->hdev, "Clearing the buffers due to timeout");
|
||||
if (qca_memdump)
|
||||
memdump_buf = qca_memdump->memdump_buf_tail;
|
||||
vfree(memdump_buf);
|
||||
kfree(qca_memdump);
|
||||
qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
|
||||
del_timer(&qca->memdump_timer);
|
||||
cancel_work_sync(&qca->ctrl_memdump_evt);
|
||||
}
|
||||
}
|
||||
|
||||
static void qca_hw_error(struct hci_dev *hdev, u8 code)
|
||||
{
|
||||
struct hci_uart *hu = hci_get_drvdata(hdev);
|
||||
struct qca_data *qca = hu->priv;
|
||||
struct qca_memdump_data *qca_memdump = qca->qca_memdump;
|
||||
char *memdump_buf = NULL;
|
||||
|
||||
set_bit(QCA_HW_ERROR_EVENT, &qca->flags);
|
||||
bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state);
|
||||
|
||||
if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
|
||||
@ -1449,6 +1465,23 @@ static void qca_hw_error(struct hci_dev *hdev, u8 code)
|
||||
bt_dev_info(hdev, "waiting for dump to complete");
|
||||
qca_wait_for_dump_collection(hdev);
|
||||
}
|
||||
|
||||
if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) {
|
||||
bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout");
|
||||
mutex_lock(&qca->hci_memdump_lock);
|
||||
if (qca_memdump)
|
||||
memdump_buf = qca_memdump->memdump_buf_head;
|
||||
vfree(memdump_buf);
|
||||
kfree(qca_memdump);
|
||||
qca->qca_memdump = NULL;
|
||||
qca->memdump_state = QCA_MEMDUMP_TIMEOUT;
|
||||
cancel_delayed_work(&qca->ctrl_memdump_timeout);
|
||||
skb_queue_purge(&qca->rx_memdump_q);
|
||||
mutex_unlock(&qca->hci_memdump_lock);
|
||||
cancel_work_sync(&qca->ctrl_memdump_evt);
|
||||
}
|
||||
|
||||
clear_bit(QCA_HW_ERROR_EVENT, &qca->flags);
|
||||
}
|
||||
|
||||
static void qca_cmd_timeout(struct hci_dev *hdev)
|
||||
@ -1529,9 +1562,11 @@ static int qca_power_on(struct hci_dev *hdev)
|
||||
ret = qca_wcn3990_init(hu);
|
||||
} else {
|
||||
qcadev = serdev_device_get_drvdata(hu->serdev);
|
||||
gpiod_set_value_cansleep(qcadev->bt_en, 1);
|
||||
/* Controller needs time to bootup. */
|
||||
msleep(150);
|
||||
if (qcadev->bt_en) {
|
||||
gpiod_set_value_cansleep(qcadev->bt_en, 1);
|
||||
/* Controller needs time to bootup. */
|
||||
msleep(150);
|
||||
}
|
||||
}
|
||||
|
||||
return ret;
|
||||
@ -1717,7 +1752,7 @@ static void qca_power_shutdown(struct hci_uart *hu)
|
||||
host_set_baudrate(hu, 2400);
|
||||
qca_send_power_pulse(hu, false);
|
||||
qca_regulator_disable(qcadev);
|
||||
} else {
|
||||
} else if (qcadev->bt_en) {
|
||||
gpiod_set_value_cansleep(qcadev->bt_en, 0);
|
||||
}
|
||||
}
|
||||
@ -1726,9 +1761,11 @@ static int qca_power_off(struct hci_dev *hdev)
|
||||
{
|
||||
struct hci_uart *hu = hci_get_drvdata(hdev);
|
||||
struct qca_data *qca = hu->priv;
|
||||
enum qca_btsoc_type soc_type = qca_soc_type(hu);
|
||||
|
||||
/* Stop sending shutdown command if soc crashes. */
|
||||
if (qca->memdump_state == QCA_MEMDUMP_IDLE) {
|
||||
if (qca_is_wcn399x(soc_type)
|
||||
&& qca->memdump_state == QCA_MEMDUMP_IDLE) {
|
||||
qca_send_pre_shutdown_cmd(hdev);
|
||||
usleep_range(8000, 10000);
|
||||
}
|
||||
@ -1755,7 +1792,11 @@ static int qca_regulator_enable(struct qca_serdev *qcadev)
|
||||
|
||||
power->vregs_on = true;
|
||||
|
||||
return 0;
|
||||
ret = clk_prepare_enable(qcadev->susclk);
|
||||
if (ret)
|
||||
qca_regulator_disable(qcadev);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void qca_regulator_disable(struct qca_serdev *qcadev)
|
||||
@ -1773,6 +1814,8 @@ static void qca_regulator_disable(struct qca_serdev *qcadev)
|
||||
|
||||
regulator_bulk_disable(power->num_vregs, power->vreg_bulk);
|
||||
power->vregs_on = false;
|
||||
|
||||
clk_disable_unprepare(qcadev->susclk);
|
||||
}
|
||||
|
||||
static int qca_init_regulators(struct qca_power *qca,
|
||||
@ -1811,6 +1854,7 @@ static int qca_serdev_probe(struct serdev_device *serdev)
|
||||
struct hci_dev *hdev;
|
||||
const struct qca_vreg_data *data;
|
||||
int err;
|
||||
bool power_ctrl_enabled = true;
|
||||
|
||||
qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL);
|
||||
if (!qcadev)
|
||||
@ -1839,6 +1883,12 @@ static int qca_serdev_probe(struct serdev_device *serdev)
|
||||
|
||||
qcadev->bt_power->vregs_on = false;
|
||||
|
||||
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
|
||||
if (IS_ERR(qcadev->susclk)) {
|
||||
dev_err(&serdev->dev, "failed to acquire clk\n");
|
||||
return PTR_ERR(qcadev->susclk);
|
||||
}
|
||||
|
||||
device_property_read_u32(&serdev->dev, "max-speed",
|
||||
&qcadev->oper_speed);
|
||||
if (!qcadev->oper_speed)
|
||||
@ -1851,38 +1901,40 @@ static int qca_serdev_probe(struct serdev_device *serdev)
|
||||
}
|
||||
} else {
|
||||
qcadev->btsoc_type = QCA_ROME;
|
||||
qcadev->bt_en = devm_gpiod_get(&serdev->dev, "enable",
|
||||
qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable",
|
||||
GPIOD_OUT_LOW);
|
||||
if (IS_ERR(qcadev->bt_en)) {
|
||||
dev_err(&serdev->dev, "failed to acquire enable gpio\n");
|
||||
return PTR_ERR(qcadev->bt_en);
|
||||
if (!qcadev->bt_en) {
|
||||
dev_warn(&serdev->dev, "failed to acquire enable gpio\n");
|
||||
power_ctrl_enabled = false;
|
||||
}
|
||||
|
||||
qcadev->susclk = devm_clk_get(&serdev->dev, NULL);
|
||||
if (IS_ERR(qcadev->susclk)) {
|
||||
dev_err(&serdev->dev, "failed to acquire clk\n");
|
||||
return PTR_ERR(qcadev->susclk);
|
||||
qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL);
|
||||
if (!qcadev->susclk) {
|
||||
dev_warn(&serdev->dev, "failed to acquire clk\n");
|
||||
} else {
|
||||
err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = clk_prepare_enable(qcadev->susclk);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = clk_prepare_enable(qcadev->susclk);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto);
|
||||
if (err) {
|
||||
BT_ERR("Rome serdev registration failed");
|
||||
clk_disable_unprepare(qcadev->susclk);
|
||||
if (qcadev->susclk)
|
||||
clk_disable_unprepare(qcadev->susclk);
|
||||
return err;
|
||||
}
|
||||
}
|
||||
|
||||
hdev = qcadev->serdev_hu.hdev;
|
||||
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
|
||||
hdev->shutdown = qca_power_off;
|
||||
if (power_ctrl_enabled) {
|
||||
hdev = qcadev->serdev_hu.hdev;
|
||||
set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks);
|
||||
hdev->shutdown = qca_power_off;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
@ -1893,7 +1945,7 @@ static void qca_serdev_remove(struct serdev_device *serdev)
|
||||
|
||||
if (qca_is_wcn399x(qcadev->btsoc_type))
|
||||
qca_power_shutdown(&qcadev->serdev_hu);
|
||||
else
|
||||
else if (qcadev->susclk)
|
||||
clk_disable_unprepare(qcadev->susclk);
|
||||
|
||||
hci_uart_unregister_device(&qcadev->serdev_hu);
|
||||
|
@ -138,7 +138,7 @@ struct lowpan_dev {
|
||||
struct lowpan_iphc_ctx_table ctx;
|
||||
|
||||
/* must be last */
|
||||
u8 priv[0] __aligned(sizeof(void *));
|
||||
u8 priv[] __aligned(sizeof(void *));
|
||||
};
|
||||
|
||||
struct lowpan_802154_neigh {
|
||||
|
@ -121,6 +121,23 @@ struct bt_voice {
|
||||
|
||||
#define BT_SNDMTU 12
|
||||
#define BT_RCVMTU 13
|
||||
#define BT_PHY 14
|
||||
|
||||
#define BT_PHY_BR_1M_1SLOT 0x00000001
|
||||
#define BT_PHY_BR_1M_3SLOT 0x00000002
|
||||
#define BT_PHY_BR_1M_5SLOT 0x00000004
|
||||
#define BT_PHY_EDR_2M_1SLOT 0x00000008
|
||||
#define BT_PHY_EDR_2M_3SLOT 0x00000010
|
||||
#define BT_PHY_EDR_2M_5SLOT 0x00000020
|
||||
#define BT_PHY_EDR_3M_1SLOT 0x00000040
|
||||
#define BT_PHY_EDR_3M_3SLOT 0x00000080
|
||||
#define BT_PHY_EDR_3M_5SLOT 0x00000100
|
||||
#define BT_PHY_LE_1M_TX 0x00000200
|
||||
#define BT_PHY_LE_1M_RX 0x00000400
|
||||
#define BT_PHY_LE_2M_TX 0x00000800
|
||||
#define BT_PHY_LE_2M_RX 0x00001000
|
||||
#define BT_PHY_LE_CODED_TX 0x00002000
|
||||
#define BT_PHY_LE_CODED_RX 0x00004000
|
||||
|
||||
__printf(1, 2)
|
||||
void bt_info(const char *fmt, ...);
|
||||
|
@ -115,7 +115,7 @@ enum {
|
||||
* wrongly configured local features that will require forcing
|
||||
* them to enable this mode. Getting RSSI information with the
|
||||
* inquiry responses is preferred since it allows for a better
|
||||
* user expierence.
|
||||
* user experience.
|
||||
*
|
||||
* This quirk must be set before hci_register_dev is called.
|
||||
*/
|
||||
@ -142,7 +142,7 @@ enum {
|
||||
|
||||
/* When this quirk is set, an external configuration step
|
||||
* is required and will be indicated with the controller
|
||||
* configuation.
|
||||
* configuration.
|
||||
*
|
||||
* This quirk can be set before hci_register_dev is called or
|
||||
* during the hdev->setup vendor callback.
|
||||
@ -205,6 +205,15 @@ enum {
|
||||
*
|
||||
*/
|
||||
HCI_QUIRK_NON_PERSISTENT_SETUP,
|
||||
|
||||
/* When this quirk is set, wide band speech is supported by
|
||||
* the driver since no reliable mechanism exist to report
|
||||
* this from the hardware, a driver flag is use to convey
|
||||
* this support
|
||||
*
|
||||
* This quirk must be set before hci_register_dev is called.
|
||||
*/
|
||||
HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
|
||||
};
|
||||
|
||||
/* HCI device flags */
|
||||
@ -277,6 +286,7 @@ enum {
|
||||
HCI_FAST_CONNECTABLE,
|
||||
HCI_BREDR_ENABLED,
|
||||
HCI_LE_SCAN_INTERRUPTED,
|
||||
HCI_WIDEBAND_SPEECH_ENABLED,
|
||||
|
||||
HCI_DUT_MODE,
|
||||
HCI_VENDOR_DIAG,
|
||||
@ -932,10 +942,14 @@ struct hci_cp_sniff_subrate {
|
||||
#define HCI_OP_RESET 0x0c03
|
||||
|
||||
#define HCI_OP_SET_EVENT_FLT 0x0c05
|
||||
struct hci_cp_set_event_flt {
|
||||
__u8 flt_type;
|
||||
__u8 cond_type;
|
||||
__u8 condition[0];
|
||||
#define HCI_SET_EVENT_FLT_SIZE 9
|
||||
struct hci_cp_set_event_filter {
|
||||
__u8 flt_type;
|
||||
__u8 cond_type;
|
||||
struct {
|
||||
bdaddr_t bdaddr;
|
||||
__u8 auto_accept;
|
||||
} __packed addr_conn_flt;
|
||||
} __packed;
|
||||
|
||||
/* Filter types */
|
||||
@ -949,8 +963,9 @@ struct hci_cp_set_event_flt {
|
||||
#define HCI_CONN_SETUP_ALLOW_BDADDR 0x02
|
||||
|
||||
/* CONN_SETUP Conditions */
|
||||
#define HCI_CONN_SETUP_AUTO_OFF 0x01
|
||||
#define HCI_CONN_SETUP_AUTO_ON 0x02
|
||||
#define HCI_CONN_SETUP_AUTO_OFF 0x01
|
||||
#define HCI_CONN_SETUP_AUTO_ON 0x02
|
||||
#define HCI_CONN_SETUP_AUTO_ON_WITH_RS 0x03
|
||||
|
||||
#define HCI_OP_READ_STORED_LINK_KEY 0x0c0d
|
||||
struct hci_cp_read_stored_link_key {
|
||||
@ -1086,6 +1101,19 @@ struct hci_rp_read_inq_rsp_tx_power {
|
||||
__s8 tx_power;
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_READ_DEF_ERR_DATA_REPORTING 0x0c5a
|
||||
#define ERR_DATA_REPORTING_DISABLED 0x00
|
||||
#define ERR_DATA_REPORTING_ENABLED 0x01
|
||||
struct hci_rp_read_def_err_data_reporting {
|
||||
__u8 status;
|
||||
__u8 err_data_reporting;
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_WRITE_DEF_ERR_DATA_REPORTING 0x0c5b
|
||||
struct hci_cp_write_def_err_data_reporting {
|
||||
__u8 err_data_reporting;
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_SET_EVENT_MASK_PAGE_2 0x0c63
|
||||
|
||||
#define HCI_OP_READ_LOCATION_DATA 0x0c64
|
||||
@ -1335,7 +1363,7 @@ struct hci_rp_read_local_amp_assoc {
|
||||
__u8 status;
|
||||
__u8 phy_handle;
|
||||
__le16 rem_len;
|
||||
__u8 frag[0];
|
||||
__u8 frag[];
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_WRITE_REMOTE_AMP_ASSOC 0x140b
|
||||
@ -1343,7 +1371,7 @@ struct hci_cp_write_remote_amp_assoc {
|
||||
__u8 phy_handle;
|
||||
__le16 len_so_far;
|
||||
__le16 rem_len;
|
||||
__u8 frag[0];
|
||||
__u8 frag[];
|
||||
} __packed;
|
||||
struct hci_rp_write_remote_amp_assoc {
|
||||
__u8 status;
|
||||
@ -1613,7 +1641,7 @@ struct hci_cp_le_set_ext_scan_params {
|
||||
__u8 own_addr_type;
|
||||
__u8 filter_policy;
|
||||
__u8 scanning_phys;
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
} __packed;
|
||||
|
||||
#define LE_SCAN_PHY_1M 0x01
|
||||
@ -1641,7 +1669,7 @@ struct hci_cp_le_ext_create_conn {
|
||||
__u8 peer_addr_type;
|
||||
bdaddr_t peer_addr;
|
||||
__u8 phys;
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
} __packed;
|
||||
|
||||
struct hci_cp_le_ext_conn_param {
|
||||
@ -1693,7 +1721,7 @@ struct hci_rp_le_set_ext_adv_params {
|
||||
struct hci_cp_le_set_ext_adv_enable {
|
||||
__u8 enable;
|
||||
__u8 num_of_sets;
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
} __packed;
|
||||
|
||||
struct hci_cp_ext_adv_set {
|
||||
@ -1724,6 +1752,8 @@ struct hci_cp_le_set_ext_scan_rsp_data {
|
||||
|
||||
#define LE_SET_ADV_DATA_NO_FRAG 0x01
|
||||
|
||||
#define HCI_OP_LE_REMOVE_ADV_SET 0x203c
|
||||
|
||||
#define HCI_OP_LE_CLEAR_ADV_SETS 0x203d
|
||||
|
||||
#define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035
|
||||
@ -1775,14 +1805,14 @@ struct hci_cp_le_set_cig_params {
|
||||
__le16 m_latency;
|
||||
__le16 s_latency;
|
||||
__u8 num_cis;
|
||||
struct hci_cis_params cis[0];
|
||||
struct hci_cis_params cis[];
|
||||
} __packed;
|
||||
|
||||
struct hci_rp_le_set_cig_params {
|
||||
__u8 status;
|
||||
__u8 cig_id;
|
||||
__u8 num_handles;
|
||||
__le16 handle[0];
|
||||
__le16 handle[];
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_LE_CREATE_CIS 0x2064
|
||||
@ -1793,7 +1823,7 @@ struct hci_cis {
|
||||
|
||||
struct hci_cp_le_create_cis {
|
||||
__u8 num_cis;
|
||||
struct hci_cis cis[0];
|
||||
struct hci_cis cis[];
|
||||
} __packed;
|
||||
|
||||
#define HCI_OP_LE_REMOVE_CIG 0x2065
|
||||
@ -1937,7 +1967,7 @@ struct hci_comp_pkts_info {
|
||||
|
||||
struct hci_ev_num_comp_pkts {
|
||||
__u8 num_hndl;
|
||||
struct hci_comp_pkts_info handles[0];
|
||||
struct hci_comp_pkts_info handles[];
|
||||
} __packed;
|
||||
|
||||
#define HCI_EV_MODE_CHANGE 0x14
|
||||
@ -2170,7 +2200,7 @@ struct hci_comp_blocks_info {
|
||||
struct hci_ev_num_comp_blocks {
|
||||
__le16 num_blocks;
|
||||
__u8 num_hndl;
|
||||
struct hci_comp_blocks_info handles[0];
|
||||
struct hci_comp_blocks_info handles[];
|
||||
} __packed;
|
||||
|
||||
#define HCI_EV_SYNC_TRAIN_COMPLETE 0x4F
|
||||
@ -2226,7 +2256,7 @@ struct hci_ev_le_advertising_info {
|
||||
__u8 bdaddr_type;
|
||||
bdaddr_t bdaddr;
|
||||
__u8 length;
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
} __packed;
|
||||
|
||||
#define HCI_EV_LE_CONN_UPDATE_COMPLETE 0x03
|
||||
@ -2302,7 +2332,7 @@ struct hci_ev_le_ext_adv_report {
|
||||
__u8 direct_addr_type;
|
||||
bdaddr_t direct_addr;
|
||||
__u8 length;
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
} __packed;
|
||||
|
||||
#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a
|
||||
@ -2362,7 +2392,7 @@ struct hci_evt_le_cis_req {
|
||||
#define HCI_EV_STACK_INTERNAL 0xfd
|
||||
struct hci_ev_stack_internal {
|
||||
__u16 type;
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
} __packed;
|
||||
|
||||
#define HCI_EV_SI_DEVICE 0x01
|
||||
@ -2409,7 +2439,7 @@ struct hci_sco_hdr {
|
||||
struct hci_iso_hdr {
|
||||
__le16 handle;
|
||||
__le16 dlen;
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
} __packed;
|
||||
|
||||
/* ISO data packet status flags */
|
||||
|
@ -88,6 +88,31 @@ struct discovery_state {
|
||||
unsigned long scan_duration;
|
||||
};
|
||||
|
||||
#define SUSPEND_NOTIFIER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */
|
||||
|
||||
enum suspend_tasks {
|
||||
SUSPEND_PAUSE_DISCOVERY,
|
||||
SUSPEND_UNPAUSE_DISCOVERY,
|
||||
|
||||
SUSPEND_PAUSE_ADVERTISING,
|
||||
SUSPEND_UNPAUSE_ADVERTISING,
|
||||
|
||||
SUSPEND_SCAN_DISABLE,
|
||||
SUSPEND_SCAN_ENABLE,
|
||||
SUSPEND_DISCONNECTING,
|
||||
|
||||
SUSPEND_POWERING_DOWN,
|
||||
|
||||
SUSPEND_PREPARE_NOTIFIER,
|
||||
__SUSPEND_NUM_TASKS
|
||||
};
|
||||
|
||||
enum suspended_state {
|
||||
BT_RUNNING = 0,
|
||||
BT_SUSPEND_DISCONNECT,
|
||||
BT_SUSPEND_COMPLETE,
|
||||
};
|
||||
|
||||
struct hci_conn_hash {
|
||||
struct list_head list;
|
||||
unsigned int acl_num;
|
||||
@ -260,6 +285,7 @@ struct hci_dev {
|
||||
__u8 stored_num_keys;
|
||||
__u8 io_capability;
|
||||
__s8 inq_tx_power;
|
||||
__u8 err_data_reporting;
|
||||
__u16 page_scan_interval;
|
||||
__u16 page_scan_window;
|
||||
__u8 page_scan_type;
|
||||
@ -389,11 +415,28 @@ struct hci_dev {
|
||||
void *smp_bredr_data;
|
||||
|
||||
struct discovery_state discovery;
|
||||
|
||||
int discovery_old_state;
|
||||
bool discovery_paused;
|
||||
int advertising_old_state;
|
||||
bool advertising_paused;
|
||||
|
||||
struct notifier_block suspend_notifier;
|
||||
struct work_struct suspend_prepare;
|
||||
enum suspended_state suspend_state_next;
|
||||
enum suspended_state suspend_state;
|
||||
bool scanning_paused;
|
||||
bool suspended;
|
||||
|
||||
wait_queue_head_t suspend_wait_q;
|
||||
DECLARE_BITMAP(suspend_tasks, __SUSPEND_NUM_TASKS);
|
||||
|
||||
struct hci_conn_hash conn_hash;
|
||||
|
||||
struct list_head mgmt_pending;
|
||||
struct list_head blacklist;
|
||||
struct list_head whitelist;
|
||||
struct list_head wakeable;
|
||||
struct list_head uuids;
|
||||
struct list_head link_keys;
|
||||
struct list_head long_term_keys;
|
||||
@ -575,6 +618,7 @@ struct hci_conn_params {
|
||||
|
||||
struct hci_conn *conn;
|
||||
bool explicit_connect;
|
||||
bool wakeable;
|
||||
};
|
||||
|
||||
extern struct list_head hci_dev_list;
|
||||
@ -1477,6 +1521,8 @@ void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode);
|
||||
struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
|
||||
const void *param, u32 timeout);
|
||||
|
||||
u32 hci_conn_get_phy(struct hci_conn *conn);
|
||||
|
||||
/* ----- HCI Sockets ----- */
|
||||
void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
|
||||
void hci_send_to_channel(unsigned short channel, struct sk_buff *skb,
|
||||
|
@ -144,19 +144,19 @@ struct hci_dev_req {
|
||||
|
||||
struct hci_dev_list_req {
|
||||
__u16 dev_num;
|
||||
struct hci_dev_req dev_req[0]; /* hci_dev_req structures */
|
||||
struct hci_dev_req dev_req[]; /* hci_dev_req structures */
|
||||
};
|
||||
|
||||
struct hci_conn_list_req {
|
||||
__u16 dev_id;
|
||||
__u16 conn_num;
|
||||
struct hci_conn_info conn_info[0];
|
||||
struct hci_conn_info conn_info[];
|
||||
};
|
||||
|
||||
struct hci_conn_info_req {
|
||||
bdaddr_t bdaddr;
|
||||
__u8 type;
|
||||
struct hci_conn_info conn_info[0];
|
||||
struct hci_conn_info conn_info[];
|
||||
};
|
||||
|
||||
struct hci_auth_info_req {
|
||||
|
@ -119,6 +119,10 @@ struct l2cap_conninfo {
|
||||
#define L2CAP_LE_CONN_REQ 0x14
|
||||
#define L2CAP_LE_CONN_RSP 0x15
|
||||
#define L2CAP_LE_CREDITS 0x16
|
||||
#define L2CAP_ECRED_CONN_REQ 0x17
|
||||
#define L2CAP_ECRED_CONN_RSP 0x18
|
||||
#define L2CAP_ECRED_RECONF_REQ 0x19
|
||||
#define L2CAP_ECRED_RECONF_RSP 0x1a
|
||||
|
||||
/* L2CAP extended feature mask */
|
||||
#define L2CAP_FEAT_FLOWCTL 0x00000001
|
||||
@ -290,6 +294,8 @@ struct l2cap_conn_rsp {
|
||||
#define L2CAP_CR_LE_ENCRYPTION 0x0008
|
||||
#define L2CAP_CR_LE_INVALID_SCID 0x0009
|
||||
#define L2CAP_CR_LE_SCID_IN_USE 0X000A
|
||||
#define L2CAP_CR_LE_UNACCEPT_PARAMS 0X000B
|
||||
#define L2CAP_CR_LE_INVALID_PARAMS 0X000C
|
||||
|
||||
/* connect/create channel status */
|
||||
#define L2CAP_CS_NO_INFO 0x0000
|
||||
@ -299,14 +305,14 @@ struct l2cap_conn_rsp {
|
||||
struct l2cap_conf_req {
|
||||
__le16 dcid;
|
||||
__le16 flags;
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
} __packed;
|
||||
|
||||
struct l2cap_conf_rsp {
|
||||
__le16 scid;
|
||||
__le16 flags;
|
||||
__le16 result;
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
} __packed;
|
||||
|
||||
#define L2CAP_CONF_SUCCESS 0x0000
|
||||
@ -322,7 +328,7 @@ struct l2cap_conf_rsp {
|
||||
struct l2cap_conf_opt {
|
||||
__u8 type;
|
||||
__u8 len;
|
||||
__u8 val[0];
|
||||
__u8 val[];
|
||||
} __packed;
|
||||
#define L2CAP_CONF_OPT_SIZE 2
|
||||
|
||||
@ -359,6 +365,7 @@ struct l2cap_conf_rfc {
|
||||
* ever be used in the BR/EDR configuration phase.
|
||||
*/
|
||||
#define L2CAP_MODE_LE_FLOWCTL 0x80
|
||||
#define L2CAP_MODE_EXT_FLOWCTL 0x81
|
||||
|
||||
struct l2cap_conf_efs {
|
||||
__u8 id;
|
||||
@ -392,7 +399,7 @@ struct l2cap_info_req {
|
||||
struct l2cap_info_rsp {
|
||||
__le16 type;
|
||||
__le16 result;
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
} __packed;
|
||||
|
||||
struct l2cap_create_chan_req {
|
||||
@ -483,6 +490,39 @@ struct l2cap_le_credits {
|
||||
__le16 credits;
|
||||
} __packed;
|
||||
|
||||
#define L2CAP_ECRED_MIN_MTU 64
|
||||
#define L2CAP_ECRED_MIN_MPS 64
|
||||
|
||||
struct l2cap_ecred_conn_req {
|
||||
__le16 psm;
|
||||
__le16 mtu;
|
||||
__le16 mps;
|
||||
__le16 credits;
|
||||
__le16 scid[0];
|
||||
} __packed;
|
||||
|
||||
struct l2cap_ecred_conn_rsp {
|
||||
__le16 mtu;
|
||||
__le16 mps;
|
||||
__le16 credits;
|
||||
__le16 result;
|
||||
__le16 dcid[0];
|
||||
};
|
||||
|
||||
struct l2cap_ecred_reconf_req {
|
||||
__le16 mtu;
|
||||
__le16 mps;
|
||||
__le16 scid[0];
|
||||
} __packed;
|
||||
|
||||
#define L2CAP_RECONF_SUCCESS 0x0000
|
||||
#define L2CAP_RECONF_INVALID_MTU 0x0001
|
||||
#define L2CAP_RECONF_INVALID_MPS 0x0002
|
||||
|
||||
struct l2cap_ecred_reconf_rsp {
|
||||
__le16 result;
|
||||
} __packed;
|
||||
|
||||
/* ----- L2CAP channels and connections ----- */
|
||||
struct l2cap_seq_list {
|
||||
__u16 head;
|
||||
@ -724,6 +764,7 @@ enum {
|
||||
FLAG_EFS_ENABLE,
|
||||
FLAG_DEFER_SETUP,
|
||||
FLAG_LE_CONN_REQ_SENT,
|
||||
FLAG_ECRED_CONN_REQ_SENT,
|
||||
FLAG_PENDING_SECURITY,
|
||||
FLAG_HOLD_HCI_CONN,
|
||||
};
|
||||
@ -917,12 +958,14 @@ static inline long l2cap_chan_no_get_sndtimeo(struct l2cap_chan *chan)
|
||||
}
|
||||
|
||||
extern bool disable_ertm;
|
||||
extern bool enable_ecred;
|
||||
|
||||
int l2cap_init_sockets(void);
|
||||
void l2cap_cleanup_sockets(void);
|
||||
bool l2cap_is_socket(struct socket *sock);
|
||||
|
||||
void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan);
|
||||
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan);
|
||||
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan);
|
||||
|
||||
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm);
|
||||
@ -932,6 +975,7 @@ struct l2cap_chan *l2cap_chan_create(void);
|
||||
void l2cap_chan_close(struct l2cap_chan *chan, int reason);
|
||||
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
|
||||
bdaddr_t *dst, u8 dst_type);
|
||||
int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu);
|
||||
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len);
|
||||
void l2cap_chan_busy(struct l2cap_chan *chan, int busy);
|
||||
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator);
|
||||
|
@ -101,7 +101,8 @@ struct mgmt_rp_read_index_list {
|
||||
#define MGMT_SETTING_PRIVACY 0x00002000
|
||||
#define MGMT_SETTING_CONFIGURATION 0x00004000
|
||||
#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
|
||||
#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
|
||||
#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
|
||||
#define MGMT_SETTING_WIDEBAND_SPEECH 0x00020000
|
||||
|
||||
#define MGMT_OP_READ_INFO 0x0004
|
||||
#define MGMT_READ_INFO_SIZE 0
|
||||
@ -671,6 +672,8 @@ struct mgmt_cp_set_blocked_keys {
|
||||
} __packed;
|
||||
#define MGMT_OP_SET_BLOCKED_KEYS_SIZE 2
|
||||
|
||||
#define MGMT_OP_SET_WIDEBAND_SPEECH 0x0047
|
||||
|
||||
#define MGMT_EV_CMD_COMPLETE 0x0001
|
||||
struct mgmt_ev_cmd_complete {
|
||||
__le16 opcode;
|
||||
|
@ -34,7 +34,6 @@
|
||||
#define RFCOMM_DEFAULT_MTU 127
|
||||
#define RFCOMM_DEFAULT_CREDITS 7
|
||||
|
||||
#define RFCOMM_MAX_L2CAP_MTU 1013
|
||||
#define RFCOMM_MAX_CREDITS 40
|
||||
|
||||
#define RFCOMM_SKB_HEAD_RESERVE 8
|
||||
@ -356,7 +355,7 @@ struct rfcomm_dev_info {
|
||||
|
||||
struct rfcomm_dev_list_req {
|
||||
u16 dev_num;
|
||||
struct rfcomm_dev_info dev_info[0];
|
||||
struct rfcomm_dev_info dev_info[];
|
||||
};
|
||||
|
||||
int rfcomm_dev_ioctl(struct sock *sk, unsigned int cmd, void __user *arg);
|
||||
|
@ -36,14 +36,14 @@ struct a2mp_cmd {
|
||||
__u8 code;
|
||||
__u8 ident;
|
||||
__le16 len;
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
} __packed;
|
||||
|
||||
/* A2MP command codes */
|
||||
#define A2MP_COMMAND_REJ 0x01
|
||||
struct a2mp_cmd_rej {
|
||||
__le16 reason;
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
} __packed;
|
||||
|
||||
#define A2MP_DISCOVER_REQ 0x02
|
||||
@ -62,7 +62,7 @@ struct a2mp_cl {
|
||||
struct a2mp_discov_rsp {
|
||||
__le16 mtu;
|
||||
__le16 ext_feat;
|
||||
struct a2mp_cl cl[0];
|
||||
struct a2mp_cl cl[];
|
||||
} __packed;
|
||||
|
||||
#define A2MP_CHANGE_NOTIFY 0x04
|
||||
@ -93,7 +93,7 @@ struct a2mp_amp_assoc_req {
|
||||
struct a2mp_amp_assoc_rsp {
|
||||
__u8 id;
|
||||
__u8 status;
|
||||
__u8 amp_assoc[0];
|
||||
__u8 amp_assoc[];
|
||||
} __packed;
|
||||
|
||||
#define A2MP_CREATEPHYSLINK_REQ 0x0A
|
||||
@ -101,7 +101,7 @@ struct a2mp_amp_assoc_rsp {
|
||||
struct a2mp_physlink_req {
|
||||
__u8 local_id;
|
||||
__u8 remote_id;
|
||||
__u8 amp_assoc[0];
|
||||
__u8 amp_assoc[];
|
||||
} __packed;
|
||||
|
||||
#define A2MP_CREATEPHYSLINK_RSP 0x0B
|
||||
|
@ -74,14 +74,14 @@ struct bnep_setup_conn_req {
|
||||
__u8 type;
|
||||
__u8 ctrl;
|
||||
__u8 uuid_size;
|
||||
__u8 service[0];
|
||||
__u8 service[];
|
||||
} __packed;
|
||||
|
||||
struct bnep_set_filter_req {
|
||||
__u8 type;
|
||||
__u8 ctrl;
|
||||
__be16 len;
|
||||
__u8 list[0];
|
||||
__u8 list[];
|
||||
} __packed;
|
||||
|
||||
struct bnep_control_rsp {
|
||||
@ -93,7 +93,7 @@ struct bnep_control_rsp {
|
||||
struct bnep_ext_hdr {
|
||||
__u8 type;
|
||||
__u8 len;
|
||||
__u8 data[0];
|
||||
__u8 data[];
|
||||
} __packed;
|
||||
|
||||
/* BNEP ioctl defines */
|
||||
|
@ -467,6 +467,23 @@ static void hci_conn_auto_accept(struct work_struct *work)
|
||||
&conn->dst);
|
||||
}
|
||||
|
||||
static void le_disable_advertising(struct hci_dev *hdev)
|
||||
{
|
||||
if (ext_adv_capable(hdev)) {
|
||||
struct hci_cp_le_set_ext_adv_enable cp;
|
||||
|
||||
cp.enable = 0x00;
|
||||
cp.num_of_sets = 0x00;
|
||||
|
||||
hci_send_cmd(hdev, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
|
||||
&cp);
|
||||
} else {
|
||||
u8 enable = 0x00;
|
||||
hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
|
||||
&enable);
|
||||
}
|
||||
}
|
||||
|
||||
static void le_conn_timeout(struct work_struct *work)
|
||||
{
|
||||
struct hci_conn *conn = container_of(work, struct hci_conn,
|
||||
@ -481,9 +498,8 @@ static void le_conn_timeout(struct work_struct *work)
|
||||
* (which doesn't have a timeout of its own).
|
||||
*/
|
||||
if (conn->role == HCI_ROLE_SLAVE) {
|
||||
u8 enable = 0x00;
|
||||
hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
|
||||
&enable);
|
||||
/* Disable LE Advertising */
|
||||
le_disable_advertising(hdev);
|
||||
hci_le_conn_failed(conn, HCI_ERROR_ADVERTISING_TIMEOUT);
|
||||
return;
|
||||
}
|
||||
@ -898,6 +914,16 @@ static void hci_req_directed_advertising(struct hci_request *req,
|
||||
cp.peer_addr_type = conn->dst_type;
|
||||
bacpy(&cp.peer_addr, &conn->dst);
|
||||
|
||||
/* As per Core Spec 5.2, Vol 2, Part E, Sec 7.8.53, the
* advertising_event_property LE_LEGACY_ADV_DIRECT_IND
* does not support advertising data when the advertising set already
* contains some, and the controller shall return error code 'Invalid
* HCI Command Parameters (0x12)'.
* So it is required to remove the adv set for handle 0x00, since we use
* instance 0 for directed adv.
*/
hci_req_add(req, HCI_OP_LE_REMOVE_ADV_SET, sizeof(cp.handle), &cp.handle);
|
||||
|
||||
hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);
|
||||
|
||||
if (own_addr_type == ADDR_LE_DEV_RANDOM &&
|
||||
@ -1029,11 +1055,8 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
|
||||
* anyway have to disable it in order to start directed
|
||||
* advertising.
|
||||
*/
|
||||
if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
|
||||
u8 enable = 0x00;
|
||||
hci_req_add(&req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
|
||||
&enable);
|
||||
}
|
||||
if (hci_dev_test_flag(hdev, HCI_LE_ADV))
|
||||
__hci_req_disable_advertising(&req);
|
||||
|
||||
/* If requested to connect as slave use directed advertising */
|
||||
if (conn->role == HCI_ROLE_SLAVE) {
|
||||
@ -1725,3 +1748,110 @@ struct hci_chan *hci_chan_lookup_handle(struct hci_dev *hdev, __u16 handle)
|
||||
|
||||
return hchan;
|
||||
}
|
||||
|
||||
u32 hci_conn_get_phy(struct hci_conn *conn)
|
||||
{
|
||||
u32 phys = 0;
|
||||
|
||||
hci_dev_lock(conn->hdev);
|
||||
|
||||
/* BLUETOOTH CORE SPECIFICATION Version 5.2 | Vol 2, Part B page 471:
|
||||
* Table 6.2: Packets defined for synchronous, asynchronous, and
|
||||
* CSB logical transport types.
|
||||
*/
|
||||
switch (conn->type) {
|
||||
case SCO_LINK:
|
||||
/* SCO logical transport (1 Mb/s):
|
||||
* HV1, HV2, HV3 and DV.
|
||||
*/
|
||||
phys |= BT_PHY_BR_1M_1SLOT;
|
||||
|
||||
break;
|
||||
|
||||
case ACL_LINK:
|
||||
/* ACL logical transport (1 Mb/s) ptt=0:
|
||||
* DH1, DM3, DH3, DM5 and DH5.
|
||||
*/
|
||||
phys |= BT_PHY_BR_1M_1SLOT;
|
||||
|
||||
if (conn->pkt_type & (HCI_DM3 | HCI_DH3))
|
||||
phys |= BT_PHY_BR_1M_3SLOT;
|
||||
|
||||
if (conn->pkt_type & (HCI_DM5 | HCI_DH5))
|
||||
phys |= BT_PHY_BR_1M_5SLOT;
|
||||
|
||||
/* ACL logical transport (2 Mb/s) ptt=1:
|
||||
* 2-DH1, 2-DH3 and 2-DH5.
|
||||
*/
|
||||
if (!(conn->pkt_type & HCI_2DH1))
|
||||
phys |= BT_PHY_EDR_2M_1SLOT;
|
||||
|
||||
if (!(conn->pkt_type & HCI_2DH3))
|
||||
phys |= BT_PHY_EDR_2M_3SLOT;
|
||||
|
||||
if (!(conn->pkt_type & HCI_2DH5))
|
||||
phys |= BT_PHY_EDR_2M_5SLOT;
|
||||
|
||||
/* ACL logical transport (3 Mb/s) ptt=1:
|
||||
* 3-DH1, 3-DH3 and 3-DH5.
|
||||
*/
|
||||
if (!(conn->pkt_type & HCI_3DH1))
|
||||
phys |= BT_PHY_EDR_3M_1SLOT;
|
||||
|
||||
if (!(conn->pkt_type & HCI_3DH3))
|
||||
phys |= BT_PHY_EDR_3M_3SLOT;
|
||||
|
||||
if (!(conn->pkt_type & HCI_3DH5))
|
||||
phys |= BT_PHY_EDR_3M_5SLOT;
|
||||
|
||||
break;
|
||||
|
||||
case ESCO_LINK:
|
||||
/* eSCO logical transport (1 Mb/s): EV3, EV4 and EV5 */
|
||||
phys |= BT_PHY_BR_1M_1SLOT;
|
||||
|
||||
if (!(conn->pkt_type & (ESCO_EV4 | ESCO_EV5)))
|
||||
phys |= BT_PHY_BR_1M_3SLOT;
|
||||
|
||||
/* eSCO logical transport (2 Mb/s): 2-EV3, 2-EV5 */
|
||||
if (!(conn->pkt_type & ESCO_2EV3))
|
||||
phys |= BT_PHY_EDR_2M_1SLOT;
|
||||
|
||||
if (!(conn->pkt_type & ESCO_2EV5))
|
||||
phys |= BT_PHY_EDR_2M_3SLOT;
|
||||
|
||||
/* eSCO logical transport (3 Mb/s): 3-EV3, 3-EV5 */
|
||||
if (!(conn->pkt_type & ESCO_3EV3))
|
||||
phys |= BT_PHY_EDR_3M_1SLOT;
|
||||
|
||||
if (!(conn->pkt_type & ESCO_3EV5))
|
||||
phys |= BT_PHY_EDR_3M_3SLOT;
|
||||
|
||||
break;
|
||||
|
||||
case LE_LINK:
|
||||
if (conn->le_tx_phy & HCI_LE_SET_PHY_1M)
|
||||
phys |= BT_PHY_LE_1M_TX;
|
||||
|
||||
if (conn->le_rx_phy & HCI_LE_SET_PHY_1M)
|
||||
phys |= BT_PHY_LE_1M_RX;
|
||||
|
||||
if (conn->le_tx_phy & HCI_LE_SET_PHY_2M)
|
||||
phys |= BT_PHY_LE_2M_TX;
|
||||
|
||||
if (conn->le_rx_phy & HCI_LE_SET_PHY_2M)
|
||||
phys |= BT_PHY_LE_2M_RX;
|
||||
|
||||
if (conn->le_tx_phy & HCI_LE_SET_PHY_CODED)
|
||||
phys |= BT_PHY_LE_CODED_TX;
|
||||
|
||||
if (conn->le_rx_phy & HCI_LE_SET_PHY_CODED)
|
||||
phys |= BT_PHY_LE_CODED_RX;
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
hci_dev_unlock(conn->hdev);
|
||||
|
||||
return phys;
|
||||
}
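A detail that is easy to misread in hci_conn_get_phy() above: for the 2M and 3M EDR rates, the ACL/eSCO packet-type bits are "shall not be used" flags, so the negated tests mean the corresponding rate/slot combination is still permitted. The standalone sketch below only illustrates that pattern; the PKT_* values and PHY bit positions are assumptions chosen for the example, not the kernel's actual defines.

#include <stdio.h>

/* Assumed illustrative values; the real masks live in the HCI headers. */
#define PKT_2DH1	0x0002
#define PKT_2DH3	0x0100
#define PKT_2DH5	0x1000

#define PHY_EDR_2M_1SLOT	0x01
#define PHY_EDR_2M_3SLOT	0x02
#define PHY_EDR_2M_5SLOT	0x04

static unsigned int edr2m_phys(unsigned int pkt_type)
{
	unsigned int phys = 0;

	/* A set bit means "do not use this packet type", so a clear bit
	 * means the 2M rate is usable at that slot count.
	 */
	if (!(pkt_type & PKT_2DH1))
		phys |= PHY_EDR_2M_1SLOT;
	if (!(pkt_type & PKT_2DH3))
		phys |= PHY_EDR_2M_3SLOT;
	if (!(pkt_type & PKT_2DH5))
		phys |= PHY_EDR_2M_5SLOT;

	return phys;
}

int main(void)
{
	/* 3- and 5-slot 2M packets disallowed: only the 1-slot bit remains. */
	printf("phys = 0x%x\n", edr2m_phys(PKT_2DH3 | PKT_2DH5));
	return 0;
}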
|
||||
|
@ -31,6 +31,8 @@
|
||||
#include <linux/debugfs.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/property.h>
|
||||
#include <linux/suspend.h>
|
||||
#include <linux/wait.h>
|
||||
#include <asm/unaligned.h>
|
||||
|
||||
#include <net/bluetooth/bluetooth.h>
|
||||
@ -603,6 +605,9 @@ static int hci_init3_req(struct hci_request *req, unsigned long opt)
|
||||
if (hdev->commands[8] & 0x01)
|
||||
hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
|
||||
|
||||
if (hdev->commands[18] & 0x04)
|
||||
hci_req_add(req, HCI_OP_READ_DEF_ERR_DATA_REPORTING, 0, NULL);
|
||||
|
||||
/* Some older Broadcom based Bluetooth 1.2 controllers do not
|
||||
* support the Read Page Scan Type command. Check support for
|
||||
* this command in the bit mask of supported commands.
|
||||
@ -838,6 +843,26 @@ static int hci_init4_req(struct hci_request *req, unsigned long opt)
|
||||
sizeof(support), &support);
|
||||
}
|
||||
|
||||
/* If supported, set erroneous data reporting to match the wideband
* speech setting value.
*/
|
||||
if (hdev->commands[18] & 0x08) {
|
||||
bool enabled = hci_dev_test_flag(hdev,
|
||||
HCI_WIDEBAND_SPEECH_ENABLED);
|
||||
|
||||
if (enabled !=
|
||||
(hdev->err_data_reporting == ERR_DATA_REPORTING_ENABLED)) {
|
||||
struct hci_cp_write_def_err_data_reporting cp;
|
||||
|
||||
cp.err_data_reporting = enabled ?
|
||||
ERR_DATA_REPORTING_ENABLED :
|
||||
ERR_DATA_REPORTING_DISABLED;
|
||||
|
||||
hci_req_add(req, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING,
|
||||
sizeof(cp), &cp);
|
||||
}
|
||||
}
|
||||
|
||||
/* Set Suggested Default Data Length to maximum if supported */
|
||||
if (hdev->le_features[0] & HCI_LE_DATA_LEN_EXT) {
|
||||
struct hci_cp_le_write_def_data_len cp;
|
||||
@ -1764,6 +1789,9 @@ int hci_dev_do_close(struct hci_dev *hdev)
|
||||
clear_bit(HCI_RUNNING, &hdev->flags);
|
||||
hci_sock_dev_event(hdev, HCI_DEV_CLOSE);
|
||||
|
||||
if (test_and_clear_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks))
|
||||
wake_up(&hdev->suspend_wait_q);
|
||||
|
||||
/* After this point our queues are empty
|
||||
* and no tasks are scheduled. */
|
||||
hdev->close(hdev);
|
||||
@ -2285,7 +2313,7 @@ void hci_link_keys_clear(struct hci_dev *hdev)
|
||||
{
|
||||
struct link_key *key;
|
||||
|
||||
list_for_each_entry_rcu(key, &hdev->link_keys, list) {
|
||||
list_for_each_entry(key, &hdev->link_keys, list) {
|
||||
list_del_rcu(&key->list);
|
||||
kfree_rcu(key, rcu);
|
||||
}
|
||||
@ -2295,7 +2323,7 @@ void hci_smp_ltks_clear(struct hci_dev *hdev)
|
||||
{
|
||||
struct smp_ltk *k;
|
||||
|
||||
list_for_each_entry_rcu(k, &hdev->long_term_keys, list) {
|
||||
list_for_each_entry(k, &hdev->long_term_keys, list) {
|
||||
list_del_rcu(&k->list);
|
||||
kfree_rcu(k, rcu);
|
||||
}
|
||||
@ -2305,7 +2333,7 @@ void hci_smp_irks_clear(struct hci_dev *hdev)
|
||||
{
|
||||
struct smp_irk *k;
|
||||
|
||||
list_for_each_entry_rcu(k, &hdev->identity_resolving_keys, list) {
|
||||
list_for_each_entry(k, &hdev->identity_resolving_keys, list) {
|
||||
list_del_rcu(&k->list);
|
||||
kfree_rcu(k, rcu);
|
||||
}
|
||||
@ -2315,7 +2343,7 @@ void hci_blocked_keys_clear(struct hci_dev *hdev)
|
||||
{
|
||||
struct blocked_key *b;
|
||||
|
||||
list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
|
||||
list_for_each_entry(b, &hdev->blocked_keys, list) {
|
||||
list_del_rcu(&b->list);
|
||||
kfree_rcu(b, rcu);
|
||||
}
|
||||
@ -2327,7 +2355,7 @@ bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16])
|
||||
struct blocked_key *b;
|
||||
|
||||
rcu_read_lock();
|
||||
list_for_each_entry(b, &hdev->blocked_keys, list) {
|
||||
list_for_each_entry_rcu(b, &hdev->blocked_keys, list) {
|
||||
if (b->type == type && !memcmp(b->val, val, sizeof(b->val))) {
|
||||
blocked = true;
|
||||
break;
|
||||
@ -3241,6 +3269,93 @@ void hci_copy_identity_address(struct hci_dev *hdev, bdaddr_t *bdaddr,
|
||||
}
|
||||
}
|
||||
|
||||
static int hci_suspend_wait_event(struct hci_dev *hdev)
|
||||
{
|
||||
#define WAKE_COND \
|
||||
(find_first_bit(hdev->suspend_tasks, __SUSPEND_NUM_TASKS) == \
|
||||
__SUSPEND_NUM_TASKS)
|
||||
|
||||
int i;
|
||||
int ret = wait_event_timeout(hdev->suspend_wait_q,
|
||||
WAKE_COND, SUSPEND_NOTIFIER_TIMEOUT);
|
||||
|
||||
if (ret == 0) {
|
||||
bt_dev_dbg(hdev, "Timed out waiting for suspend");
|
||||
for (i = 0; i < __SUSPEND_NUM_TASKS; ++i) {
|
||||
if (test_bit(i, hdev->suspend_tasks))
|
||||
bt_dev_dbg(hdev, "Bit %d is set", i);
|
||||
clear_bit(i, hdev->suspend_tasks);
|
||||
}
|
||||
|
||||
ret = -ETIMEDOUT;
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void hci_prepare_suspend(struct work_struct *work)
|
||||
{
|
||||
struct hci_dev *hdev =
|
||||
container_of(work, struct hci_dev, suspend_prepare);
|
||||
|
||||
hci_dev_lock(hdev);
|
||||
hci_req_prepare_suspend(hdev, hdev->suspend_state_next);
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
static int hci_suspend_notifier(struct notifier_block *nb, unsigned long action,
|
||||
void *data)
|
||||
{
|
||||
struct hci_dev *hdev =
|
||||
container_of(nb, struct hci_dev, suspend_notifier);
|
||||
int ret = 0;
|
||||
|
||||
/* If powering down, wait for completion. */
|
||||
if (mgmt_powering_down(hdev)) {
|
||||
set_bit(SUSPEND_POWERING_DOWN, hdev->suspend_tasks);
|
||||
ret = hci_suspend_wait_event(hdev);
|
||||
if (ret)
|
||||
goto done;
|
||||
}
|
||||
|
||||
/* Suspend notifier should only act on events when powered. */
|
||||
if (!hdev_is_powered(hdev))
|
||||
goto done;
|
||||
|
||||
if (action == PM_SUSPEND_PREPARE) {
|
||||
/* Suspend consists of two actions:
|
||||
* - First, disconnect everything and make the controller not
|
||||
* connectable (disabling scanning)
|
||||
* - Second, program event filter/whitelist and enable scan
|
||||
*/
|
||||
hdev->suspend_state_next = BT_SUSPEND_DISCONNECT;
|
||||
set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
|
||||
queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
|
||||
ret = hci_suspend_wait_event(hdev);
|
||||
|
||||
/* If the disconnect portion failed, don't attempt to complete
|
||||
* by configuring the whitelist. The suspend notifier will
|
||||
* follow a cancelled suspend with a PM_POST_SUSPEND
|
||||
* notification.
|
||||
*/
|
||||
if (!ret) {
|
||||
hdev->suspend_state_next = BT_SUSPEND_COMPLETE;
|
||||
set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
|
||||
queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
|
||||
ret = hci_suspend_wait_event(hdev);
|
||||
}
|
||||
} else if (action == PM_POST_SUSPEND) {
|
||||
hdev->suspend_state_next = BT_RUNNING;
|
||||
set_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
|
||||
queue_work(hdev->req_workqueue, &hdev->suspend_prepare);
|
||||
ret = hci_suspend_wait_event(hdev);
|
||||
}
|
||||
|
||||
done:
|
||||
return ret ? notifier_from_errno(-EBUSY) : NOTIFY_STOP;
|
||||
}
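Taken together, the notifier above drives suspend in two passes on PM_SUSPEND_PREPARE, first disconnecting and pausing discovery/advertising/scanning, then (only if that pass completed) programming the event filter, whitelist, and low duty-cycle scan, and it unwinds everything on PM_POST_SUSPEND. A rough userspace-style sketch of that flow follows; the names are purely hypothetical stand-ins for the request helpers, not the kernel API.

#include <stdio.h>

enum bt_suspend_state { RUNNING, DISCONNECT, COMPLETE };

/* Stub standing in for the prepare-suspend work plus the wait on
 * the suspend task bitmap.
 */
static int enter(enum bt_suspend_state s)
{
	printf("enter state %d\n", s);
	return 1;	/* pretend the controller finished in time */
}

static void on_suspend_prepare(void)
{
	/* Pass 1: disconnect everything, pause discovery/advertising/scanning. */
	if (!enter(DISCONNECT))
		return;	/* a cancelled suspend is followed by POST_SUSPEND anyway */
	/* Pass 2: program the event filter/whitelist and low duty-cycle scan. */
	enter(COMPLETE);
}

static void on_post_suspend(void)
{
	/* Clear filters, restore scanning, unpause advertising and discovery. */
	enter(RUNNING);
}

int main(void)
{
	on_suspend_prepare();
	on_post_suspend();
	return 0;
}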
|
||||
/* Alloc HCI device */
|
||||
struct hci_dev *hci_alloc_dev(void)
|
||||
{
|
||||
@ -3299,6 +3414,7 @@ struct hci_dev *hci_alloc_dev(void)
|
||||
INIT_LIST_HEAD(&hdev->mgmt_pending);
|
||||
INIT_LIST_HEAD(&hdev->blacklist);
|
||||
INIT_LIST_HEAD(&hdev->whitelist);
|
||||
INIT_LIST_HEAD(&hdev->wakeable);
|
||||
INIT_LIST_HEAD(&hdev->uuids);
|
||||
INIT_LIST_HEAD(&hdev->link_keys);
|
||||
INIT_LIST_HEAD(&hdev->long_term_keys);
|
||||
@ -3318,6 +3434,7 @@ struct hci_dev *hci_alloc_dev(void)
|
||||
INIT_WORK(&hdev->tx_work, hci_tx_work);
|
||||
INIT_WORK(&hdev->power_on, hci_power_on);
|
||||
INIT_WORK(&hdev->error_reset, hci_error_reset);
|
||||
INIT_WORK(&hdev->suspend_prepare, hci_prepare_suspend);
|
||||
|
||||
INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
|
||||
|
||||
@ -3326,6 +3443,7 @@ struct hci_dev *hci_alloc_dev(void)
|
||||
skb_queue_head_init(&hdev->raw_q);
|
||||
|
||||
init_waitqueue_head(&hdev->req_wait_q);
|
||||
init_waitqueue_head(&hdev->suspend_wait_q);
|
||||
|
||||
INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout);
|
||||
|
||||
@ -3437,6 +3555,11 @@ int hci_register_dev(struct hci_dev *hdev)
|
||||
hci_sock_dev_event(hdev, HCI_DEV_REG);
|
||||
hci_dev_hold(hdev);
|
||||
|
||||
hdev->suspend_notifier.notifier_call = hci_suspend_notifier;
|
||||
error = register_pm_notifier(&hdev->suspend_notifier);
|
||||
if (error)
|
||||
goto err_wqueue;
|
||||
|
||||
queue_work(hdev->req_workqueue, &hdev->power_on);
|
||||
|
||||
return id;
|
||||
@ -3470,6 +3593,8 @@ void hci_unregister_dev(struct hci_dev *hdev)
|
||||
|
||||
hci_dev_do_close(hdev);
|
||||
|
||||
unregister_pm_notifier(&hdev->suspend_notifier);
|
||||
|
||||
if (!test_bit(HCI_INIT, &hdev->flags) &&
|
||||
!hci_dev_test_flag(hdev, HCI_SETUP) &&
|
||||
!hci_dev_test_flag(hdev, HCI_CONFIG)) {
|
||||
@ -4387,13 +4512,16 @@ static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
{
|
||||
struct hci_sco_hdr *hdr = (void *) skb->data;
|
||||
struct hci_conn *conn;
|
||||
__u16 handle;
|
||||
__u16 handle, flags;
|
||||
|
||||
skb_pull(skb, HCI_SCO_HDR_SIZE);
|
||||
|
||||
handle = __le16_to_cpu(hdr->handle);
|
||||
flags = hci_flags(handle);
|
||||
handle = hci_handle(handle);
|
||||
|
||||
BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
|
||||
BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
|
||||
handle, flags);
|
||||
|
||||
hdev->stat.sco_rx++;
|
||||
|
||||
|
@ -901,6 +901,37 @@ static void hci_cc_read_inq_rsp_tx_power(struct hci_dev *hdev,
|
||||
hdev->inq_tx_power = rp->tx_power;
|
||||
}
|
||||
|
||||
static void hci_cc_read_def_err_data_reporting(struct hci_dev *hdev,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
struct hci_rp_read_def_err_data_reporting *rp = (void *)skb->data;
|
||||
|
||||
BT_DBG("%s status 0x%2.2x", hdev->name, rp->status);
|
||||
|
||||
if (rp->status)
|
||||
return;
|
||||
|
||||
hdev->err_data_reporting = rp->err_data_reporting;
|
||||
}
|
||||
|
||||
static void hci_cc_write_def_err_data_reporting(struct hci_dev *hdev,
|
||||
struct sk_buff *skb)
|
||||
{
|
||||
__u8 status = *((__u8 *)skb->data);
|
||||
struct hci_cp_write_def_err_data_reporting *cp;
|
||||
|
||||
BT_DBG("%s status 0x%2.2x", hdev->name, status);
|
||||
|
||||
if (status)
|
||||
return;
|
||||
|
||||
cp = hci_sent_cmd_data(hdev, HCI_OP_WRITE_DEF_ERR_DATA_REPORTING);
|
||||
if (!cp)
|
||||
return;
|
||||
|
||||
hdev->err_data_reporting = cp->err_data_reporting;
|
||||
}
|
||||
|
||||
static void hci_cc_pin_code_reply(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
{
|
||||
struct hci_rp_pin_code_reply *rp = (void *) skb->data;
|
||||
@ -2202,10 +2233,22 @@ static void hci_cs_disconnect(struct hci_dev *hdev, u8 status)
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
conn = hci_conn_hash_lookup_handle(hdev, __le16_to_cpu(cp->handle));
|
||||
if (conn)
|
||||
if (conn) {
|
||||
u8 type = conn->type;
|
||||
|
||||
mgmt_disconnect_failed(hdev, &conn->dst, conn->type,
|
||||
conn->dst_type, status);
|
||||
|
||||
/* If the disconnection failed for any reason, the upper layer
* does not retry the disconnect in the current implementation.
* Hence, we need to do some basic cleanup here and re-enable
* advertising if necessary.
*/
|
||||
hci_conn_del(conn);
|
||||
if (type == LE_LINK)
|
||||
hci_req_reenable_advertising(hdev);
|
||||
}
|
||||
|
||||
hci_dev_unlock(hdev);
|
||||
}
|
||||
|
||||
@ -2474,6 +2517,7 @@ static void hci_inquiry_result_evt(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
{
|
||||
struct hci_ev_conn_complete *ev = (void *) skb->data;
|
||||
struct inquiry_entry *ie;
|
||||
struct hci_conn *conn;
|
||||
|
||||
BT_DBG("%s", hdev->name);
|
||||
@ -2482,6 +2526,21 @@ static void hci_conn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
|
||||
conn = hci_conn_hash_lookup_ba(hdev, ev->link_type, &ev->bdaddr);
|
||||
if (!conn) {
|
||||
/* Connection may not exist if auto-connected. Check the inquiry
|
||||
* cache to see if we've already discovered this bdaddr before.
|
||||
* If found and link is an ACL type, create a connection class
|
||||
* automatically.
|
||||
*/
|
||||
ie = hci_inquiry_cache_lookup(hdev, &ev->bdaddr);
|
||||
if (ie && ev->link_type == ACL_LINK) {
|
||||
conn = hci_conn_add(hdev, ev->link_type, &ev->bdaddr,
|
||||
HCI_ROLE_SLAVE);
|
||||
if (!conn) {
|
||||
bt_dev_err(hdev, "no memory for new conn");
|
||||
goto unlock;
|
||||
}
|
||||
}
|
||||
|
||||
if (ev->link_type != SCO_LINK)
|
||||
goto unlock;
|
||||
|
||||
@ -2743,6 +2802,14 @@ static void hci_disconn_complete_evt(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
hci_disconn_cfm(conn, ev->reason);
|
||||
hci_conn_del(conn);
|
||||
|
||||
/* The suspend notifier is waiting for all devices to disconnect so
|
||||
* clear the bit from pending tasks and inform the wait queue.
|
||||
*/
|
||||
if (list_empty(&hdev->conn_hash.list) &&
|
||||
test_and_clear_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks)) {
|
||||
wake_up(&hdev->suspend_wait_q);
|
||||
}
|
||||
|
||||
/* Re-enable advertising if necessary, since it might
|
||||
* have been disabled by the connection. From the
|
||||
* HCI_LE_Set_Advertise_Enable command description in
|
||||
@ -3302,6 +3369,14 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
|
||||
hci_cc_read_inq_rsp_tx_power(hdev, skb);
|
||||
break;
|
||||
|
||||
case HCI_OP_READ_DEF_ERR_DATA_REPORTING:
|
||||
hci_cc_read_def_err_data_reporting(hdev, skb);
|
||||
break;
|
||||
|
||||
case HCI_OP_WRITE_DEF_ERR_DATA_REPORTING:
|
||||
hci_cc_write_def_err_data_reporting(hdev, skb);
|
||||
break;
|
||||
|
||||
case HCI_OP_PIN_CODE_REPLY:
|
||||
hci_cc_pin_code_reply(hdev, skb);
|
||||
break;
|
||||
@ -4557,6 +4632,16 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
|
||||
goto confirm;
|
||||
}
|
||||
|
||||
/* If a link key already exists on the local host, leave the
* decision to user space since the remote device could be
* legitimate or malicious.
*/
|
||||
if (hci_find_link_key(hdev, &ev->bdaddr)) {
|
||||
bt_dev_dbg(hdev, "Local host already has link key");
|
||||
confirm_hint = 1;
|
||||
goto confirm;
|
||||
}
|
||||
|
||||
BT_DBG("Auto-accept of user confirmation with %ums delay",
|
||||
hdev->auto_accept_delay);
|
||||
|
||||
@ -5858,6 +5943,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
u8 status = 0, event = hdr->evt, req_evt = 0;
|
||||
u16 opcode = HCI_OP_NOP;
|
||||
|
||||
if (!event) {
|
||||
bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
|
||||
struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
|
||||
opcode = __le16_to_cpu(cmd_hdr->opcode);
|
||||
@ -6069,6 +6159,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
|
||||
req_complete_skb(hdev, status, opcode, orig_skb);
|
||||
}
|
||||
|
||||
done:
|
||||
kfree_skb(orig_skb);
|
||||
kfree_skb(skb);
|
||||
hdev->stat.evt_rx++;
|
||||
|
@ -34,6 +34,9 @@
|
||||
#define HCI_REQ_PEND 1
|
||||
#define HCI_REQ_CANCELED 2
|
||||
|
||||
#define LE_SUSPEND_SCAN_WINDOW 0x0012
|
||||
#define LE_SUSPEND_SCAN_INTERVAL 0x0060
|
||||
|
||||
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
|
||||
{
|
||||
skb_queue_head_init(&req->cmd_q);
|
||||
@ -654,6 +657,11 @@ void hci_req_add_le_scan_disable(struct hci_request *req)
|
||||
{
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
|
||||
if (hdev->scanning_paused) {
|
||||
bt_dev_dbg(hdev, "Scanning is paused for suspend");
|
||||
return;
|
||||
}
|
||||
|
||||
if (use_ext_scan(hdev)) {
|
||||
struct hci_cp_le_set_ext_scan_enable cp;
|
||||
|
||||
@ -670,15 +678,55 @@ void hci_req_add_le_scan_disable(struct hci_request *req)
|
||||
}
|
||||
}
|
||||
|
||||
static void add_to_white_list(struct hci_request *req,
|
||||
struct hci_conn_params *params)
|
||||
static void del_from_white_list(struct hci_request *req, bdaddr_t *bdaddr,
|
||||
u8 bdaddr_type)
|
||||
{
|
||||
struct hci_cp_le_del_from_white_list cp;
|
||||
|
||||
cp.bdaddr_type = bdaddr_type;
|
||||
bacpy(&cp.bdaddr, bdaddr);
|
||||
|
||||
bt_dev_dbg(req->hdev, "Remove %pMR (0x%x) from whitelist", &cp.bdaddr,
|
||||
cp.bdaddr_type);
|
||||
hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, sizeof(cp), &cp);
|
||||
}
|
||||
|
||||
/* Adds connection to white list if needed. On error, returns -1. */
|
||||
static int add_to_white_list(struct hci_request *req,
|
||||
struct hci_conn_params *params, u8 *num_entries,
|
||||
bool allow_rpa)
|
||||
{
|
||||
struct hci_cp_le_add_to_white_list cp;
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
|
||||
/* Already in white list */
|
||||
if (hci_bdaddr_list_lookup(&hdev->le_white_list, ¶ms->addr,
|
||||
params->addr_type))
|
||||
return 0;
|
||||
|
||||
/* Select filter policy to accept all advertising */
|
||||
if (*num_entries >= hdev->le_white_list_size)
|
||||
return -1;
|
||||
|
||||
/* White list can not be used with RPAs */
|
||||
if (!allow_rpa &&
|
||||
hci_find_irk_by_addr(hdev, ¶ms->addr, params->addr_type)) {
|
||||
return -1;
|
||||
}
|
||||
|
||||
/* During suspend, only wakeable devices can be in whitelist */
|
||||
if (hdev->suspended && !params->wakeable)
|
||||
return 0;
|
||||
|
||||
*num_entries += 1;
|
||||
cp.bdaddr_type = params->addr_type;
|
||||
bacpy(&cp.bdaddr, ¶ms->addr);
|
||||
|
||||
bt_dev_dbg(hdev, "Add %pMR (0x%x) to whitelist", &cp.bdaddr,
|
||||
cp.bdaddr_type);
|
||||
hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
|
||||
|
||||
return 0;
|
||||
}
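The admission rules in the new add_to_white_list() come down to: skip devices already programmed, fail once the controller's whitelist is full, fail for RPA-using devices unless RPAs are explicitly allowed, and while suspended silently skip anything not marked wakeable. A small standalone sketch of that decision order follows; the types and names are hypothetical stand-ins, not the kernel structures.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical inputs modelling the checks made per candidate device. */
struct candidate {
	bool already_in_list;
	bool uses_rpa;	/* an IRK is known, so the address may be resolvable-private */
	bool wakeable;
};

/* Returns 0 when handled (added or intentionally skipped), -1 when the
 * whitelist cannot be used and the caller must fall back to accept-all.
 */
static int admit(const struct candidate *c, unsigned int *entries,
		 unsigned int capacity, bool allow_rpa, bool suspended)
{
	if (c->already_in_list)
		return 0;
	if (*entries >= capacity)
		return -1;
	if (!allow_rpa && c->uses_rpa)
		return -1;
	if (suspended && !c->wakeable)
		return 0;	/* silently skipped while suspended */
	(*entries)++;
	return 0;
}

int main(void)
{
	struct candidate dev = { .uses_rpa = true };
	unsigned int n = 0;

	printf("admit -> %d (allow_rpa=false)\n", admit(&dev, &n, 8, false, false));
	printf("admit -> %d (allow_rpa=true)\n", admit(&dev, &n, 8, true, false));
	return 0;
}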
|
||||
|
||||
static u8 update_white_list(struct hci_request *req)
|
||||
@ -686,7 +734,14 @@ static u8 update_white_list(struct hci_request *req)
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
struct hci_conn_params *params;
|
||||
struct bdaddr_list *b;
|
||||
uint8_t white_list_entries = 0;
|
||||
u8 num_entries = 0;
|
||||
bool pend_conn, pend_report;
|
||||
/* We allow whitelisting even with RPAs in suspend. In the worst case,
* we won't be able to wake from devices that use the privacy 1.2
* features. Additionally, once we support privacy 1.2 and IRK
* offloading, we can update this to also check for those conditions.
*/
|
||||
bool allow_rpa = hdev->suspended;
|
||||
|
||||
/* Go through the current white list programmed into the
|
||||
* controller one by one and check if that address is still
|
||||
@ -695,29 +750,28 @@ static u8 update_white_list(struct hci_request *req)
|
||||
* command to remove it from the controller.
|
||||
*/
|
||||
list_for_each_entry(b, &hdev->le_white_list, list) {
|
||||
/* If the device is neither in pend_le_conns nor
|
||||
* pend_le_reports then remove it from the whitelist.
|
||||
pend_conn = hci_pend_le_action_lookup(&hdev->pend_le_conns,
|
||||
&b->bdaddr,
|
||||
b->bdaddr_type);
|
||||
pend_report = hci_pend_le_action_lookup(&hdev->pend_le_reports,
|
||||
&b->bdaddr,
|
||||
b->bdaddr_type);
|
||||
|
||||
/* If the device is not likely to connect or report,
|
||||
* remove it from the whitelist.
|
||||
*/
|
||||
if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
|
||||
&b->bdaddr, b->bdaddr_type) &&
|
||||
!hci_pend_le_action_lookup(&hdev->pend_le_reports,
|
||||
&b->bdaddr, b->bdaddr_type)) {
|
||||
struct hci_cp_le_del_from_white_list cp;
|
||||
|
||||
cp.bdaddr_type = b->bdaddr_type;
|
||||
bacpy(&cp.bdaddr, &b->bdaddr);
|
||||
|
||||
hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
|
||||
sizeof(cp), &cp);
|
||||
if (!pend_conn && !pend_report) {
|
||||
del_from_white_list(req, &b->bdaddr, b->bdaddr_type);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
|
||||
/* White list can not be used with RPAs */
|
||||
/* White list can not be used with RPAs */
|
||||
if (!allow_rpa &&
|
||||
hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
|
||||
return 0x00;
|
||||
}
|
||||
|
||||
white_list_entries++;
|
||||
num_entries++;
|
||||
}
|
||||
|
||||
/* Since all no longer valid white list entries have been
|
||||
@ -731,47 +785,17 @@ static u8 update_white_list(struct hci_request *req)
|
||||
* white list.
|
||||
*/
|
||||
list_for_each_entry(params, &hdev->pend_le_conns, action) {
|
||||
if (hci_bdaddr_list_lookup(&hdev->le_white_list,
|
||||
¶ms->addr, params->addr_type))
|
||||
continue;
|
||||
|
||||
if (white_list_entries >= hdev->le_white_list_size) {
|
||||
/* Select filter policy to accept all advertising */
|
||||
if (add_to_white_list(req, params, &num_entries, allow_rpa))
|
||||
return 0x00;
|
||||
}
|
||||
|
||||
if (hci_find_irk_by_addr(hdev, ¶ms->addr,
|
||||
params->addr_type)) {
|
||||
/* White list can not be used with RPAs */
|
||||
return 0x00;
|
||||
}
|
||||
|
||||
white_list_entries++;
|
||||
add_to_white_list(req, params);
|
||||
}
|
||||
|
||||
/* After adding all new pending connections, walk through
|
||||
* the list of pending reports and also add these to the
|
||||
* white list if there is still space.
|
||||
* white list if there is still space. Abort if space runs out.
|
||||
*/
|
||||
list_for_each_entry(params, &hdev->pend_le_reports, action) {
|
||||
if (hci_bdaddr_list_lookup(&hdev->le_white_list,
|
||||
¶ms->addr, params->addr_type))
|
||||
continue;
|
||||
|
||||
if (white_list_entries >= hdev->le_white_list_size) {
|
||||
/* Select filter policy to accept all advertising */
|
||||
if (add_to_white_list(req, params, &num_entries, allow_rpa))
|
||||
return 0x00;
|
||||
}
|
||||
|
||||
if (hci_find_irk_by_addr(hdev, ¶ms->addr,
|
||||
params->addr_type)) {
|
||||
/* White list can not be used with RPAs */
|
||||
return 0x00;
|
||||
}
|
||||
|
||||
white_list_entries++;
|
||||
add_to_white_list(req, params);
|
||||
}
|
||||
|
||||
/* Select filter policy to use white list */
|
||||
@ -866,6 +890,12 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
u8 own_addr_type;
|
||||
u8 filter_policy;
|
||||
u8 window, interval;
|
||||
|
||||
if (hdev->scanning_paused) {
|
||||
bt_dev_dbg(hdev, "Scanning is paused for suspend");
|
||||
return;
|
||||
}
|
||||
|
||||
/* Set require_privacy to false since no SCAN_REQ are sent
* during passive scanning. Not using a non-resolvable address
|
||||
@ -896,8 +926,17 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
|
||||
(hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
|
||||
filter_policy |= 0x02;
|
||||
|
||||
hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
|
||||
hdev->le_scan_window, own_addr_type, filter_policy);
|
||||
if (hdev->suspended) {
|
||||
window = LE_SUSPEND_SCAN_WINDOW;
|
||||
interval = LE_SUSPEND_SCAN_INTERVAL;
|
||||
} else {
|
||||
window = hdev->le_scan_window;
|
||||
interval = hdev->le_scan_interval;
|
||||
}
|
||||
|
||||
bt_dev_dbg(hdev, "LE passive scan with whitelist = %d", filter_policy);
|
||||
hci_req_start_scan(req, LE_SCAN_PASSIVE, interval, window,
|
||||
own_addr_type, filter_policy);
|
||||
}
|
||||
|
||||
static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
|
||||
@ -918,6 +957,187 @@ static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
|
||||
return adv_instance->scan_rsp_len;
|
||||
}
|
||||
|
||||
static void hci_req_clear_event_filter(struct hci_request *req)
|
||||
{
|
||||
struct hci_cp_set_event_filter f;
|
||||
|
||||
memset(&f, 0, sizeof(f));
|
||||
f.flt_type = HCI_FLT_CLEAR_ALL;
|
||||
hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &f);
|
||||
|
||||
/* Update page scan state (since we may have modified it when setting
|
||||
* the event filter).
|
||||
*/
|
||||
__hci_req_update_scan(req);
|
||||
}
|
||||
|
||||
static void hci_req_set_event_filter(struct hci_request *req)
|
||||
{
|
||||
struct bdaddr_list *b;
|
||||
struct hci_cp_set_event_filter f;
|
||||
struct hci_dev *hdev = req->hdev;
|
||||
u8 scan;
|
||||
|
||||
/* Always clear event filter when starting */
|
||||
hci_req_clear_event_filter(req);
|
||||
|
||||
list_for_each_entry(b, &hdev->wakeable, list) {
|
||||
memset(&f, 0, sizeof(f));
|
||||
bacpy(&f.addr_conn_flt.bdaddr, &b->bdaddr);
|
||||
f.flt_type = HCI_FLT_CONN_SETUP;
|
||||
f.cond_type = HCI_CONN_SETUP_ALLOW_BDADDR;
|
||||
f.addr_conn_flt.auto_accept = HCI_CONN_SETUP_AUTO_ON;
|
||||
|
||||
bt_dev_dbg(hdev, "Adding event filters for %pMR", &b->bdaddr);
|
||||
hci_req_add(req, HCI_OP_SET_EVENT_FLT, sizeof(f), &f);
|
||||
}
|
||||
|
||||
scan = !list_empty(&hdev->wakeable) ? SCAN_PAGE : SCAN_DISABLED;
|
||||
hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
|
||||
}
|
||||
|
||||
static void hci_req_config_le_suspend_scan(struct hci_request *req)
|
||||
{
|
||||
/* Can't change params without disabling first */
|
||||
hci_req_add_le_scan_disable(req);
|
||||
|
||||
/* Configure params and enable scanning */
|
||||
hci_req_add_le_passive_scan(req);
|
||||
|
||||
/* Block suspend notifier on response */
|
||||
set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
|
||||
}
|
||||
|
||||
static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
|
||||
{
|
||||
bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
|
||||
status);
|
||||
if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
|
||||
test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
|
||||
wake_up(&hdev->suspend_wait_q);
|
||||
}
|
||||
}
|
||||
|
||||
/* Call with hci_dev_lock */
|
||||
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
|
||||
{
|
||||
int old_state;
|
||||
struct hci_conn *conn;
|
||||
struct hci_request req;
|
||||
u8 page_scan;
|
||||
int disconnect_counter;
|
||||
|
||||
if (next == hdev->suspend_state) {
|
||||
bt_dev_dbg(hdev, "Same state before and after: %d", next);
|
||||
goto done;
|
||||
}
|
||||
|
||||
hdev->suspend_state = next;
|
||||
hci_req_init(&req, hdev);
|
||||
|
||||
if (next == BT_SUSPEND_DISCONNECT) {
|
||||
/* Mark device as suspended */
|
||||
hdev->suspended = true;
|
||||
|
||||
/* Pause discovery if not already stopped */
|
||||
old_state = hdev->discovery.state;
|
||||
if (old_state != DISCOVERY_STOPPED) {
|
||||
set_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks);
|
||||
hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
|
||||
queue_work(hdev->req_workqueue, &hdev->discov_update);
|
||||
}
|
||||
|
||||
hdev->discovery_paused = true;
|
||||
hdev->discovery_old_state = old_state;
|
||||
|
||||
/* Stop advertising */
|
||||
old_state = hci_dev_test_flag(hdev, HCI_ADVERTISING);
|
||||
if (old_state) {
|
||||
set_bit(SUSPEND_PAUSE_ADVERTISING, hdev->suspend_tasks);
|
||||
cancel_delayed_work(&hdev->discov_off);
|
||||
queue_delayed_work(hdev->req_workqueue,
|
||||
&hdev->discov_off, 0);
|
||||
}
|
||||
|
||||
hdev->advertising_paused = true;
|
||||
hdev->advertising_old_state = old_state;
|
||||
/* Disable page scan */
|
||||
page_scan = SCAN_DISABLED;
|
||||
hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &page_scan);
|
||||
|
||||
/* Disable LE passive scan */
|
||||
hci_req_add_le_scan_disable(&req);
|
||||
|
||||
/* Mark task needing completion */
|
||||
set_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
|
||||
|
||||
/* Prevent disconnects from causing scanning to be re-enabled */
|
||||
hdev->scanning_paused = true;
|
||||
|
||||
/* Run commands before disconnecting */
|
||||
hci_req_run(&req, suspend_req_complete);
|
||||
|
||||
disconnect_counter = 0;
|
||||
/* Soft disconnect everything (power off) */
|
||||
list_for_each_entry(conn, &hdev->conn_hash.list, list) {
|
||||
hci_disconnect(conn, HCI_ERROR_REMOTE_POWER_OFF);
|
||||
disconnect_counter++;
|
||||
}
|
||||
|
||||
if (disconnect_counter > 0) {
|
||||
bt_dev_dbg(hdev,
|
||||
"Had %d disconnects. Will wait on them",
|
||||
disconnect_counter);
|
||||
set_bit(SUSPEND_DISCONNECTING, hdev->suspend_tasks);
|
||||
}
|
||||
} else if (next == BT_SUSPEND_COMPLETE) {
|
||||
/* Unpause to take care of updating scanning params */
|
||||
hdev->scanning_paused = false;
|
||||
/* Enable event filter for paired devices */
|
||||
hci_req_set_event_filter(&req);
|
||||
/* Enable passive scan at lower duty cycle */
|
||||
hci_req_config_le_suspend_scan(&req);
|
||||
/* Pause scan changes again. */
|
||||
hdev->scanning_paused = true;
|
||||
hci_req_run(&req, suspend_req_complete);
|
||||
} else {
|
||||
hdev->suspended = false;
|
||||
hdev->scanning_paused = false;
|
||||
|
||||
hci_req_clear_event_filter(&req);
|
||||
/* Reset passive/background scanning to normal */
|
||||
hci_req_config_le_suspend_scan(&req);
|
||||
|
||||
/* Unpause advertising */
|
||||
hdev->advertising_paused = false;
|
||||
if (hdev->advertising_old_state) {
|
||||
set_bit(SUSPEND_UNPAUSE_ADVERTISING,
|
||||
hdev->suspend_tasks);
|
||||
hci_dev_set_flag(hdev, HCI_ADVERTISING);
|
||||
queue_work(hdev->req_workqueue,
|
||||
&hdev->discoverable_update);
|
||||
hdev->advertising_old_state = 0;
|
||||
}
|
||||
|
||||
/* Unpause discovery */
|
||||
hdev->discovery_paused = false;
|
||||
if (hdev->discovery_old_state != DISCOVERY_STOPPED &&
|
||||
hdev->discovery_old_state != DISCOVERY_STOPPING) {
|
||||
set_bit(SUSPEND_UNPAUSE_DISCOVERY, hdev->suspend_tasks);
|
||||
hci_discovery_set_state(hdev, DISCOVERY_STARTING);
|
||||
queue_work(hdev->req_workqueue, &hdev->discov_update);
|
||||
}
|
||||
|
||||
hci_req_run(&req, suspend_req_complete);
|
||||
}
|
||||
|
||||
hdev->suspend_state = next;
|
||||
|
||||
done:
|
||||
clear_bit(SUSPEND_PREPARE_NOTIFIER, hdev->suspend_tasks);
|
||||
wake_up(&hdev->suspend_wait_q);
|
||||
}
|
||||
|
||||
static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
|
||||
{
|
||||
u8 instance = hdev->cur_adv_instance;
|
||||
@ -1499,7 +1719,7 @@ int hci_get_random_address(struct hci_dev *hdev, bool require_privacy,
|
||||
|
||||
err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
|
||||
if (err < 0) {
|
||||
BT_ERR("%s failed to generate new RPA", hdev->name);
|
||||
bt_dev_err(hdev, "failed to generate new RPA");
|
||||
return err;
|
||||
}
|
||||
|
||||
@ -2015,6 +2235,9 @@ void __hci_req_update_scan(struct hci_request *req)
|
||||
if (mgmt_powering_down(hdev))
|
||||
return;
|
||||
|
||||
if (hdev->scanning_paused)
|
||||
return;
|
||||
|
||||
if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
|
||||
disconnected_whitelist_entries(hdev))
|
||||
scan = SCAN_PAGE;
|
||||
@ -2504,23 +2727,6 @@ static int active_scan(struct hci_request *req, unsigned long opt)
|
||||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
||||
if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
|
||||
hci_dev_lock(hdev);
|
||||
|
||||
/* Don't let discovery abort an outgoing connection attempt
|
||||
* that's using directed advertising.
|
||||
*/
|
||||
if (hci_lookup_le_connect(hdev)) {
|
||||
hci_dev_unlock(hdev);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
cancel_adv_timeout(hdev);
|
||||
hci_dev_unlock(hdev);
|
||||
|
||||
__hci_req_disable_advertising(req);
|
||||
}
|
||||
|
||||
/* If controller is scanning, it means the background scanning is
|
||||
* running. Thus, we should temporarily stop it in order to set the
|
||||
* discovery scanning parameters.
|
||||
|
@ -68,6 +68,8 @@ void __hci_req_update_eir(struct hci_request *req);
|
||||
void hci_req_add_le_scan_disable(struct hci_request *req);
|
||||
void hci_req_add_le_passive_scan(struct hci_request *req);
|
||||
|
||||
void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next);
|
||||
|
||||
void hci_req_reenable_advertising(struct hci_dev *hdev);
|
||||
void __hci_req_enable_advertising(struct hci_request *req);
|
||||
void __hci_req_disable_advertising(struct hci_request *req);
|
||||
|
@ -1279,7 +1279,7 @@ static int hidp_session_thread(void *arg)
|
||||
add_wait_queue(sk_sleep(session->intr_sock->sk), &intr_wait);
|
||||
/* This memory barrier is paired with wq_has_sleeper(). See
* sock_poll_wait() for more information on why this is needed. */
|
||||
smp_mb();
|
||||
smp_mb__before_atomic();
|
||||
|
||||
/* notify synchronous startup that we're ready */
|
||||
atomic_inc(&session->state);
|
||||
|
@ -45,6 +45,7 @@
|
||||
#define LE_FLOWCTL_MAX_CREDITS 65535
|
||||
|
||||
bool disable_ertm;
|
||||
bool enable_ecred;
|
||||
|
||||
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
|
||||
|
||||
@ -419,6 +420,9 @@ static void l2cap_chan_timeout(struct work_struct *work)
|
||||
BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
|
||||
|
||||
mutex_lock(&conn->chan_lock);
|
||||
/* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
|
||||
* this work. No need to call l2cap_chan_hold(chan) here again.
|
||||
*/
|
||||
l2cap_chan_lock(chan);
|
||||
|
||||
if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
|
||||
@ -431,12 +435,12 @@ static void l2cap_chan_timeout(struct work_struct *work)
|
||||
|
||||
l2cap_chan_close(chan, reason);
|
||||
|
||||
l2cap_chan_unlock(chan);
|
||||
|
||||
chan->ops->close(chan);
|
||||
mutex_unlock(&conn->chan_lock);
|
||||
|
||||
l2cap_chan_unlock(chan);
|
||||
l2cap_chan_put(chan);
|
||||
|
||||
mutex_unlock(&conn->chan_lock);
|
||||
}
|
||||
|
||||
struct l2cap_chan *l2cap_chan_create(void)
|
||||
@ -532,6 +536,17 @@ static void l2cap_le_flowctl_init(struct l2cap_chan *chan, u16 tx_credits)
|
||||
skb_queue_head_init(&chan->tx_q);
|
||||
}
|
||||
|
||||
static void l2cap_ecred_init(struct l2cap_chan *chan, u16 tx_credits)
|
||||
{
|
||||
l2cap_le_flowctl_init(chan, tx_credits);
|
||||
|
||||
/* L2CAP implementations shall support a minimum MPS of 64 octets */
|
||||
if (chan->mps < L2CAP_ECRED_MIN_MPS) {
|
||||
chan->mps = L2CAP_ECRED_MIN_MPS;
|
||||
chan->rx_credits = (chan->imtu / chan->mps) + 1;
|
||||
}
|
||||
}
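l2cap_ecred_init() reuses the LE flow-control setup and, if the negotiated MPS is below the 64-octet minimum, clamps it and recomputes the initial credits as imtu / mps + 1, enough K-frames for one full MTU plus a spare. A quick worked example as a standalone sketch, with the imtu and mps values assumed for illustration:

#include <stdio.h>

#define ECRED_MIN_MPS 64	/* minimum MPS mandated for enhanced credit mode */

int main(void)
{
	unsigned int imtu = 512;	/* assumed channel MTU for the example */
	unsigned int mps = 48;		/* negotiated MPS below the minimum */
	unsigned int rx_credits;

	if (mps < ECRED_MIN_MPS)
		mps = ECRED_MIN_MPS;

	rx_credits = imtu / mps + 1;	/* 512 / 64 + 1 = 9 K-frames */
	printf("mps=%u rx_credits=%u\n", mps, rx_credits);
	return 0;
}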
|
||||
|
||||
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
|
||||
{
|
||||
BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
|
||||
@ -638,6 +653,7 @@ void l2cap_chan_del(struct l2cap_chan *chan, int err)
|
||||
break;
|
||||
|
||||
case L2CAP_MODE_LE_FLOWCTL:
|
||||
case L2CAP_MODE_EXT_FLOWCTL:
|
||||
skb_queue_purge(&chan->tx_q);
|
||||
break;
|
||||
|
||||
@ -704,6 +720,27 @@ static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
|
||||
&rsp);
|
||||
}
|
||||
|
||||
static void l2cap_chan_ecred_connect_reject(struct l2cap_chan *chan)
|
||||
{
|
||||
struct l2cap_conn *conn = chan->conn;
|
||||
struct l2cap_ecred_conn_rsp rsp;
|
||||
u16 result;
|
||||
|
||||
if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
|
||||
result = L2CAP_CR_LE_AUTHORIZATION;
|
||||
else
|
||||
result = L2CAP_CR_LE_BAD_PSM;
|
||||
|
||||
l2cap_state_change(chan, BT_DISCONN);
|
||||
|
||||
memset(&rsp, 0, sizeof(rsp));
|
||||
|
||||
rsp.result = cpu_to_le16(result);
|
||||
|
||||
l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
|
||||
&rsp);
|
||||
}
|
||||
|
||||
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
|
||||
{
|
||||
struct l2cap_conn *conn = chan->conn;
|
||||
@ -749,8 +786,16 @@ void l2cap_chan_close(struct l2cap_chan *chan, int reason)
|
||||
if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
|
||||
if (conn->hcon->type == ACL_LINK)
|
||||
l2cap_chan_connect_reject(chan);
|
||||
else if (conn->hcon->type == LE_LINK)
|
||||
l2cap_chan_le_connect_reject(chan);
|
||||
else if (conn->hcon->type == LE_LINK) {
|
||||
switch (chan->mode) {
|
||||
case L2CAP_MODE_LE_FLOWCTL:
|
||||
l2cap_chan_le_connect_reject(chan);
|
||||
break;
|
||||
case L2CAP_MODE_EXT_FLOWCTL:
|
||||
l2cap_chan_ecred_connect_reject(chan);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
l2cap_chan_del(chan, reason);
|
||||
@ -1273,8 +1318,13 @@ static void l2cap_chan_ready(struct l2cap_chan *chan)
|
||||
chan->conf_state = 0;
|
||||
__clear_chan_timer(chan);
|
||||
|
||||
if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
|
||||
chan->ops->suspend(chan);
|
||||
switch (chan->mode) {
|
||||
case L2CAP_MODE_LE_FLOWCTL:
|
||||
case L2CAP_MODE_EXT_FLOWCTL:
|
||||
if (!chan->tx_credits)
|
||||
chan->ops->suspend(chan);
|
||||
break;
|
||||
}
|
||||
|
||||
chan->state = BT_CONNECTED;
|
||||
|
||||
@ -1306,6 +1356,31 @@ static void l2cap_le_connect(struct l2cap_chan *chan)
|
||||
sizeof(req), &req);
|
||||
}
|
||||
|
||||
static void l2cap_ecred_connect(struct l2cap_chan *chan)
|
||||
{
|
||||
struct l2cap_conn *conn = chan->conn;
|
||||
struct {
|
||||
struct l2cap_ecred_conn_req req;
|
||||
__le16 scid;
|
||||
} __packed pdu;
|
||||
|
||||
if (test_and_set_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags))
|
||||
return;
|
||||
|
||||
l2cap_ecred_init(chan, 0);
|
||||
|
||||
pdu.req.psm = chan->psm;
|
||||
pdu.req.mtu = cpu_to_le16(chan->imtu);
|
||||
pdu.req.mps = cpu_to_le16(chan->mps);
|
||||
pdu.req.credits = cpu_to_le16(chan->rx_credits);
|
||||
pdu.scid = cpu_to_le16(chan->scid);
|
||||
|
||||
chan->ident = l2cap_get_ident(conn);
|
||||
|
||||
l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_CONN_REQ,
|
||||
sizeof(pdu), &pdu);
|
||||
}
|
||||
|
||||
static void l2cap_le_start(struct l2cap_chan *chan)
|
||||
{
|
||||
struct l2cap_conn *conn = chan->conn;
|
||||
@ -1318,8 +1393,12 @@ static void l2cap_le_start(struct l2cap_chan *chan)
|
||||
return;
|
||||
}
|
||||
|
||||
if (chan->state == BT_CONNECT)
|
||||
l2cap_le_connect(chan);
|
||||
if (chan->state == BT_CONNECT) {
|
||||
if (chan->mode == L2CAP_MODE_EXT_FLOWCTL)
|
||||
l2cap_ecred_connect(chan);
|
||||
else
|
||||
l2cap_le_connect(chan);
|
||||
}
|
||||
}
|
||||
|
||||
static void l2cap_start_connection(struct l2cap_chan *chan)
|
||||
@ -1737,9 +1816,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
|
||||
|
||||
l2cap_chan_del(chan, err);
|
||||
|
||||
l2cap_chan_unlock(chan);
|
||||
|
||||
chan->ops->close(chan);
|
||||
|
||||
l2cap_chan_unlock(chan);
|
||||
l2cap_chan_put(chan);
|
||||
}
|
||||
|
||||
@ -2505,6 +2584,7 @@ int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
|
||||
|
||||
switch (chan->mode) {
|
||||
case L2CAP_MODE_LE_FLOWCTL:
|
||||
case L2CAP_MODE_EXT_FLOWCTL:
|
||||
/* Check outgoing MTU */
|
||||
if (len > chan->omtu)
|
||||
return -EMSGSIZE;
|
||||
@ -3773,6 +3853,45 @@ void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
|
||||
&rsp);
|
||||
}
|
||||
|
||||
void __l2cap_ecred_conn_rsp_defer(struct l2cap_chan *chan)
|
||||
{
|
||||
struct {
|
||||
struct l2cap_ecred_conn_rsp rsp;
|
||||
__le16 dcid[5];
|
||||
} __packed pdu;
|
||||
struct l2cap_conn *conn = chan->conn;
|
||||
u16 ident = chan->ident;
|
||||
int i = 0;
|
||||
|
||||
if (!ident)
|
||||
return;
|
||||
|
||||
BT_DBG("chan %p ident %d", chan, ident);
|
||||
|
||||
pdu.rsp.mtu = cpu_to_le16(chan->imtu);
|
||||
pdu.rsp.mps = cpu_to_le16(chan->mps);
|
||||
pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
|
||||
pdu.rsp.result = cpu_to_le16(L2CAP_CR_LE_SUCCESS);
|
||||
|
||||
mutex_lock(&conn->chan_lock);
|
||||
|
||||
list_for_each_entry(chan, &conn->chan_l, list) {
|
||||
if (chan->ident != ident)
|
||||
continue;
|
||||
|
||||
/* Reset ident so only one response is sent */
|
||||
chan->ident = 0;
|
||||
|
||||
/* Include all channels pending with the same ident */
|
||||
pdu.dcid[i++] = cpu_to_le16(chan->scid);
|
||||
}
|
||||
|
||||
mutex_unlock(&conn->chan_lock);
|
||||
|
||||
l2cap_send_cmd(conn, ident, L2CAP_ECRED_CONN_RSP,
|
||||
sizeof(pdu.rsp) + i * sizeof(__le16), &pdu);
|
||||
}
|
||||
|
||||
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
|
||||
{
|
||||
struct l2cap_conn_rsp rsp;
|
||||
@ -4181,7 +4300,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
|
||||
if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
|
||||
chan->state != BT_CONNECTED) {
|
||||
cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
|
||||
chan->dcid);
|
||||
goto unlock;
|
||||
@ -4405,6 +4525,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
|
||||
return 0;
|
||||
}
|
||||
|
||||
l2cap_chan_hold(chan);
|
||||
l2cap_chan_lock(chan);
|
||||
|
||||
rsp.dcid = cpu_to_le16(chan->scid);
|
||||
@ -4413,12 +4534,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
|
||||
|
||||
chan->ops->set_shutdown(chan);
|
||||
|
||||
l2cap_chan_hold(chan);
|
||||
l2cap_chan_del(chan, ECONNRESET);
|
||||
|
||||
l2cap_chan_unlock(chan);
|
||||
|
||||
chan->ops->close(chan);
|
||||
|
||||
l2cap_chan_unlock(chan);
|
||||
l2cap_chan_put(chan);
|
||||
|
||||
mutex_unlock(&conn->chan_lock);
|
||||
@ -4450,20 +4570,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
|
||||
return 0;
|
||||
}
|
||||
|
||||
l2cap_chan_hold(chan);
|
||||
l2cap_chan_lock(chan);
|
||||
|
||||
if (chan->state != BT_DISCONN) {
|
||||
l2cap_chan_unlock(chan);
|
||||
l2cap_chan_put(chan);
|
||||
mutex_unlock(&conn->chan_lock);
|
||||
return 0;
|
||||
}
|
||||
|
||||
l2cap_chan_hold(chan);
|
||||
l2cap_chan_del(chan, 0);
|
||||
|
||||
l2cap_chan_unlock(chan);
|
||||
|
||||
chan->ops->close(chan);
|
||||
|
||||
l2cap_chan_unlock(chan);
|
||||
l2cap_chan_put(chan);
|
||||
|
||||
mutex_unlock(&conn->chan_lock);
|
||||
@ -5714,6 +5835,356 @@ static inline int l2cap_le_credits(struct l2cap_conn *conn,
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int l2cap_ecred_conn_req(struct l2cap_conn *conn,
|
||||
struct l2cap_cmd_hdr *cmd, u16 cmd_len,
|
||||
u8 *data)
|
||||
{
|
||||
struct l2cap_ecred_conn_req *req = (void *) data;
|
||||
struct {
|
||||
struct l2cap_ecred_conn_rsp rsp;
|
||||
__le16 dcid[5];
|
||||
} __packed pdu;
|
||||
struct l2cap_chan *chan, *pchan;
|
||||
u16 mtu, mps;
|
||||
__le16 psm;
|
||||
u8 result, len = 0;
|
||||
int i, num_scid;
|
||||
bool defer = false;
|
||||
|
||||
if (!enable_ecred)
|
||||
return -EINVAL;
|
||||
|
||||
if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
|
||||
result = L2CAP_CR_LE_INVALID_PARAMS;
|
||||
goto response;
|
||||
}
|
||||
|
||||
mtu = __le16_to_cpu(req->mtu);
|
||||
mps = __le16_to_cpu(req->mps);
|
||||
|
||||
if (mtu < L2CAP_ECRED_MIN_MTU || mps < L2CAP_ECRED_MIN_MPS) {
|
||||
result = L2CAP_CR_LE_UNACCEPT_PARAMS;
|
||||
goto response;
|
||||
}
|
||||
|
||||
psm = req->psm;
|
||||
|
||||
BT_DBG("psm 0x%2.2x mtu %u mps %u", __le16_to_cpu(psm), mtu, mps);
|
||||
|
||||
memset(&pdu, 0, sizeof(pdu));
|
||||
|
||||
/* Check if we have a socket listening on this psm */
|
||||
pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
|
||||
&conn->hcon->dst, LE_LINK);
|
||||
if (!pchan) {
|
||||
result = L2CAP_CR_LE_BAD_PSM;
|
||||
goto response;
|
||||
}
|
||||
|
||||
mutex_lock(&conn->chan_lock);
|
||||
l2cap_chan_lock(pchan);
|
||||
|
||||
if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
|
||||
SMP_ALLOW_STK)) {
|
||||
result = L2CAP_CR_LE_AUTHENTICATION;
|
||||
goto unlock;
|
||||
}
|
||||
|
||||
result = L2CAP_CR_LE_SUCCESS;
|
||||
cmd_len -= sizeof(req);
|
||||
num_scid = cmd_len / sizeof(u16);
|
||||
|
||||
for (i = 0; i < num_scid; i++) {
|
||||
u16 scid = __le16_to_cpu(req->scid[i]);
|
||||
|
||||
BT_DBG("scid[%d] 0x%4.4x", i, scid);
|
||||
|
||||
pdu.dcid[i] = 0x0000;
|
||||
len += sizeof(*pdu.dcid);
|
||||
|
||||
/* Check for valid dynamic CID range */
|
||||
if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
|
||||
result = L2CAP_CR_LE_INVALID_SCID;
|
||||
continue;
|
||||
}
|
||||
|
||||
/* Check if we already have channel with that dcid */
|
||||
if (__l2cap_get_chan_by_dcid(conn, scid)) {
|
||||
result = L2CAP_CR_LE_SCID_IN_USE;
|
||||
continue;
|
||||
}
|
||||
|
||||
chan = pchan->ops->new_connection(pchan);
|
||||
if (!chan) {
|
||||
result = L2CAP_CR_LE_NO_MEM;
|
||||
continue;
|
||||
}
|
||||
|
||||
bacpy(&chan->src, &conn->hcon->src);
|
||||
bacpy(&chan->dst, &conn->hcon->dst);
|
||||
chan->src_type = bdaddr_src_type(conn->hcon);
|
||||
chan->dst_type = bdaddr_dst_type(conn->hcon);
|
||||
chan->psm = psm;
|
||||
chan->dcid = scid;
|
||||
chan->omtu = mtu;
|
||||
chan->remote_mps = mps;
|
||||
|
||||
__l2cap_chan_add(conn, chan);
|
||||
|
||||
l2cap_ecred_init(chan, __le16_to_cpu(req->credits));
|
||||
|
||||
/* Init response */
|
||||
if (!pdu.rsp.credits) {
|
||||
pdu.rsp.mtu = cpu_to_le16(chan->imtu);
|
||||
pdu.rsp.mps = cpu_to_le16(chan->mps);
|
||||
pdu.rsp.credits = cpu_to_le16(chan->rx_credits);
|
||||
}
|
||||
|
||||
pdu.dcid[i] = cpu_to_le16(chan->scid);
|
||||
|
||||
__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
|
||||
|
||||
chan->ident = cmd->ident;
|
||||
|
||||
if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
|
||||
l2cap_state_change(chan, BT_CONNECT2);
|
||||
defer = true;
|
||||
chan->ops->defer(chan);
|
||||
} else {
|
||||
l2cap_chan_ready(chan);
|
||||
}
|
||||
}
|
||||
|
||||
unlock:
|
||||
l2cap_chan_unlock(pchan);
|
||||
mutex_unlock(&conn->chan_lock);
|
||||
l2cap_chan_put(pchan);
|
||||
|
||||
response:
|
||||
pdu.rsp.result = cpu_to_le16(result);
|
||||
|
||||
if (defer)
|
||||
return 0;
|
||||
|
||||
l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_CONN_RSP,
|
||||
sizeof(pdu.rsp) + len, &pdu);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int l2cap_ecred_conn_rsp(struct l2cap_conn *conn,
|
||||
struct l2cap_cmd_hdr *cmd, u16 cmd_len,
|
||||
u8 *data)
|
||||
{
|
||||
struct l2cap_ecred_conn_rsp *rsp = (void *) data;
|
||||
struct hci_conn *hcon = conn->hcon;
|
||||
u16 mtu, mps, credits, result;
|
||||
struct l2cap_chan *chan;
|
||||
int err = 0, sec_level;
|
||||
int i = 0;
|
||||
|
||||
if (cmd_len < sizeof(*rsp))
|
||||
return -EPROTO;
|
||||
|
||||
mtu = __le16_to_cpu(rsp->mtu);
|
||||
mps = __le16_to_cpu(rsp->mps);
|
||||
credits = __le16_to_cpu(rsp->credits);
|
||||
result = __le16_to_cpu(rsp->result);
|
||||
|
||||
BT_DBG("mtu %u mps %u credits %u result 0x%4.4x", mtu, mps, credits,
|
||||
result);
|
||||
|
||||
mutex_lock(&conn->chan_lock);
|
||||
|
||||
cmd_len -= sizeof(*rsp);
|
||||
|
||||
list_for_each_entry(chan, &conn->chan_l, list) {
|
||||
u16 dcid;
|
||||
|
||||
if (chan->ident != cmd->ident ||
|
||||
chan->mode != L2CAP_MODE_EXT_FLOWCTL ||
|
||||
chan->state == BT_CONNECTED)
|
||||
continue;
|
||||
|
||||
l2cap_chan_lock(chan);
|
||||
|
||||
/* Check that there is a dcid for each pending channel */
|
||||
if (cmd_len < sizeof(dcid)) {
|
||||
l2cap_chan_del(chan, ECONNREFUSED);
|
||||
l2cap_chan_unlock(chan);
|
||||
continue;
|
||||
}
|
||||
|
||||
dcid = __le16_to_cpu(rsp->dcid[i++]);
|
||||
cmd_len -= sizeof(u16);
|
||||
|
||||
BT_DBG("dcid[%d] 0x%4.4x", i, dcid);
|
||||
|
||||
/* Check if dcid is already in use */
|
||||
if (dcid && __l2cap_get_chan_by_dcid(conn, dcid)) {
|
||||
/* If a device receives a
|
||||
* L2CAP_CREDIT_BASED_CONNECTION_RSP packet with an
|
||||
* already-assigned Destination CID, then both the
|
||||
* original channel and the new channel shall be
|
||||
* immediately discarded and not used.
|
||||
*/
|
||||
l2cap_chan_del(chan, ECONNREFUSED);
|
||||
l2cap_chan_unlock(chan);
|
||||
chan = __l2cap_get_chan_by_dcid(conn, dcid);
|
||||
l2cap_chan_lock(chan);
|
||||
l2cap_chan_del(chan, ECONNRESET);
|
||||
l2cap_chan_unlock(chan);
|
||||
continue;
|
||||
}
|
||||
|
||||
switch (result) {
|
||||
case L2CAP_CR_LE_AUTHENTICATION:
|
||||
case L2CAP_CR_LE_ENCRYPTION:
|
||||
/* If we already have MITM protection we can't do
|
||||
* anything.
|
||||
*/
|
||||
if (hcon->sec_level > BT_SECURITY_MEDIUM) {
|
||||
l2cap_chan_del(chan, ECONNREFUSED);
|
||||
break;
|
||||
}
|
||||
|
||||
sec_level = hcon->sec_level + 1;
|
||||
if (chan->sec_level < sec_level)
|
||||
chan->sec_level = sec_level;
|
||||
|
||||
/* We'll need to send a new Connect Request */
|
||||
clear_bit(FLAG_ECRED_CONN_REQ_SENT, &chan->flags);
|
||||
|
||||
smp_conn_security(hcon, chan->sec_level);
|
||||
break;
|
||||
|
||||
case L2CAP_CR_LE_BAD_PSM:
|
||||
l2cap_chan_del(chan, ECONNREFUSED);
|
||||
break;
|
||||
|
||||
default:
|
||||
/* If dcid was not set it means the channel was refused */
|
||||
if (!dcid) {
|
||||
l2cap_chan_del(chan, ECONNREFUSED);
|
||||
break;
|
||||
}
|
||||
|
||||
chan->ident = 0;
|
||||
chan->dcid = dcid;
|
||||
chan->omtu = mtu;
|
||||
chan->remote_mps = mps;
|
||||
chan->tx_credits = credits;
|
||||
l2cap_chan_ready(chan);
|
||||
break;
|
||||
}
|
||||
|
||||
l2cap_chan_unlock(chan);
|
||||
}
|
||||
|
||||
mutex_unlock(&conn->chan_lock);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static inline int l2cap_ecred_reconf_req(struct l2cap_conn *conn,
|
||||
struct l2cap_cmd_hdr *cmd, u16 cmd_len,
|
||||
u8 *data)
|
||||
{
|
||||
struct l2cap_ecred_reconf_req *req = (void *) data;
|
||||
struct l2cap_ecred_reconf_rsp rsp;
|
||||
u16 mtu, mps, result;
|
||||
struct l2cap_chan *chan;
|
||||
int i, num_scid;
|
||||
|
||||
if (!enable_ecred)
|
||||
return -EINVAL;
|
||||
|
||||
if (cmd_len < sizeof(*req) || cmd_len - sizeof(*req) % sizeof(u16)) {
|
||||
result = L2CAP_CR_LE_INVALID_PARAMS;
|
||||
goto respond;
|
||||
}
|
||||
|
||||
mtu = __le16_to_cpu(req->mtu);
|
||||
mps = __le16_to_cpu(req->mps);
|
||||
|
||||
BT_DBG("mtu %u mps %u", mtu, mps);
|
||||
|
||||
if (mtu < L2CAP_ECRED_MIN_MTU) {
|
||||
result = L2CAP_RECONF_INVALID_MTU;
|
||||
goto respond;
|
||||
}
|
||||
|
||||
if (mps < L2CAP_ECRED_MIN_MPS) {
|
||||
result = L2CAP_RECONF_INVALID_MPS;
|
||||
goto respond;
|
||||
}
|
||||
|
||||
cmd_len -= sizeof(*req);
|
||||
num_scid = cmd_len / sizeof(u16);
|
||||
result = L2CAP_RECONF_SUCCESS;
|
||||
|
||||
for (i = 0; i < num_scid; i++) {
|
||||
u16 scid;
|
||||
|
||||
scid = __le16_to_cpu(req->scid[i]);
|
||||
if (!scid)
|
||||
return -EPROTO;
|
||||
|
||||
chan = __l2cap_get_chan_by_dcid(conn, scid);
|
||||
if (!chan)
|
||||
continue;
|
||||
|
||||
/* If the MTU value is decreased for any of the included
|
||||
* channels, then the receiver shall disconnect all
|
||||
* included channels.
|
||||
*/
|
||||
if (chan->omtu > mtu) {
|
||||
BT_ERR("chan %p decreased MTU %u -> %u", chan,
|
||||
chan->omtu, mtu);
|
||||
result = L2CAP_RECONF_INVALID_MTU;
|
||||
}
|
||||
|
||||
chan->omtu = mtu;
|
||||
chan->remote_mps = mps;
|
||||
}
|
||||
|
||||
respond:
|
||||
rsp.result = cpu_to_le16(result);
|
||||
|
||||
l2cap_send_cmd(conn, cmd->ident, L2CAP_ECRED_RECONF_RSP, sizeof(rsp),
|
||||
&rsp);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int l2cap_ecred_reconf_rsp(struct l2cap_conn *conn,
|
||||
struct l2cap_cmd_hdr *cmd, u16 cmd_len,
|
||||
u8 *data)
|
||||
{
|
||||
struct l2cap_chan *chan;
|
||||
struct l2cap_ecred_conn_rsp *rsp = (void *) data;
|
||||
u16 result;
|
||||
|
||||
if (cmd_len < sizeof(*rsp))
|
||||
return -EPROTO;
|
||||
|
||||
result = __le16_to_cpu(rsp->result);
|
||||
|
||||
BT_DBG("result 0x%4.4x", rsp->result);
|
||||
|
||||
if (!result)
|
||||
return 0;
|
||||
|
||||
list_for_each_entry(chan, &conn->chan_l, list) {
|
||||
if (chan->ident != cmd->ident)
|
||||
continue;
|
||||
|
||||
l2cap_chan_del(chan, ECONNRESET);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
|
||||
struct l2cap_cmd_hdr *cmd, u16 cmd_len,
|
||||
u8 *data)
|
||||
@ -5769,6 +6240,22 @@ static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
|
||||
err = l2cap_le_credits(conn, cmd, cmd_len, data);
|
||||
break;
|
||||
|
||||
case L2CAP_ECRED_CONN_REQ:
|
||||
err = l2cap_ecred_conn_req(conn, cmd, cmd_len, data);
|
||||
break;
|
||||
|
||||
case L2CAP_ECRED_CONN_RSP:
|
||||
err = l2cap_ecred_conn_rsp(conn, cmd, cmd_len, data);
|
||||
break;
|
||||
|
||||
case L2CAP_ECRED_RECONF_REQ:
|
||||
err = l2cap_ecred_reconf_req(conn, cmd, cmd_len, data);
|
||||
break;
|
||||
|
||||
case L2CAP_ECRED_RECONF_RSP:
|
||||
err = l2cap_ecred_reconf_rsp(conn, cmd, cmd_len, data);
|
||||
break;
|
||||
|
||||
case L2CAP_DISCONN_REQ:
|
||||
err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
|
||||
break;
|
||||
@ -5831,9 +6318,7 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
				     struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	struct l2cap_cmd_hdr *cmd;
	int err;

	l2cap_raw_recv(conn, skb);
@ -5841,35 +6326,34 @@ static inline void l2cap_sig_channel(struct l2cap_conn *conn,
	if (hcon->type != ACL_LINK)
		goto drop;

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;
	while (skb->len >= L2CAP_CMD_HDR_SIZE) {
		u16 len;

		cmd_len = le16_to_cpu(cmd.len);
		cmd = (void *) skb->data;
		skb_pull(skb, L2CAP_CMD_HDR_SIZE);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
		       cmd.ident);
		len = le16_to_cpu(cmd->len);

		if (cmd_len > len || !cmd.ident) {
		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len,
		       cmd->ident);

		if (len > skb->len || !cmd->ident) {
			BT_DBG("corrupted command");
			break;
		}

		err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
		err = l2cap_bredr_sig_cmd(conn, cmd, len, skb->data);
		if (err) {
			struct l2cap_cmd_rej_unk rej;

			BT_ERR("Wrong link type (%d)", err);

			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
			l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				       sizeof(rej), &rej);
		}

		data += cmd_len;
		len -= cmd_len;
		skb_pull(skb, len);
	}

drop:
@ -6814,11 +7298,13 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
	struct l2cap_le_credits pkt;
	u16 return_credits;

	return_credits = ((chan->imtu / chan->mps) + 1) - chan->rx_credits;
	return_credits = (chan->imtu / chan->mps) + 1;

	if (!return_credits)
	if (chan->rx_credits >= return_credits)
		return;

	return_credits -= chan->rx_credits;

	BT_DBG("chan %p returning %u credits to sender", chan, return_credits);

	chan->rx_credits += return_credits;
@ -6831,7 +7317,7 @@ static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
}

static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
static int l2cap_ecred_recv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

@ -6846,7 +7332,7 @@ static int l2cap_le_recv(struct l2cap_chan *chan, struct sk_buff *skb)
	return err;
}

static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
static int l2cap_ecred_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

@ -6894,7 +7380,7 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
	}

	if (skb->len == sdu_len)
		return l2cap_le_recv(chan, skb);
		return l2cap_ecred_recv(chan, skb);

	chan->sdu = skb;
	chan->sdu_len = sdu_len;
@ -6926,7 +7412,7 @@ static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		err = l2cap_le_recv(chan, chan->sdu);
		err = l2cap_ecred_recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
@ -6987,7 +7473,8 @@ static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		if (l2cap_le_data_rcv(chan, skb) < 0)
	case L2CAP_MODE_EXT_FLOWCTL:
		if (l2cap_ecred_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;
@ -7214,8 +7701,8 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));
	BT_DBG("%pMR -> %pMR (type %u) psm 0x%4.4x mode 0x%2.2x", &chan->src,
	       dst, dst_type, __le16_to_cpu(psm), chan->mode);

	hdev = hci_get_route(dst, &chan->src, chan->src_type);
	if (!hdev)
@ -7244,6 +7731,12 @@ int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
@ -7368,6 +7861,38 @@ done:
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);

static void l2cap_ecred_reconfigure(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct {
		struct l2cap_ecred_reconf_req req;
		__le16 scid;
	} pdu;

	pdu.req.mtu = cpu_to_le16(chan->imtu);
	pdu.req.mps = cpu_to_le16(chan->mps);
	pdu.scid = cpu_to_le16(chan->scid);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_ECRED_RECONF_REQ,
		       sizeof(pdu), &pdu);
}

int l2cap_chan_reconfigure(struct l2cap_chan *chan, __u16 mtu)
{
	if (chan->imtu > mtu)
		return -EINVAL;

	BT_DBG("chan %p mtu 0x%4.4x", chan, mtu);

	chan->imtu = mtu;

	l2cap_ecred_reconfigure(chan);

	return 0;
}

/* ---- L2CAP interface with lower layer (HCI) ---- */

int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
@ -7579,7 +8104,8 @@ static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
		else
			__set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
	} else if (chan->state == BT_CONNECT2 &&
		   chan->mode != L2CAP_MODE_LE_FLOWCTL) {
		   !(chan->mode == L2CAP_MODE_EXT_FLOWCTL ||
		     chan->mode == L2CAP_MODE_LE_FLOWCTL)) {
		struct l2cap_conn_rsp rsp;
		__u16 res, stat;

@ -7787,3 +8313,6 @@ void l2cap_exit(void)

module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_ecred, bool, 0644);
MODULE_PARM_DESC(enable_ecred, "Enable enhanced credit flow control mode");

@ -232,7 +232,7 @@ static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr,
		return -EINVAL;
	}

	if (chan->psm && bdaddr_type_is_le(chan->src_type))
	if (chan->psm && bdaddr_type_is_le(chan->src_type) && !chan->mode)
		chan->mode = L2CAP_MODE_LE_FLOWCTL;

	err = l2cap_chan_connect(chan, la.l2_psm, __le16_to_cpu(la.l2_cid),
@ -274,6 +274,12 @@ static int l2cap_sock_listen(struct socket *sock, int backlog)
	case L2CAP_MODE_BASIC:
	case L2CAP_MODE_LE_FLOWCTL:
		break;
	case L2CAP_MODE_EXT_FLOWCTL:
		if (!enable_ecred) {
			err = -EOPNOTSUPP;
			goto done;
		}
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
@ -427,6 +433,8 @@ static int l2cap_sock_getsockopt_old(struct socket *sock, int optname,
		opts.max_tx = chan->max_tx;
		opts.txwin_size = chan->tx_win;

		BT_DBG("mode 0x%2.2x", chan->mode);

		len = min_t(unsigned int, len, sizeof(opts));
		if (copy_to_user(optval, (char *) &opts, len))
			err = -EFAULT;
@ -499,6 +507,7 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
	struct l2cap_chan *chan = l2cap_pi(sk)->chan;
	struct bt_security sec;
	struct bt_power pwr;
	u32 phys;
	int len, err = 0;

	BT_DBG("sk %p", sk);
@ -603,6 +612,18 @@ static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname,
			err = -EFAULT;
		break;

	case BT_PHY:
		if (sk->sk_state != BT_CONNECTED) {
			err = -ENOTCONN;
			break;
		}

		phys = hci_conn_get_phy(chan->conn->hcon);

		if (put_user(phys, (u32 __user *) optval))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;
@ -694,6 +715,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
			break;
		}

		BT_DBG("mode 0x%2.2x", chan->mode);

		chan->imtu = opts.imtu;
		chan->omtu = opts.omtu;
		chan->fcs = opts.fcs;
@ -926,7 +949,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
			break;
		}

		if (sk->sk_state == BT_CONNECTED) {
		if (chan->mode == L2CAP_MODE_LE_FLOWCTL &&
		    sk->sk_state == BT_CONNECTED) {
			err = -EISCONN;
			break;
		}
@ -936,7 +960,12 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
			break;
		}

		chan->imtu = opt;
		if (chan->mode == L2CAP_MODE_EXT_FLOWCTL &&
		    sk->sk_state == BT_CONNECTED)
			err = l2cap_chan_reconfigure(chan, opt);
		else
			chan->imtu = opt;

		break;

	default:
@ -991,7 +1020,11 @@ static int l2cap_sock_recvmsg(struct socket *sock, struct msghdr *msg,

	if (sk->sk_state == BT_CONNECT2 && test_bit(BT_SK_DEFER_SETUP,
						    &bt_sk(sk)->flags)) {
		if (bdaddr_type_is_le(pi->chan->src_type)) {
		if (pi->chan->mode == L2CAP_MODE_EXT_FLOWCTL) {
			sk->sk_state = BT_CONNECTED;
			pi->chan->state = BT_CONNECTED;
			__l2cap_ecred_conn_rsp_defer(pi->chan);
		} else if (bdaddr_type_is_le(pi->chan->src_type)) {
			sk->sk_state = BT_CONNECTED;
			pi->chan->state = BT_CONNECTED;
			__l2cap_le_connect_rsp_defer(pi->chan);
@ -1042,7 +1075,7 @@ done:
}

/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 * Must be called on unlocked socket, with l2cap channel lock.
 */
static void l2cap_sock_kill(struct sock *sk)
{
@ -1193,6 +1226,7 @@ static int l2cap_sock_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	int err;
	struct l2cap_chan *chan;

	BT_DBG("sock %p, sk %p", sock, sk);

@ -1202,9 +1236,17 @@ static int l2cap_sock_release(struct socket *sock)
	bt_sock_unlink(&l2cap_sk_list, sk);

	err = l2cap_sock_shutdown(sock, 2);
	chan = l2cap_pi(sk)->chan;

	l2cap_chan_hold(chan);
	l2cap_chan_lock(chan);

	sock_orphan(sk);
	l2cap_sock_kill(sk);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);

	return err;
}

@ -1222,12 +1264,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
		BT_DBG("child chan %p state %s", chan,
		       state_to_string(chan->state));

		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		__clear_chan_timer(chan);
		l2cap_chan_close(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		l2cap_sock_kill(sk);

		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
	}
}

@ -38,7 +38,7 @@
#include "mgmt_util.h"

#define MGMT_VERSION 1
#define MGMT_REVISION 15
#define MGMT_REVISION 16

static const u16 mgmt_commands[] = {
	MGMT_OP_READ_INDEX_LIST,
@ -107,6 +107,7 @@ static const u16 mgmt_commands[] = {
	MGMT_OP_READ_EXT_INFO,
	MGMT_OP_SET_APPEARANCE,
	MGMT_OP_SET_BLOCKED_KEYS,
	MGMT_OP_SET_WIDEBAND_SPEECH,
};

static const u16 mgmt_events[] = {
@ -762,6 +763,10 @@ static u32 get_supported_settings(struct hci_dev *hdev)

		if (lmp_sc_capable(hdev))
			settings |= MGMT_SETTING_SECURE_CONN;

		if (test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED,
			     &hdev->quirks))
			settings |= MGMT_SETTING_WIDEBAND_SPEECH;
	}

	if (lmp_le_capable(hdev)) {
@ -846,6 +851,9 @@ static u32 get_current_settings(struct hci_dev *hdev)
			settings |= MGMT_SETTING_STATIC_ADDRESS;
	}

	if (hci_dev_test_flag(hdev, HCI_WIDEBAND_SPEECH_ENABLED))
		settings |= MGMT_SETTING_WIDEBAND_SPEECH;

	return settings;
}

@ -1382,6 +1390,12 @@ static int set_discoverable(struct sock *sk, struct hci_dev *hdev, void *data,
		goto failed;
	}

	if (hdev->advertising_paused) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_DISCOVERABLE,
				      MGMT_STATUS_BUSY);
		goto failed;
	}

	if (!hdev_is_powered(hdev)) {
		bool changed = false;

@ -3589,6 +3603,62 @@ static int set_blocked_keys(struct sock *sk, struct hci_dev *hdev, void *data,
				err, NULL, 0);
}

static int set_wideband_speech(struct sock *sk, struct hci_dev *hdev,
			       void *data, u16 len)
{
	struct mgmt_mode *cp = data;
	int err;
	bool changed = false;

	BT_DBG("request for %s", hdev->name);

	if (!test_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks))
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_NOT_SUPPORTED);

	if (cp->val != 0x00 && cp->val != 0x01)
		return mgmt_cmd_status(sk, hdev->id,
				       MGMT_OP_SET_WIDEBAND_SPEECH,
				       MGMT_STATUS_INVALID_PARAMS);

	hci_dev_lock(hdev);

	if (pending_find(MGMT_OP_SET_WIDEBAND_SPEECH, hdev)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_BUSY);
		goto unlock;
	}

	if (hdev_is_powered(hdev) &&
	    !!cp->val != hci_dev_test_flag(hdev,
					   HCI_WIDEBAND_SPEECH_ENABLED)) {
		err = mgmt_cmd_status(sk, hdev->id,
				      MGMT_OP_SET_WIDEBAND_SPEECH,
				      MGMT_STATUS_REJECTED);
		goto unlock;
	}

	if (cp->val)
		changed = !hci_dev_test_and_set_flag(hdev,
						     HCI_WIDEBAND_SPEECH_ENABLED);
	else
		changed = hci_dev_test_and_clear_flag(hdev,
						      HCI_WIDEBAND_SPEECH_ENABLED);

	err = send_settings_rsp(sk, MGMT_OP_SET_WIDEBAND_SPEECH, hdev);
	if (err < 0)
		goto unlock;

	if (changed)
		err = new_settings(hdev, sk);

unlock:
	hci_dev_unlock(hdev);
	return err;
}

static void read_local_oob_data_complete(struct hci_dev *hdev, u8 status,
					 u16 opcode, struct sk_buff *skb)
{
@ -3865,6 +3935,13 @@ void mgmt_start_discovery_complete(struct hci_dev *hdev, u8 status)
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_UNPAUSE_DISCOVERY,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}

static bool discovery_type_is_valid(struct hci_dev *hdev, uint8_t type,
@ -3926,6 +4003,13 @@ static int start_discovery_internal(struct sock *sk, struct hci_dev *hdev,
		goto failed;
	}

	/* Can't start discovery when it is paused */
	if (hdev->discovery_paused) {
		err = mgmt_cmd_complete(sk, hdev->id, op, MGMT_STATUS_BUSY,
					&cp->type, sizeof(cp->type));
		goto failed;
	}

	/* Clear the discovery filter first to free any previously
	 * allocated memory for the UUID list.
	 */
@ -4093,6 +4177,12 @@ void mgmt_stop_discovery_complete(struct hci_dev *hdev, u8 status)
	}

	hci_dev_unlock(hdev);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_DISCOVERY, hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused discovery");
		wake_up(&hdev->suspend_wait_q);
	}
}

static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
@ -4324,6 +4414,17 @@ static void set_advertising_complete(struct hci_dev *hdev, u8 status,
	if (match.sk)
		sock_put(match.sk);

	/* Handle suspend notifier */
	if (test_and_clear_bit(SUSPEND_PAUSE_ADVERTISING,
			       hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Paused advertising");
		wake_up(&hdev->suspend_wait_q);
	} else if (test_and_clear_bit(SUSPEND_UNPAUSE_ADVERTISING,
				      hdev->suspend_tasks)) {
		bt_dev_dbg(hdev, "Unpaused advertising");
		wake_up(&hdev->suspend_wait_q);
	}

	/* If "Set Advertising" was just disabled and instance advertising was
	 * set up earlier, then re-enable multi-instance advertising.
	 */
@ -4375,6 +4476,10 @@ static int set_advertising(struct sock *sk, struct hci_dev *hdev, void *data,
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_INVALID_PARAMS);

	if (hdev->advertising_paused)
		return mgmt_cmd_status(sk, hdev->id, MGMT_OP_SET_ADVERTISING,
				       MGMT_STATUS_BUSY);

	hci_dev_lock(hdev);

	val = !!cp->val;
@ -6743,8 +6848,11 @@ static int add_advertising(struct sock *sk, struct hci_dev *hdev,
	if (!err)
		err = hci_req_run(&req, add_advertising_complete);

	if (err < 0)
	if (err < 0) {
		err = mgmt_cmd_status(sk, hdev->id, MGMT_OP_ADD_ADVERTISING,
				      MGMT_STATUS_FAILED);
		mgmt_pending_remove(cmd);
	}

unlock:
	hci_dev_unlock(hdev);
@ -6990,6 +7098,7 @@ static const struct hci_mgmt_handler mgmt_handlers[] = {
	{ set_phy_configuration, MGMT_SET_PHY_CONFIGURATION_SIZE },
	{ set_blocked_keys, MGMT_OP_SET_BLOCKED_KEYS_SIZE,
				HCI_MGMT_VAR_LEN },
	{ set_wideband_speech, MGMT_SETTING_SIZE },
};

void mgmt_index_added(struct hci_dev *hdev)

@ -40,7 +40,6 @@
static bool disable_cfc;
static bool l2cap_ertm;
static int channel_mtu = -1;
static unsigned int l2cap_mtu = RFCOMM_MAX_L2CAP_MTU;

static struct task_struct *rfcomm_thread;

@ -73,8 +72,6 @@ static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s);

/* ---- RFCOMM frame parsing macros ---- */
#define __get_dlci(b) ((b & 0xfc) >> 2)
#define __get_channel(b) ((b & 0xf8) >> 3)
#define __get_dir(b) ((b & 0x04) >> 2)
#define __get_type(b) ((b & 0xef))

#define __test_ea(b) ((b & 0x01))
@ -87,7 +84,6 @@ static struct rfcomm_session *rfcomm_session_del(struct rfcomm_session *s);
#define __ctrl(type, pf) (((type & 0xef) | (pf << 4)))
#define __dlci(dir, chn) (((chn & 0x1f) << 1) | dir)
#define __srv_channel(dlci) (dlci >> 1)
#define __dir(dlci) (dlci & 0x01)

#define __len8(len) (((len) << 1) | 1)
#define __len16(len) ((len) << 1)
@ -752,7 +748,8 @@ static struct rfcomm_session *rfcomm_session_create(bdaddr_t *src,
	/* Set L2CAP options */
	sk = sock->sk;
	lock_sock(sk);
	l2cap_pi(sk)->chan->imtu = l2cap_mtu;
	/* Set MTU to 0 so L2CAP can auto select the MTU */
	l2cap_pi(sk)->chan->imtu = 0;
	l2cap_pi(sk)->chan->sec_level = sec_level;
	if (l2cap_ertm)
		l2cap_pi(sk)->chan->mode = L2CAP_MODE_ERTM;
@ -2039,7 +2036,8 @@ static int rfcomm_add_listener(bdaddr_t *ba)
	/* Set L2CAP options */
	sk = sock->sk;
	lock_sock(sk);
	l2cap_pi(sk)->chan->imtu = l2cap_mtu;
	/* Set MTU to 0 so L2CAP can auto select the MTU */
	l2cap_pi(sk)->chan->imtu = 0;
	release_sock(sk);

	/* Start listening on the socket */
@ -2237,9 +2235,6 @@ MODULE_PARM_DESC(disable_cfc, "Disable credit based flow control");
module_param(channel_mtu, int, 0644);
MODULE_PARM_DESC(channel_mtu, "Default MTU for the RFCOMM channel");

module_param(l2cap_mtu, uint, 0644);
MODULE_PARM_DESC(l2cap_mtu, "Default MTU for the L2CAP connection");

module_param(l2cap_ertm, bool, 0644);
MODULE_PARM_DESC(l2cap_ertm, "Use L2CAP ERTM mode for connection");

@ -413,10 +413,8 @@ static int __rfcomm_create_dev(struct sock *sk, void __user *arg)
	dlc = rfcomm_dlc_exists(&req.src, &req.dst, req.channel);
	if (IS_ERR(dlc))
		return PTR_ERR(dlc);
	else if (dlc) {
		rfcomm_dlc_put(dlc);
	if (dlc)
		return -EBUSY;
	}
	dlc = rfcomm_dlc_alloc(GFP_KERNEL);
	if (!dlc)
		return -ENOMEM;

@ -922,6 +922,7 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,
	struct sock *sk = sock->sk;
	int len, err = 0;
	struct bt_voice voice;
	u32 phys;

	BT_DBG("sk %p", sk);

@ -956,6 +957,18 @@ static int sco_sock_getsockopt(struct socket *sock, int level, int optname,

		break;

	case BT_PHY:
		if (sk->sk_state != BT_CONNECTED) {
			err = -ENOTCONN;
			break;
		}

		phys = hci_conn_get_phy(sco_pi(sk)->conn->hcon);

		if (put_user(phys, (u32 __user *) optval))
			err = -EFAULT;
		break;

	default:
		err = -ENOPROTOOPT;
		break;

@ -1145,7 +1145,7 @@ static void sc_generate_link_key(struct smp_chan *smp)
		return;

	if (test_bit(SMP_FLAG_CT2, &smp->flags)) {
		/* SALT = 0x00000000000000000000000000000000746D7031 */
		/* SALT = 0x000000000000000000000000746D7031 */
		const u8 salt[16] = { 0x31, 0x70, 0x6d, 0x74 };

		if (smp_h7(smp->tfm_cmac, smp->tk, salt, smp->link_key)) {
@ -1203,7 +1203,7 @@ static void sc_generate_ltk(struct smp_chan *smp)
		set_bit(SMP_FLAG_DEBUG_KEY, &smp->flags);

	if (test_bit(SMP_FLAG_CT2, &smp->flags)) {
		/* SALT = 0x00000000000000000000000000000000746D7032 */
		/* SALT = 0x000000000000000000000000746D7032 */
		const u8 salt[16] = { 0x32, 0x70, 0x6d, 0x74 };

		if (smp_h7(smp->tfm_cmac, key->val, salt, smp->tk))
@ -2115,7 +2115,7 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
	struct l2cap_chan *chan = conn->smp;
	struct smp_chan *smp = chan->data;
	struct hci_conn *hcon = conn->hcon;
	u8 *pkax, *pkbx, *na, *nb;
	u8 *pkax, *pkbx, *na, *nb, confirm_hint;
	u32 passkey;
	int err;

@ -2168,6 +2168,24 @@ static u8 smp_cmd_pairing_random(struct l2cap_conn *conn, struct sk_buff *skb)
		smp_send_cmd(conn, SMP_CMD_PAIRING_RANDOM, sizeof(smp->prnd),
			     smp->prnd);
		SMP_ALLOW_CMD(smp, SMP_CMD_DHKEY_CHECK);

		/* Only Just-Works pairing requires extra checks */
		if (smp->method != JUST_WORKS)
			goto mackey_and_ltk;

		/* If there already exists long term key in local host, leave
		 * the decision to user space since the remote device could
		 * be legitimate or malicious.
		 */
		if (hci_find_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
				 hcon->role)) {
			/* Set passkey to 0. The value can be any number since
			 * it'll be ignored anyway.
			 */
			passkey = 0;
			confirm_hint = 1;
			goto confirm;
		}
	}

mackey_and_ltk:
@ -2188,8 +2206,11 @@ mackey_and_ltk:
	if (err)
		return SMP_UNSPECIFIED;

	confirm_hint = 0;

confirm:
	err = mgmt_user_confirm_request(hcon->hdev, &hcon->dst, hcon->type,
					hcon->dst_type, passkey, 0);
					hcon->dst_type, passkey, confirm_hint);
	if (err)
		return SMP_UNSPECIFIED;