mirror of https://git.kernel.org/pub/scm/linux/kernel/git/next/linux-next.git
Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next
Johan Hedberg says:

====================
pull request: bluetooth-next 2015-10-22

Here's probably the last bluetooth-next pull request for 4.4. Among
several other changes it contains the rest of the fixes & cleanups from
the Bluetooth UnplugFest (that didn't need to be hurried to 4.3).

 - Refactoring & cleanups to 6lowpan code
 - New USB ids for two Atheros controllers and BCM43142A0 from Broadcom
 - Fix (quirk) for broken Broadcom BCM2045 controllers
 - Support for latest Apple controllers
 - Improvements to the vendor diagnostic message support

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
commit a72c9512bf

@@ -183,7 +183,7 @@ config BT_HCIBCM203X

config BT_HCIBPA10X
	tristate "HCI BPA10x USB driver"
	depends on USB
	depends on USB && BT_HCIUART
	select BT_HCIUART_H4
	help
	  Bluetooth HCI BPA10x USB driver.

@@ -93,6 +93,7 @@ static const struct usb_device_id ath3k_table[] = {
	{ USB_DEVICE(0x04CA, 0x300f) },
	{ USB_DEVICE(0x04CA, 0x3010) },
	{ USB_DEVICE(0x0930, 0x0219) },
	{ USB_DEVICE(0x0930, 0x021c) },
	{ USB_DEVICE(0x0930, 0x0220) },
	{ USB_DEVICE(0x0930, 0x0227) },
	{ USB_DEVICE(0x0b05, 0x17d0) },
@@ -104,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = {
	{ USB_DEVICE(0x0CF3, 0x311F) },
	{ USB_DEVICE(0x0cf3, 0x3121) },
	{ USB_DEVICE(0x0CF3, 0x817a) },
	{ USB_DEVICE(0x0CF3, 0x817b) },
	{ USB_DEVICE(0x0cf3, 0xe003) },
	{ USB_DEVICE(0x0CF3, 0xE004) },
	{ USB_DEVICE(0x0CF3, 0xE005) },
@@ -153,6 +155,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
	{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
@@ -164,6 +167,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
	{ USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0CF3, 0x817b), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
	{ USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },

@@ -323,7 +323,7 @@ int btbcm_initialize(struct hci_dev *hdev, char *fw_name, size_t len)
	}

	BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
		hw_name ? : "BCM", (subver & 0x7000) >> 13,
		hw_name ? : "BCM", (subver & 0xe000) >> 13,
		(subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);

	return 0;
@@ -353,7 +353,7 @@ int btbcm_finalize(struct hci_dev *hdev)
	kfree_skb(skb);

	BT_INFO("%s: BCM (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
		(subver & 0x7000) >> 13, (subver & 0x1f00) >> 8,
		(subver & 0xe000) >> 13, (subver & 0x1f00) >> 8,
		(subver & 0x00ff), rev & 0x0fff);

	btbcm_check_bdaddr(hdev);
@@ -461,7 +461,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
	}

	BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
		hw_name ? : "BCM", (subver & 0x7000) >> 13,
		hw_name ? : "BCM", (subver & 0xe000) >> 13,
		(subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);

	err = request_firmware(&fw, fw_name, &hdev->dev);
@@ -490,7 +490,7 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
	kfree_skb(skb);

	BT_INFO("%s: %s (%3.3u.%3.3u.%3.3u) build %4.4u", hdev->name,
		hw_name ? : "BCM", (subver & 0x7000) >> 13,
		hw_name ? : "BCM", (subver & 0xe000) >> 13,
		(subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);

	/* Read Local Name */
@@ -527,6 +527,15 @@ int btbcm_setup_apple(struct hci_dev *hdev)
		kfree_skb(skb);
	}

	/* Read USB Product Info */
	skb = btbcm_read_usb_product(hdev);
	if (!IS_ERR(skb)) {
		BT_INFO("%s: BCM: product %4.4x:%4.4x", hdev->name,
			get_unaligned_le16(skb->data + 1),
			get_unaligned_le16(skb->data + 3));
		kfree_skb(skb);
	}

	/* Read Local Name */
	skb = btbcm_read_local_name(hdev);
	if (!IS_ERR(skb)) {

@@ -91,6 +91,75 @@ int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
}
EXPORT_SYMBOL_GPL(btintel_set_bdaddr);

int btintel_set_diag(struct hci_dev *hdev, bool enable)
{
	struct sk_buff *skb;
	u8 param[3];
	int err;

	if (enable) {
		param[0] = 0x03;
		param[1] = 0x03;
		param[2] = 0x03;
	} else {
		param[0] = 0x00;
		param[1] = 0x00;
		param[2] = 0x00;
	}

	skb = __hci_cmd_sync(hdev, 0xfc43, 3, param, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		if (err == -ENODATA)
			goto done;
		BT_ERR("%s: Changing Intel diagnostic mode failed (%d)",
			hdev->name, err);
		return err;
	}
	kfree_skb(skb);

done:
	btintel_set_event_mask(hdev, enable);
	return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_diag);

int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable)
{
	struct sk_buff *skb;
	u8 param[2];
	int err;

	param[0] = 0x01;
	param[1] = 0x00;

	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		BT_ERR("%s: Entering Intel manufacturer mode failed (%d)",
			hdev->name, err);
		return PTR_ERR(skb);
	}
	kfree_skb(skb);

	err = btintel_set_diag(hdev, enable);

	param[0] = 0x00;
	param[1] = 0x00;

	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		BT_ERR("%s: Leaving Intel manufacturer mode failed (%d)",
			hdev->name, err);
		return PTR_ERR(skb);
	}
	kfree_skb(skb);

	return err;
}
EXPORT_SYMBOL_GPL(btintel_set_diag_mfg);

void btintel_hw_error(struct hci_dev *hdev, u8 code)
{
	struct sk_buff *skb;
@@ -216,6 +285,64 @@ int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name)
}
EXPORT_SYMBOL_GPL(btintel_load_ddc_config);

int btintel_set_event_mask(struct hci_dev *hdev, bool debug)
{
	u8 mask[8] = { 0x87, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
	struct sk_buff *skb;
	int err;

	if (debug)
		mask[1] |= 0x62;

	skb = __hci_cmd_sync(hdev, 0xfc52, 8, mask, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		BT_ERR("%s: Setting Intel event mask failed (%d)",
			hdev->name, err);
		return err;
	}
	kfree_skb(skb);

	return 0;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask);

int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
{
	struct sk_buff *skb;
	u8 param[2];
	int err;

	param[0] = 0x01;
	param[1] = 0x00;

	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		BT_ERR("%s: Entering Intel manufacturer mode failed (%d)",
			hdev->name, err);
		return PTR_ERR(skb);
	}
	kfree_skb(skb);

	err = btintel_set_event_mask(hdev, debug);

	param[0] = 0x00;
	param[1] = 0x00;

	skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		err = PTR_ERR(skb);
		BT_ERR("%s: Leaving Intel manufacturer mode failed (%d)",
			hdev->name, err);
		return PTR_ERR(skb);
	}
	kfree_skb(skb);

	return err;
}
EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg);

/* ------- REGMAP IBT SUPPORT ------- */

#define IBT_REG_MODE_8BIT 0x00

@@ -73,12 +73,16 @@ struct intel_secure_send_result {

int btintel_check_bdaddr(struct hci_dev *hdev);
int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
int btintel_set_diag(struct hci_dev *hdev, bool enable);
int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable);
void btintel_hw_error(struct hci_dev *hdev, u8 code);

void btintel_version_info(struct hci_dev *hdev, struct intel_version *ver);
int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen,
			const void *param);
int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name);
int btintel_set_event_mask(struct hci_dev *hdev, bool debug);
int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug);

struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read,
				   u16 opcode_write);
@@ -95,6 +99,16 @@ static inline int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdadd
	return -EOPNOTSUPP;
}

static inline int btintel_set_diag(struct hci_dev *hdev, bool enable)
{
	return -EOPNOTSUPP;
}

static inline int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable)
{
	return -EOPNOTSUPP;
}

static inline void btintel_hw_error(struct hci_dev *hdev, u8 code)
{
}
@@ -116,6 +130,16 @@ static inline int btintel_load_ddc_config(struct hci_dev *hdev,
	return -EOPNOTSUPP;
}

static inline int btintel_set_event_mask(struct hci_dev *hdev, bool debug)
{
	return -EOPNOTSUPP;
}

static inline int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug)
{
	return -EOPNOTSUPP;
}

static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev,
						 u16 opcode_read,
						 u16 opcode_write)
@ -60,6 +60,8 @@ static struct usb_driver btusb_driver;
|
||||
#define BTUSB_QCA_ROME 0x8000
|
||||
#define BTUSB_BCM_APPLE 0x10000
|
||||
#define BTUSB_REALTEK 0x20000
|
||||
#define BTUSB_BCM2045 0x40000
|
||||
#define BTUSB_IFNUM_2 0x80000
|
||||
|
||||
static const struct usb_device_id btusb_table[] = {
|
||||
/* Generic Bluetooth USB device */
|
||||
@ -73,7 +75,7 @@ static const struct usb_device_id btusb_table[] = {
|
||||
|
||||
/* Apple-specific (Broadcom) devices */
|
||||
{ USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01),
|
||||
.driver_info = BTUSB_BCM_APPLE },
|
||||
.driver_info = BTUSB_BCM_APPLE | BTUSB_IFNUM_2 },
|
||||
|
||||
/* MediaTek MT76x0E */
|
||||
{ USB_DEVICE(0x0e8d, 0x763f) },
|
||||
@ -124,6 +126,9 @@ static const struct usb_device_id btusb_table[] = {
|
||||
/* Broadcom BCM20702B0 (Dynex/Insignia) */
|
||||
{ USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM },
|
||||
|
||||
/* Broadcom BCM43142A0 (Foxconn/Lenovo) */
|
||||
{ USB_DEVICE(0x105b, 0xe065), .driver_info = BTUSB_BCM_PATCHRAM },
|
||||
|
||||
/* Foxconn - Hon Hai */
|
||||
{ USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01),
|
||||
.driver_info = BTUSB_BCM_PATCHRAM },
|
||||
@ -164,6 +169,9 @@ static const struct usb_device_id blacklist_table[] = {
|
||||
/* Broadcom BCM2033 without firmware */
|
||||
{ USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE },
|
||||
|
||||
/* Broadcom BCM2045 devices */
|
||||
{ USB_DEVICE(0x0a5c, 0x2045), .driver_info = BTUSB_BCM2045 },
|
||||
|
||||
/* Atheros 3011 with sflash firmware */
|
||||
{ USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE },
|
||||
{ USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE },
|
||||
@ -195,6 +203,7 @@ static const struct usb_device_id blacklist_table[] = {
|
||||
{ USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
|
||||
@ -206,6 +215,7 @@ static const struct usb_device_id blacklist_table[] = {
|
||||
{ USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0x817b), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
|
||||
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
|
||||
@ -341,12 +351,14 @@ static const struct usb_device_id blacklist_table[] = {
|
||||
#define BTUSB_FIRMWARE_FAILED 8
|
||||
#define BTUSB_BOOTING 9
|
||||
#define BTUSB_RESET_RESUME 10
|
||||
#define BTUSB_DIAG_RUNNING 11
|
||||
|
||||
struct btusb_data {
|
||||
struct hci_dev *hdev;
|
||||
struct usb_device *udev;
|
||||
struct usb_interface *intf;
|
||||
struct usb_interface *isoc;
|
||||
struct usb_interface *diag;
|
||||
|
||||
unsigned long flags;
|
||||
|
||||
@ -361,6 +373,7 @@ struct btusb_data {
|
||||
struct usb_anchor intr_anchor;
|
||||
struct usb_anchor bulk_anchor;
|
||||
struct usb_anchor isoc_anchor;
|
||||
struct usb_anchor diag_anchor;
|
||||
spinlock_t rxlock;
|
||||
|
||||
struct sk_buff *evt_skb;
|
||||
@ -372,6 +385,8 @@ struct btusb_data {
|
||||
struct usb_endpoint_descriptor *bulk_rx_ep;
|
||||
struct usb_endpoint_descriptor *isoc_tx_ep;
|
||||
struct usb_endpoint_descriptor *isoc_rx_ep;
|
||||
struct usb_endpoint_descriptor *diag_tx_ep;
|
||||
struct usb_endpoint_descriptor *diag_rx_ep;
|
||||
|
||||
__u8 cmdreq_type;
|
||||
__u8 cmdreq;
|
||||
@ -869,6 +884,92 @@ static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
|
||||
return err;
|
||||
}
|
||||
|
||||
static void btusb_diag_complete(struct urb *urb)
|
||||
{
|
||||
struct hci_dev *hdev = urb->context;
|
||||
struct btusb_data *data = hci_get_drvdata(hdev);
|
||||
int err;
|
||||
|
||||
BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status,
|
||||
urb->actual_length);
|
||||
|
||||
if (urb->status == 0) {
|
||||
struct sk_buff *skb;
|
||||
|
||||
skb = bt_skb_alloc(urb->actual_length, GFP_ATOMIC);
|
||||
if (skb) {
|
||||
memcpy(skb_put(skb, urb->actual_length),
|
||||
urb->transfer_buffer, urb->actual_length);
|
||||
hci_recv_diag(hdev, skb);
|
||||
}
|
||||
} else if (urb->status == -ENOENT) {
|
||||
/* Avoid suspend failed when usb_kill_urb */
|
||||
return;
|
||||
}
|
||||
|
||||
if (!test_bit(BTUSB_DIAG_RUNNING, &data->flags))
|
||||
return;
|
||||
|
||||
usb_anchor_urb(urb, &data->diag_anchor);
|
||||
usb_mark_last_busy(data->udev);
|
||||
|
||||
err = usb_submit_urb(urb, GFP_ATOMIC);
|
||||
if (err < 0) {
|
||||
/* -EPERM: urb is being killed;
|
||||
* -ENODEV: device got disconnected */
|
||||
if (err != -EPERM && err != -ENODEV)
|
||||
BT_ERR("%s urb %p failed to resubmit (%d)",
|
||||
hdev->name, urb, -err);
|
||||
usb_unanchor_urb(urb);
|
||||
}
|
||||
}
|
||||
|
||||
static int btusb_submit_diag_urb(struct hci_dev *hdev, gfp_t mem_flags)
|
||||
{
|
||||
struct btusb_data *data = hci_get_drvdata(hdev);
|
||||
struct urb *urb;
|
||||
unsigned char *buf;
|
||||
unsigned int pipe;
|
||||
int err, size = HCI_MAX_FRAME_SIZE;
|
||||
|
||||
BT_DBG("%s", hdev->name);
|
||||
|
||||
if (!data->diag_rx_ep)
|
||||
return -ENODEV;
|
||||
|
||||
urb = usb_alloc_urb(0, mem_flags);
|
||||
if (!urb)
|
||||
return -ENOMEM;
|
||||
|
||||
buf = kmalloc(size, mem_flags);
|
||||
if (!buf) {
|
||||
usb_free_urb(urb);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
pipe = usb_rcvbulkpipe(data->udev, data->diag_rx_ep->bEndpointAddress);
|
||||
|
||||
usb_fill_bulk_urb(urb, data->udev, pipe, buf, size,
|
||||
btusb_diag_complete, hdev);
|
||||
|
||||
urb->transfer_flags |= URB_FREE_BUFFER;
|
||||
|
||||
usb_mark_last_busy(data->udev);
|
||||
usb_anchor_urb(urb, &data->diag_anchor);
|
||||
|
||||
err = usb_submit_urb(urb, mem_flags);
|
||||
if (err < 0) {
|
||||
if (err != -EPERM && err != -ENODEV)
|
||||
BT_ERR("%s urb %p submission failed (%d)",
|
||||
hdev->name, urb, -err);
|
||||
usb_unanchor_urb(urb);
|
||||
}
|
||||
|
||||
usb_free_urb(urb);
|
||||
|
||||
return err;
|
||||
}
|
||||
|
||||
static void btusb_tx_complete(struct urb *urb)
|
||||
{
|
||||
struct sk_buff *skb = urb->context;
|
||||
@ -956,6 +1057,11 @@ static int btusb_open(struct hci_dev *hdev)
|
||||
set_bit(BTUSB_BULK_RUNNING, &data->flags);
|
||||
btusb_submit_bulk_urb(hdev, GFP_KERNEL);
|
||||
|
||||
if (data->diag) {
|
||||
if (!btusb_submit_diag_urb(hdev, GFP_KERNEL))
|
||||
set_bit(BTUSB_DIAG_RUNNING, &data->flags);
|
||||
}
|
||||
|
||||
done:
|
||||
usb_autopm_put_interface(data->intf);
|
||||
return 0;
|
||||
@ -971,6 +1077,7 @@ static void btusb_stop_traffic(struct btusb_data *data)
|
||||
usb_kill_anchored_urbs(&data->intr_anchor);
|
||||
usb_kill_anchored_urbs(&data->bulk_anchor);
|
||||
usb_kill_anchored_urbs(&data->isoc_anchor);
|
||||
usb_kill_anchored_urbs(&data->diag_anchor);
|
||||
}
|
||||
|
||||
static int btusb_close(struct hci_dev *hdev)
|
||||
@ -986,6 +1093,7 @@ static int btusb_close(struct hci_dev *hdev)
|
||||
clear_bit(BTUSB_ISOC_RUNNING, &data->flags);
|
||||
clear_bit(BTUSB_BULK_RUNNING, &data->flags);
|
||||
clear_bit(BTUSB_INTR_RUNNING, &data->flags);
|
||||
clear_bit(BTUSB_DIAG_RUNNING, &data->flags);
|
||||
|
||||
btusb_stop_traffic(data);
|
||||
btusb_free_frags(data);
|
||||
@ -1593,8 +1701,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
|
||||
BT_INFO("%s: Intel device is already patched. patch num: %02x",
|
||||
hdev->name, ver->fw_patch_num);
|
||||
kfree_skb(skb);
|
||||
btintel_check_bdaddr(hdev);
|
||||
return 0;
|
||||
goto complete;
|
||||
}
|
||||
|
||||
/* Opens the firmware patch file based on the firmware version read
|
||||
@ -1606,8 +1713,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
|
||||
fw = btusb_setup_intel_get_fw(hdev, ver);
|
||||
if (!fw) {
|
||||
kfree_skb(skb);
|
||||
btintel_check_bdaddr(hdev);
|
||||
return 0;
|
||||
goto complete;
|
||||
}
|
||||
fw_ptr = fw->data;
|
||||
|
||||
@ -1680,8 +1786,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
|
||||
BT_INFO("%s: Intel Bluetooth firmware patch completed and activated",
|
||||
hdev->name);
|
||||
|
||||
btintel_check_bdaddr(hdev);
|
||||
return 0;
|
||||
goto complete;
|
||||
|
||||
exit_mfg_disable:
|
||||
/* Disable the manufacturer mode without reset */
|
||||
@ -1696,8 +1801,7 @@ static int btusb_setup_intel(struct hci_dev *hdev)
|
||||
|
||||
BT_INFO("%s: Intel Bluetooth firmware patch completed", hdev->name);
|
||||
|
||||
btintel_check_bdaddr(hdev);
|
||||
return 0;
|
||||
goto complete;
|
||||
|
||||
exit_mfg_deactivate:
|
||||
release_firmware(fw);
|
||||
@ -1717,6 +1821,12 @@ static int btusb_setup_intel(struct hci_dev *hdev)
|
||||
BT_INFO("%s: Intel Bluetooth firmware patch completed and deactivated",
|
||||
hdev->name);
|
||||
|
||||
complete:
|
||||
/* Set the event mask for Intel specific vendor events. This enables
|
||||
* a few extra events that are useful during general operation.
|
||||
*/
|
||||
btintel_set_event_mask_mfg(hdev, false);
|
||||
|
||||
btintel_check_bdaddr(hdev);
|
||||
return 0;
|
||||
}
|
||||
@ -2006,6 +2116,15 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
|
||||
BT_INFO("%s: Secure boot is %s", hdev->name,
|
||||
params->secure_boot ? "enabled" : "disabled");
|
||||
|
||||
BT_INFO("%s: OTP lock is %s", hdev->name,
|
||||
params->otp_lock ? "enabled" : "disabled");
|
||||
|
||||
BT_INFO("%s: API lock is %s", hdev->name,
|
||||
params->api_lock ? "enabled" : "disabled");
|
||||
|
||||
BT_INFO("%s: Debug lock is %s", hdev->name,
|
||||
params->debug_lock ? "enabled" : "disabled");
|
||||
|
||||
BT_INFO("%s: Minimum firmware build %u week %u %u", hdev->name,
|
||||
params->min_fw_build_nn, params->min_fw_build_cw,
|
||||
2000 + params->min_fw_build_yy);
|
||||
@ -2222,6 +2341,15 @@ static int btusb_setup_intel_new(struct hci_dev *hdev)
|
||||
*/
|
||||
btintel_load_ddc_config(hdev, fwname);
|
||||
|
||||
/* Set the event mask for Intel specific vendor events. This enables
|
||||
* a few extra events that are useful during general operation. It
|
||||
* does not enable any debugging related events.
|
||||
*
|
||||
* The device will function correctly without these events enabled
|
||||
* and thus no need to fail the setup.
|
||||
*/
|
||||
btintel_set_event_mask(hdev, false);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -2547,19 +2675,115 @@ static int btusb_setup_qca(struct hci_dev *hdev)
|
||||
return 0;
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BT_HCIBTUSB_BCM
|
||||
static inline int __set_diag_interface(struct hci_dev *hdev)
|
||||
{
|
||||
struct btusb_data *data = hci_get_drvdata(hdev);
|
||||
struct usb_interface *intf = data->diag;
|
||||
int i;
|
||||
|
||||
if (!data->diag)
|
||||
return -ENODEV;
|
||||
|
||||
data->diag_tx_ep = NULL;
|
||||
data->diag_rx_ep = NULL;
|
||||
|
||||
for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
|
||||
struct usb_endpoint_descriptor *ep_desc;
|
||||
|
||||
ep_desc = &intf->cur_altsetting->endpoint[i].desc;
|
||||
|
||||
if (!data->diag_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) {
|
||||
data->diag_tx_ep = ep_desc;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (!data->diag_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) {
|
||||
data->diag_rx_ep = ep_desc;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
if (!data->diag_tx_ep || !data->diag_rx_ep) {
|
||||
BT_ERR("%s invalid diagnostic descriptors", hdev->name);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct urb *alloc_diag_urb(struct hci_dev *hdev, bool enable)
|
||||
{
|
||||
struct btusb_data *data = hci_get_drvdata(hdev);
|
||||
struct sk_buff *skb;
|
||||
struct urb *urb;
|
||||
unsigned int pipe;
|
||||
|
||||
if (!data->diag_tx_ep)
|
||||
return ERR_PTR(-ENODEV);
|
||||
|
||||
urb = usb_alloc_urb(0, GFP_KERNEL);
|
||||
if (!urb)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
skb = bt_skb_alloc(2, GFP_KERNEL);
|
||||
if (!skb) {
|
||||
usb_free_urb(urb);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
*skb_put(skb, 1) = 0xf0;
|
||||
*skb_put(skb, 1) = enable;
|
||||
|
||||
pipe = usb_sndbulkpipe(data->udev, data->diag_tx_ep->bEndpointAddress);
|
||||
|
||||
usb_fill_bulk_urb(urb, data->udev, pipe,
|
||||
skb->data, skb->len, btusb_tx_complete, skb);
|
||||
|
||||
skb->dev = (void *)hdev;
|
||||
|
||||
return urb;
|
||||
}
|
||||
|
||||
static int btusb_bcm_set_diag(struct hci_dev *hdev, bool enable)
|
||||
{
|
||||
struct btusb_data *data = hci_get_drvdata(hdev);
|
||||
struct urb *urb;
|
||||
|
||||
if (!data->diag)
|
||||
return -ENODEV;
|
||||
|
||||
if (!test_bit(HCI_RUNNING, &hdev->flags))
|
||||
return -ENETDOWN;
|
||||
|
||||
urb = alloc_diag_urb(hdev, enable);
|
||||
if (IS_ERR(urb))
|
||||
return PTR_ERR(urb);
|
||||
|
||||
return submit_or_queue_tx_urb(hdev, urb);
|
||||
}
|
||||
#endif
|
||||
|
||||
static int btusb_probe(struct usb_interface *intf,
|
||||
const struct usb_device_id *id)
|
||||
{
|
||||
struct usb_endpoint_descriptor *ep_desc;
|
||||
struct btusb_data *data;
|
||||
struct hci_dev *hdev;
|
||||
unsigned ifnum_base;
|
||||
int i, err;
|
||||
|
||||
BT_DBG("intf %p id %p", intf, id);
|
||||
|
||||
/* interface numbers are hardcoded in the spec */
|
||||
if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
|
||||
return -ENODEV;
|
||||
if (intf->cur_altsetting->desc.bInterfaceNumber != 0) {
|
||||
if (!(id->driver_info & BTUSB_IFNUM_2))
|
||||
return -ENODEV;
|
||||
if (intf->cur_altsetting->desc.bInterfaceNumber != 2)
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
ifnum_base = intf->cur_altsetting->desc.bInterfaceNumber;
|
||||
|
||||
if (!id->driver_info) {
|
||||
const struct usb_device_id *match;
|
||||
@ -2627,6 +2851,7 @@ static int btusb_probe(struct usb_interface *intf,
|
||||
init_usb_anchor(&data->intr_anchor);
|
||||
init_usb_anchor(&data->bulk_anchor);
|
||||
init_usb_anchor(&data->isoc_anchor);
|
||||
init_usb_anchor(&data->diag_anchor);
|
||||
spin_lock_init(&data->rxlock);
|
||||
|
||||
if (id->driver_info & BTUSB_INTEL_NEW) {
|
||||
@ -2660,33 +2885,53 @@ static int btusb_probe(struct usb_interface *intf,
|
||||
hdev->send = btusb_send_frame;
|
||||
hdev->notify = btusb_notify;
|
||||
|
||||
if (id->driver_info & BTUSB_BCM2045)
|
||||
set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
|
||||
|
||||
if (id->driver_info & BTUSB_BCM92035)
|
||||
hdev->setup = btusb_setup_bcm92035;
|
||||
|
||||
#ifdef CONFIG_BT_HCIBTUSB_BCM
|
||||
if (id->driver_info & BTUSB_BCM_PATCHRAM) {
|
||||
hdev->manufacturer = 15;
|
||||
hdev->setup = btbcm_setup_patchram;
|
||||
hdev->set_diag = btusb_bcm_set_diag;
|
||||
hdev->set_bdaddr = btbcm_set_bdaddr;
|
||||
|
||||
/* Broadcom LM_DIAG Interface numbers are hardcoded */
|
||||
data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2);
|
||||
}
|
||||
|
||||
if (id->driver_info & BTUSB_BCM_APPLE)
|
||||
if (id->driver_info & BTUSB_BCM_APPLE) {
|
||||
hdev->manufacturer = 15;
|
||||
hdev->setup = btbcm_setup_apple;
|
||||
hdev->set_diag = btusb_bcm_set_diag;
|
||||
|
||||
/* Broadcom LM_DIAG Interface numbers are hardcoded */
|
||||
data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (id->driver_info & BTUSB_INTEL) {
|
||||
hdev->manufacturer = 2;
|
||||
hdev->setup = btusb_setup_intel;
|
||||
hdev->shutdown = btusb_shutdown_intel;
|
||||
hdev->set_diag = btintel_set_diag_mfg;
|
||||
hdev->set_bdaddr = btintel_set_bdaddr;
|
||||
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
|
||||
set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
|
||||
set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
|
||||
}
|
||||
|
||||
if (id->driver_info & BTUSB_INTEL_NEW) {
|
||||
hdev->manufacturer = 2;
|
||||
hdev->send = btusb_send_frame_intel;
|
||||
hdev->setup = btusb_setup_intel_new;
|
||||
hdev->hw_error = btintel_hw_error;
|
||||
hdev->set_diag = btintel_set_diag;
|
||||
hdev->set_bdaddr = btintel_set_bdaddr;
|
||||
set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
|
||||
set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks);
|
||||
}
|
||||
|
||||
if (id->driver_info & BTUSB_MARVELL)
|
||||
@ -2697,8 +2942,10 @@ static int btusb_probe(struct usb_interface *intf,
|
||||
set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks);
|
||||
}
|
||||
|
||||
if (id->driver_info & BTUSB_INTEL_BOOT)
|
||||
if (id->driver_info & BTUSB_INTEL_BOOT) {
|
||||
hdev->manufacturer = 2;
|
||||
set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);
|
||||
}
|
||||
|
||||
if (id->driver_info & BTUSB_ATH3012) {
|
||||
hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
|
||||
@ -2727,8 +2974,8 @@ static int btusb_probe(struct usb_interface *intf,
|
||||
/* AMP controllers do not support SCO packets */
|
||||
data->isoc = NULL;
|
||||
} else {
|
||||
/* Interface numbers are hardcoded in the specification */
|
||||
data->isoc = usb_ifnum_to_if(data->udev, 1);
|
||||
/* Interface orders are hardcoded in the specification */
|
||||
data->isoc = usb_ifnum_to_if(data->udev, ifnum_base + 1);
|
||||
}
|
||||
|
||||
if (!reset)
|
||||
@ -2791,6 +3038,16 @@ static int btusb_probe(struct usb_interface *intf,
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef CONFIG_BT_HCIBTUSB_BCM
|
||||
if (data->diag) {
|
||||
if (!usb_driver_claim_interface(&btusb_driver,
|
||||
data->diag, data))
|
||||
__set_diag_interface(hdev);
|
||||
else
|
||||
data->diag = NULL;
|
||||
}
|
||||
#endif
|
||||
|
||||
err = hci_register_dev(hdev);
|
||||
if (err < 0) {
|
||||
hci_free_dev(hdev);
|
||||
@ -2818,12 +3075,25 @@ static void btusb_disconnect(struct usb_interface *intf)
|
||||
if (data->isoc)
|
||||
usb_set_intfdata(data->isoc, NULL);
|
||||
|
||||
if (data->diag)
|
||||
usb_set_intfdata(data->diag, NULL);
|
||||
|
||||
hci_unregister_dev(hdev);
|
||||
|
||||
if (intf == data->isoc)
|
||||
if (intf == data->intf) {
|
||||
if (data->isoc)
|
||||
usb_driver_release_interface(&btusb_driver, data->isoc);
|
||||
if (data->diag)
|
||||
usb_driver_release_interface(&btusb_driver, data->diag);
|
||||
} else if (intf == data->isoc) {
|
||||
if (data->diag)
|
||||
usb_driver_release_interface(&btusb_driver, data->diag);
|
||||
usb_driver_release_interface(&btusb_driver, data->intf);
|
||||
else if (data->isoc)
|
||||
usb_driver_release_interface(&btusb_driver, data->isoc);
|
||||
} else if (intf == data->diag) {
|
||||
usb_driver_release_interface(&btusb_driver, data->intf);
|
||||
if (data->isoc)
|
||||
usb_driver_release_interface(&btusb_driver, data->isoc);
|
||||
}
|
||||
|
||||
hci_free_dev(hdev);
|
||||
}
|
||||
|
@@ -243,6 +243,7 @@ static struct sk_buff *ath_dequeue(struct hci_uart *hu)
static const struct hci_uart_proto athp = {
	.id = HCI_UART_ATH3K,
	.name = "ATH3K",
	.manufacturer = 69,
	.open = ath_open,
	.close = ath_close,
	.flush = ath_flush,

@@ -259,8 +259,8 @@ static int bcm_set_diag(struct hci_dev *hdev, bool enable)
		return -ENETDOWN;

	skb = bt_skb_alloc(3, GFP_KERNEL);
	if (IS_ERR(skb))
		return PTR_ERR(skb);
	if (!skb)
		return -ENOMEM;

	*skb_put(skb, 1) = BCM_LM_DIAG_PKT;
	*skb_put(skb, 1) = 0xf0;
@@ -799,6 +799,7 @@ static int bcm_remove(struct platform_device *pdev)
static const struct hci_uart_proto bcm_proto = {
	.id = HCI_UART_BCM,
	.name = "BCM",
	.manufacturer = 15,
	.init_speed = 115200,
	.oper_speed = 4000000,
	.open = bcm_open,

@@ -557,6 +557,7 @@ static int intel_setup(struct hci_uart *hu)

	bt_dev_dbg(hdev, "start intel_setup");

	hu->hdev->set_diag = btintel_set_diag;
	hu->hdev->set_bdaddr = btintel_set_bdaddr;

	calltime = ktime_get();
@@ -1147,6 +1148,7 @@ static struct sk_buff *intel_dequeue(struct hci_uart *hu)
static const struct hci_uart_proto intel_proto = {
	.id = HCI_UART_INTEL,
	.name = "Intel",
	.manufacturer = 2,
	.init_speed = 115200,
	.oper_speed = 3000000,
	.open = intel_open,

@@ -587,6 +587,13 @@ static int hci_uart_register_dev(struct hci_uart *hu)
	hdev->bus = HCI_UART;
	hci_set_drvdata(hdev, hu);

	/* Only when vendor specific setup callback is provided, consider
	 * the manufacturer information valid. This avoids filling in the
	 * value for Ericsson when nothing is specified.
	 */
	if (hu->proto->setup)
		hdev->manufacturer = hu->proto->manufacturer;

	hdev->open = hci_uart_open;
	hdev->close = hci_uart_close;
	hdev->flush = hci_uart_flush;

@@ -947,6 +947,7 @@ static int qca_setup(struct hci_uart *hu)
static struct hci_uart_proto qca_proto = {
	.id = HCI_UART_QCA,
	.name = "QCA",
	.manufacturer = 29,
	.init_speed = 115200,
	.oper_speed = 3000000,
	.open = qca_open,

@@ -59,6 +59,7 @@ struct hci_uart;
struct hci_uart_proto {
	unsigned int id;
	const char *name;
	unsigned int manufacturer;
	unsigned int init_speed;
	unsigned int oper_speed;
	int (*open)(struct hci_uart *hu);
@ -56,85 +56,23 @@
|
||||
#include <net/ipv6.h>
|
||||
#include <net/net_namespace.h>
|
||||
|
||||
#define UIP_802154_SHORTADDR_LEN 2 /* compressed ipv6 address length */
|
||||
#define UIP_IPH_LEN 40 /* ipv6 fixed header size */
|
||||
#define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */
|
||||
#define UIP_FRAGH_LEN 8 /* ipv6 fragment header size */
|
||||
|
||||
#define EUI64_ADDR_LEN 8
|
||||
|
||||
#define LOWPAN_NHC_MAX_ID_LEN 1
|
||||
/* Maximum next header compression length which we currently support inclusive
|
||||
* possible inline data.
|
||||
*/
|
||||
#define LOWPAN_NHC_MAX_HDR_LEN (sizeof(struct udphdr))
|
||||
/* Max IPHC Header len without IPv6 hdr specific inline data.
|
||||
* Useful for getting the "extra" bytes we need at worst case compression.
|
||||
*
|
||||
* LOWPAN_IPHC + CID + LOWPAN_NHC_MAX_ID_LEN
|
||||
*/
|
||||
#define LOWPAN_IPHC_MAX_HEADER_LEN (2 + 1 + LOWPAN_NHC_MAX_ID_LEN)
|
||||
|
||||
/*
|
||||
* ipv6 address based on mac
|
||||
* second bit-flip (Universe/Local) is done according RFC2464
|
||||
*/
|
||||
#define is_addr_mac_addr_based(a, m) \
|
||||
((((a)->s6_addr[8]) == (((m)[0]) ^ 0x02)) && \
|
||||
(((a)->s6_addr[9]) == (m)[1]) && \
|
||||
(((a)->s6_addr[10]) == (m)[2]) && \
|
||||
(((a)->s6_addr[11]) == (m)[3]) && \
|
||||
(((a)->s6_addr[12]) == (m)[4]) && \
|
||||
(((a)->s6_addr[13]) == (m)[5]) && \
|
||||
(((a)->s6_addr[14]) == (m)[6]) && \
|
||||
(((a)->s6_addr[15]) == (m)[7]))
|
||||
|
||||
/*
|
||||
* check whether we can compress the IID to 16 bits,
|
||||
* it's possible for unicast adresses with first 49 bits are zero only.
|
||||
*/
|
||||
#define lowpan_is_iid_16_bit_compressable(a) \
|
||||
((((a)->s6_addr16[4]) == 0) && \
|
||||
(((a)->s6_addr[10]) == 0) && \
|
||||
(((a)->s6_addr[11]) == 0xff) && \
|
||||
(((a)->s6_addr[12]) == 0xfe) && \
|
||||
(((a)->s6_addr[13]) == 0))
|
||||
|
||||
/* check whether the 112-bit gid of the multicast address is mappable to: */
|
||||
|
||||
/* 48 bits, FFXX::00XX:XXXX:XXXX */
|
||||
#define lowpan_is_mcast_addr_compressable48(a) \
|
||||
((((a)->s6_addr16[1]) == 0) && \
|
||||
(((a)->s6_addr16[2]) == 0) && \
|
||||
(((a)->s6_addr16[3]) == 0) && \
|
||||
(((a)->s6_addr16[4]) == 0) && \
|
||||
(((a)->s6_addr[10]) == 0))
|
||||
|
||||
/* 32 bits, FFXX::00XX:XXXX */
|
||||
#define lowpan_is_mcast_addr_compressable32(a) \
|
||||
((((a)->s6_addr16[1]) == 0) && \
|
||||
(((a)->s6_addr16[2]) == 0) && \
|
||||
(((a)->s6_addr16[3]) == 0) && \
|
||||
(((a)->s6_addr16[4]) == 0) && \
|
||||
(((a)->s6_addr16[5]) == 0) && \
|
||||
(((a)->s6_addr[12]) == 0))
|
||||
|
||||
/* 8 bits, FF02::00XX */
|
||||
#define lowpan_is_mcast_addr_compressable8(a) \
|
||||
((((a)->s6_addr[1]) == 2) && \
|
||||
(((a)->s6_addr16[1]) == 0) && \
|
||||
(((a)->s6_addr16[2]) == 0) && \
|
||||
(((a)->s6_addr16[3]) == 0) && \
|
||||
(((a)->s6_addr16[4]) == 0) && \
|
||||
(((a)->s6_addr16[5]) == 0) && \
|
||||
(((a)->s6_addr16[6]) == 0) && \
|
||||
(((a)->s6_addr[14]) == 0))
|
||||
|
||||
#define lowpan_is_addr_broadcast(a) \
|
||||
((((a)[0]) == 0xFF) && \
|
||||
(((a)[1]) == 0xFF) && \
|
||||
(((a)[2]) == 0xFF) && \
|
||||
(((a)[3]) == 0xFF) && \
|
||||
(((a)[4]) == 0xFF) && \
|
||||
(((a)[5]) == 0xFF) && \
|
||||
(((a)[6]) == 0xFF) && \
|
||||
(((a)[7]) == 0xFF))
|
||||
/* Maximum worst case IPHC header buffer size */
|
||||
#define LOWPAN_IPHC_MAX_HC_BUF_LEN (sizeof(struct ipv6hdr) + \
|
||||
LOWPAN_IPHC_MAX_HEADER_LEN + \
|
||||
LOWPAN_NHC_MAX_HDR_LEN)
|
||||
|
||||
#define LOWPAN_DISPATCH_IPV6 0x41 /* 01000001 = 65 */
|
||||
#define LOWPAN_DISPATCH_IPHC 0x60 /* 011xxxxx = ... */
|
||||
@ -150,69 +88,6 @@ static inline bool lowpan_is_iphc(u8 dispatch)
|
||||
return (dispatch & LOWPAN_DISPATCH_IPHC_MASK) == LOWPAN_DISPATCH_IPHC;
|
||||
}
|
||||
|
||||
#define LOWPAN_FRAG_TIMEOUT (HZ * 60) /* time-out 60 sec */
|
||||
|
||||
#define LOWPAN_FRAG1_HEAD_SIZE 0x4
|
||||
#define LOWPAN_FRAGN_HEAD_SIZE 0x5
|
||||
|
||||
/*
|
||||
* Values of fields within the IPHC encoding first byte
|
||||
* (C stands for compressed and I for inline)
|
||||
*/
|
||||
#define LOWPAN_IPHC_TF 0x18
|
||||
|
||||
#define LOWPAN_IPHC_FL_C 0x10
|
||||
#define LOWPAN_IPHC_TC_C 0x08
|
||||
#define LOWPAN_IPHC_NH_C 0x04
|
||||
#define LOWPAN_IPHC_TTL_1 0x01
|
||||
#define LOWPAN_IPHC_TTL_64 0x02
|
||||
#define LOWPAN_IPHC_TTL_255 0x03
|
||||
#define LOWPAN_IPHC_TTL_I 0x00
|
||||
|
||||
|
||||
/* Values of fields within the IPHC encoding second byte */
|
||||
#define LOWPAN_IPHC_CID 0x80
|
||||
|
||||
#define LOWPAN_IPHC_ADDR_00 0x00
|
||||
#define LOWPAN_IPHC_ADDR_01 0x01
|
||||
#define LOWPAN_IPHC_ADDR_02 0x02
|
||||
#define LOWPAN_IPHC_ADDR_03 0x03
|
||||
|
||||
#define LOWPAN_IPHC_SAC 0x40
|
||||
#define LOWPAN_IPHC_SAM 0x30
|
||||
|
||||
#define LOWPAN_IPHC_SAM_BIT 4
|
||||
|
||||
#define LOWPAN_IPHC_M 0x08
|
||||
#define LOWPAN_IPHC_DAC 0x04
|
||||
#define LOWPAN_IPHC_DAM_00 0x00
|
||||
#define LOWPAN_IPHC_DAM_01 0x01
|
||||
#define LOWPAN_IPHC_DAM_10 0x02
|
||||
#define LOWPAN_IPHC_DAM_11 0x03
|
||||
|
||||
#define LOWPAN_IPHC_DAM_BIT 0
|
||||
/*
|
||||
* LOWPAN_UDP encoding (works together with IPHC)
|
||||
*/
|
||||
#define LOWPAN_NHC_UDP_MASK 0xF8
|
||||
#define LOWPAN_NHC_UDP_ID 0xF0
|
||||
#define LOWPAN_NHC_UDP_CHECKSUMC 0x04
|
||||
#define LOWPAN_NHC_UDP_CHECKSUMI 0x00
|
||||
|
||||
#define LOWPAN_NHC_UDP_4BIT_PORT 0xF0B0
|
||||
#define LOWPAN_NHC_UDP_4BIT_MASK 0xFFF0
|
||||
#define LOWPAN_NHC_UDP_8BIT_PORT 0xF000
|
||||
#define LOWPAN_NHC_UDP_8BIT_MASK 0xFF00
|
||||
|
||||
/* values for port compression, _with checksum_ ie bit 5 set to 0 */
|
||||
#define LOWPAN_NHC_UDP_CS_P_00 0xF0 /* all inline */
|
||||
#define LOWPAN_NHC_UDP_CS_P_01 0xF1 /* source 16bit inline,
|
||||
dest = 0xF0 + 8 bit inline */
|
||||
#define LOWPAN_NHC_UDP_CS_P_10 0xF2 /* source = 0xF0 + 8bit inline,
|
||||
dest = 16 bit inline */
|
||||
#define LOWPAN_NHC_UDP_CS_P_11 0xF3 /* source & dest = 0xF0B + 4bit inline */
|
||||
#define LOWPAN_NHC_UDP_CS_C 0x04 /* checksum elided */
|
||||
|
||||
#define LOWPAN_PRIV_SIZE(llpriv_size) \
|
||||
(sizeof(struct lowpan_priv) + llpriv_size)
|
||||
|
||||
@ -250,7 +125,7 @@ struct lowpan_802154_cb *lowpan_802154_cb(const struct sk_buff *skb)
|
||||
#ifdef DEBUG
|
||||
/* print data in line */
|
||||
static inline void raw_dump_inline(const char *caller, char *msg,
|
||||
unsigned char *buf, int len)
|
||||
const unsigned char *buf, int len)
|
||||
{
|
||||
if (msg)
|
||||
pr_debug("%s():%s: ", caller, msg);
|
||||
@ -265,7 +140,7 @@ static inline void raw_dump_inline(const char *caller, char *msg,
|
||||
* ...
|
||||
*/
|
||||
static inline void raw_dump_table(const char *caller, char *msg,
|
||||
unsigned char *buf, int len)
|
||||
const unsigned char *buf, int len)
|
||||
{
|
||||
if (msg)
|
||||
pr_debug("%s():%s:\n", caller, msg);
|
||||
@ -274,24 +149,25 @@ static inline void raw_dump_table(const char *caller, char *msg,
|
||||
}
|
||||
#else
|
||||
static inline void raw_dump_table(const char *caller, char *msg,
|
||||
unsigned char *buf, int len) { }
|
||||
const unsigned char *buf, int len) { }
|
||||
static inline void raw_dump_inline(const char *caller, char *msg,
|
||||
unsigned char *buf, int len) { }
|
||||
const unsigned char *buf, int len) { }
|
||||
#endif
|
||||
|
||||
static inline int lowpan_fetch_skb_u8(struct sk_buff *skb, u8 *val)
|
||||
{
|
||||
if (unlikely(!pskb_may_pull(skb, 1)))
|
||||
return -EINVAL;
|
||||
|
||||
*val = skb->data[0];
|
||||
skb_pull(skb, 1);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static inline bool lowpan_fetch_skb(struct sk_buff *skb,
|
||||
void *data, const unsigned int len)
|
||||
/**
|
||||
* lowpan_fetch_skb - getting inline data from 6LoWPAN header
|
||||
*
|
||||
* This function will pull data from sk buffer and put it into data to
|
||||
* remove the 6LoWPAN inline data. This function returns true if the
|
||||
* sk buffer is too small to pull the amount of data which is specified
|
||||
* by len.
|
||||
*
|
||||
* @skb: the buffer where the inline data should be pulled from.
|
||||
* @data: destination buffer for the inline data.
|
||||
* @len: amount of data which should be pulled in bytes.
|
||||
*/
|
||||
static inline bool lowpan_fetch_skb(struct sk_buff *skb, void *data,
|
||||
unsigned int len)
|
||||
{
|
||||
if (unlikely(!pskb_may_pull(skb, len)))
|
||||
return true;
|
||||
@ -311,14 +187,42 @@ static inline void lowpan_push_hc_data(u8 **hc_ptr, const void *data,
|
||||
|
||||
void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype);
|
||||
|
||||
int
|
||||
lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
|
||||
const u8 *saddr, const u8 saddr_type,
|
||||
const u8 saddr_len, const u8 *daddr,
|
||||
const u8 daddr_type, const u8 daddr_len,
|
||||
u8 iphc0, u8 iphc1);
|
||||
int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
|
||||
unsigned short type, const void *_daddr,
|
||||
const void *_saddr, unsigned int len);
|
||||
/**
|
||||
* lowpan_header_decompress - replace 6LoWPAN header with IPv6 header
|
||||
*
|
||||
* This function replaces the IPHC 6LoWPAN header which should be pointed at
|
||||
* skb->data and skb_network_header, with the IPv6 header.
|
||||
* It would be nice that the caller have the necessary headroom of IPv6 header
|
||||
* and greatest Transport layer header, this would reduce the overhead for
|
||||
* reallocate headroom.
|
||||
*
|
||||
* @skb: the buffer which should be manipulate.
|
||||
* @dev: the lowpan net device pointer.
|
||||
* @daddr: destination lladdr of mac header which is used for compression
|
||||
* methods.
|
||||
* @saddr: source lladdr of mac header which is used for compression
|
||||
* methods.
|
||||
*/
|
||||
int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
|
||||
const void *daddr, const void *saddr);
|
||||
|
||||
/**
|
||||
* lowpan_header_compress - replace IPv6 header with 6LoWPAN header
|
||||
*
|
||||
* This function replaces the IPv6 header which should be pointed at
|
||||
* skb->data and skb_network_header, with the IPHC 6LoWPAN header.
|
||||
* The caller need to be sure that the sk buffer is not shared and at have
|
||||
* at least a headroom which is smaller or equal LOWPAN_IPHC_MAX_HEADER_LEN,
|
||||
* which is the IPHC "more bytes than IPv6 header" at worst case.
|
||||
*
|
||||
* @skb: the buffer which should be manipulate.
|
||||
* @dev: the lowpan net device pointer.
|
||||
* @daddr: destination lladdr of mac header which is used for compression
|
||||
* methods.
|
||||
* @saddr: source lladdr of mac header which is used for compression
|
||||
* methods.
|
||||
*/
|
||||
int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
|
||||
const void *daddr, const void *saddr);
|
||||
|
||||
#endif /* __6LOWPAN_H__ */
|
||||
|
@@ -46,6 +46,7 @@
#define HCI_DEV_RESUME 6
#define HCI_DEV_OPEN 7
#define HCI_DEV_CLOSE 8
#define HCI_DEV_SETUP 9

/* HCI notify events */
#define HCI_NOTIFY_CONN_ADD 1
@@ -170,6 +171,15 @@ enum {
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_SIMULTANEOUS_DISCOVERY,

	/* When this quirk is set, the enabling of diagnostic mode is
	 * not persistent over HCI Reset. Every time the controller
	 * is brought up it needs to be reprogrammed.
	 *
	 * This quirk can be set before hci_register_dev is called or
	 * during the hdev->setup vendor callback.
	 */
	HCI_QUIRK_NON_PERSISTENT_DIAG,
};

/* HCI device flags */
@ -398,6 +398,7 @@ struct hci_dev {
|
||||
int (*send)(struct hci_dev *hdev, struct sk_buff *skb);
|
||||
void (*notify)(struct hci_dev *hdev, unsigned int evt);
|
||||
void (*hw_error)(struct hci_dev *hdev, u8 code);
|
||||
int (*post_init)(struct hci_dev *hdev);
|
||||
int (*set_diag)(struct hci_dev *hdev, bool enable);
|
||||
int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr);
|
||||
};
|
||||
@ -470,6 +471,7 @@ struct hci_conn {
|
||||
struct delayed_work auto_accept_work;
|
||||
struct delayed_work idle_work;
|
||||
struct delayed_work le_conn_timeout;
|
||||
struct work_struct le_scan_cleanup;
|
||||
|
||||
struct device dev;
|
||||
struct dentry *debugfs;
|
||||
@ -792,6 +794,30 @@ static inline struct hci_conn *hci_conn_hash_lookup_ba(struct hci_dev *hdev,
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev,
|
||||
bdaddr_t *ba,
|
||||
__u8 ba_type)
|
||||
{
|
||||
struct hci_conn_hash *h = &hdev->conn_hash;
|
||||
struct hci_conn *c;
|
||||
|
||||
rcu_read_lock();
|
||||
|
||||
list_for_each_entry_rcu(c, &h->list, list) {
|
||||
if (c->type != LE_LINK)
|
||||
continue;
|
||||
|
||||
if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) {
|
||||
rcu_read_unlock();
|
||||
return c;
|
||||
}
|
||||
}
|
||||
|
||||
rcu_read_unlock();
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline struct hci_conn *hci_conn_hash_lookup_state(struct hci_dev *hdev,
|
||||
__u8 type, __u16 state)
|
||||
{
|
||||
@ -1016,9 +1042,6 @@ void hci_conn_params_clear_disabled(struct hci_dev *hdev);
|
||||
struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
|
||||
bdaddr_t *addr,
|
||||
u8 addr_type);
|
||||
struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
|
||||
bdaddr_t *addr,
|
||||
u8 addr_type);
|
||||
|
||||
void hci_uuids_clear(struct hci_dev *hdev);
|
||||
|
||||
@ -1458,7 +1481,7 @@ void mgmt_remote_name(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 link_type,
|
||||
void mgmt_discovering(struct hci_dev *hdev, u8 discovering);
|
||||
bool mgmt_powering_down(struct hci_dev *hdev);
|
||||
void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent);
|
||||
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk);
|
||||
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent);
|
||||
void mgmt_new_csrk(struct hci_dev *hdev, struct smp_csrk *csrk,
|
||||
bool persistent);
|
||||
void mgmt_new_conn_param(struct hci_dev *hdev, bdaddr_t *bdaddr,
|
||||
|
@@ -276,6 +276,16 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
	__put_unaligned_memmove64(swab64p(le64_src), be64_dst);
}

/**
 * ieee802154_le16_to_be16 - copies and convert le16 to be16
 * @be16_dst: be16 destination pointer
 * @le16_src: le16 source pointer
 */
static inline void ieee802154_le16_to_be16(void *be16_dst, const void *le16_src)
{
	__put_unaligned_memmove16(swab16p(le16_src), be16_dst);
}

/**
 * ieee802154_alloc_hw - Allocate a new hardware device
 *
@ -49,36 +49,178 @@
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/if_arp.h>
|
||||
#include <linux/netdevice.h>
|
||||
|
||||
#include <net/6lowpan.h>
|
||||
#include <net/ipv6.h>
|
||||
#include <net/af_ieee802154.h>
|
||||
|
||||
/* special link-layer handling */
|
||||
#include <net/mac802154.h>
|
||||
|
||||
#include "nhc.h"
|
||||
|
||||
/* Values of fields within the IPHC encoding first byte */
|
||||
#define LOWPAN_IPHC_TF_MASK 0x18
|
||||
#define LOWPAN_IPHC_TF_00 0x00
|
||||
#define LOWPAN_IPHC_TF_01 0x08
|
||||
#define LOWPAN_IPHC_TF_10 0x10
|
||||
#define LOWPAN_IPHC_TF_11 0x18
|
||||
|
||||
#define LOWPAN_IPHC_NH 0x04
|
||||
|
||||
#define LOWPAN_IPHC_HLIM_MASK 0x03
|
||||
#define LOWPAN_IPHC_HLIM_00 0x00
|
||||
#define LOWPAN_IPHC_HLIM_01 0x01
|
||||
#define LOWPAN_IPHC_HLIM_10 0x02
|
||||
#define LOWPAN_IPHC_HLIM_11 0x03
|
||||
|
||||
/* Values of fields within the IPHC encoding second byte */
|
||||
#define LOWPAN_IPHC_CID 0x80
|
||||
|
||||
#define LOWPAN_IPHC_SAC 0x40
|
||||
|
||||
#define LOWPAN_IPHC_SAM_MASK 0x30
|
||||
#define LOWPAN_IPHC_SAM_00 0x00
|
||||
#define LOWPAN_IPHC_SAM_01 0x10
|
||||
#define LOWPAN_IPHC_SAM_10 0x20
|
||||
#define LOWPAN_IPHC_SAM_11 0x30
|
||||
|
||||
#define LOWPAN_IPHC_M 0x08
|
||||
|
||||
#define LOWPAN_IPHC_DAC 0x04
|
||||
|
||||
#define LOWPAN_IPHC_DAM_MASK 0x03
|
||||
#define LOWPAN_IPHC_DAM_00 0x00
|
||||
#define LOWPAN_IPHC_DAM_01 0x01
|
||||
#define LOWPAN_IPHC_DAM_10 0x02
|
||||
#define LOWPAN_IPHC_DAM_11 0x03
|
||||
|
||||
/* ipv6 address based on mac
|
||||
* second bit-flip (Universe/Local) is done according RFC2464
|
||||
*/
|
||||
#define is_addr_mac_addr_based(a, m) \
|
||||
((((a)->s6_addr[8]) == (((m)[0]) ^ 0x02)) && \
|
||||
(((a)->s6_addr[9]) == (m)[1]) && \
|
||||
(((a)->s6_addr[10]) == (m)[2]) && \
|
||||
(((a)->s6_addr[11]) == (m)[3]) && \
|
||||
(((a)->s6_addr[12]) == (m)[4]) && \
|
||||
(((a)->s6_addr[13]) == (m)[5]) && \
|
||||
(((a)->s6_addr[14]) == (m)[6]) && \
|
||||
(((a)->s6_addr[15]) == (m)[7]))
|
||||
|
||||
/* check whether we can compress the IID to 16 bits,
|
||||
* it's possible for unicast addresses with first 49 bits are zero only.
|
||||
*/
|
||||
#define lowpan_is_iid_16_bit_compressable(a) \
|
||||
((((a)->s6_addr16[4]) == 0) && \
|
||||
(((a)->s6_addr[10]) == 0) && \
|
||||
(((a)->s6_addr[11]) == 0xff) && \
|
||||
(((a)->s6_addr[12]) == 0xfe) && \
|
||||
(((a)->s6_addr[13]) == 0))
|
||||
|
||||
/* check whether the 112-bit gid of the multicast address is mappable to: */
|
||||
|
||||
/* 48 bits, FFXX::00XX:XXXX:XXXX */
|
||||
#define lowpan_is_mcast_addr_compressable48(a) \
|
||||
((((a)->s6_addr16[1]) == 0) && \
|
||||
(((a)->s6_addr16[2]) == 0) && \
|
||||
(((a)->s6_addr16[3]) == 0) && \
|
||||
(((a)->s6_addr16[4]) == 0) && \
|
||||
(((a)->s6_addr[10]) == 0))
|
||||
|
||||
/* 32 bits, FFXX::00XX:XXXX */
|
||||
#define lowpan_is_mcast_addr_compressable32(a) \
|
||||
((((a)->s6_addr16[1]) == 0) && \
|
||||
(((a)->s6_addr16[2]) == 0) && \
|
||||
(((a)->s6_addr16[3]) == 0) && \
|
||||
(((a)->s6_addr16[4]) == 0) && \
|
||||
(((a)->s6_addr16[5]) == 0) && \
|
||||
(((a)->s6_addr[12]) == 0))
|
||||
|
||||
/* 8 bits, FF02::00XX */
|
||||
#define lowpan_is_mcast_addr_compressable8(a) \
|
||||
((((a)->s6_addr[1]) == 2) && \
|
||||
(((a)->s6_addr16[1]) == 0) && \
|
||||
(((a)->s6_addr16[2]) == 0) && \
|
||||
(((a)->s6_addr16[3]) == 0) && \
|
||||
(((a)->s6_addr16[4]) == 0) && \
|
||||
(((a)->s6_addr16[5]) == 0) && \
|
||||
(((a)->s6_addr16[6]) == 0) && \
|
||||
(((a)->s6_addr[14]) == 0))
|
||||
|
||||
static inline void iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr,
|
||||
const void *lladdr)
|
||||
{
|
||||
/* fe:80::XXXX:XXXX:XXXX:XXXX
|
||||
* \_________________/
|
||||
* hwaddr
|
||||
*/
|
||||
ipaddr->s6_addr[0] = 0xFE;
|
||||
ipaddr->s6_addr[1] = 0x80;
|
||||
memcpy(&ipaddr->s6_addr[8], lladdr, EUI64_ADDR_LEN);
|
||||
/* second bit-flip (Universe/Local)
|
||||
* is done according RFC2464
|
||||
*/
|
||||
ipaddr->s6_addr[8] ^= 0x02;
|
||||
}
|
||||
|
||||
static inline void iphc_uncompress_802154_lladdr(struct in6_addr *ipaddr,
|
||||
const void *lladdr)
|
||||
{
|
||||
const struct ieee802154_addr *addr = lladdr;
|
||||
u8 eui64[EUI64_ADDR_LEN] = { };
|
||||
|
||||
switch (addr->mode) {
|
||||
case IEEE802154_ADDR_LONG:
|
||||
ieee802154_le64_to_be64(eui64, &addr->extended_addr);
|
||||
iphc_uncompress_eui64_lladdr(ipaddr, eui64);
|
||||
break;
|
||||
case IEEE802154_ADDR_SHORT:
|
||||
/* fe:80::ff:fe00:XXXX
|
||||
* \__/
|
||||
* short_addr
|
||||
*
|
||||
* Universe/Local bit is zero.
|
||||
*/
|
||||
ipaddr->s6_addr[0] = 0xFE;
|
||||
ipaddr->s6_addr[1] = 0x80;
|
||||
ipaddr->s6_addr[11] = 0xFF;
|
||||
ipaddr->s6_addr[12] = 0xFE;
|
||||
ieee802154_le16_to_be16(&ipaddr->s6_addr16[7],
|
||||
&addr->short_addr);
|
||||
break;
|
||||
default:
|
||||
/* should never handled and filtered by 802154 6lowpan */
|
||||
WARN_ON_ONCE(1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
/* Uncompress address function for source and
|
||||
* destination address(non-multicast).
|
||||
*
|
||||
* address_mode is sam value or dam value.
|
||||
* address_mode is the masked value for sam or dam value
|
||||
*/
|
||||
static int uncompress_addr(struct sk_buff *skb,
|
||||
struct in6_addr *ipaddr, const u8 address_mode,
|
||||
const u8 *lladdr, const u8 addr_type,
|
||||
const u8 addr_len)
|
||||
static int uncompress_addr(struct sk_buff *skb, const struct net_device *dev,
|
||||
struct in6_addr *ipaddr, u8 address_mode,
|
||||
const void *lladdr)
|
||||
{
|
||||
bool fail;
|
||||
|
||||
switch (address_mode) {
|
||||
case LOWPAN_IPHC_ADDR_00:
|
||||
/* SAM and DAM are the same here */
|
||||
case LOWPAN_IPHC_DAM_00:
|
||||
/* for global link addresses */
|
||||
fail = lowpan_fetch_skb(skb, ipaddr->s6_addr, 16);
|
||||
break;
|
||||
case LOWPAN_IPHC_ADDR_01:
|
||||
case LOWPAN_IPHC_SAM_01:
|
||||
case LOWPAN_IPHC_DAM_01:
|
||||
/* fe:80::XXXX:XXXX:XXXX:XXXX */
|
||||
ipaddr->s6_addr[0] = 0xFE;
|
||||
ipaddr->s6_addr[1] = 0x80;
|
||||
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[8], 8);
|
||||
break;
|
||||
case LOWPAN_IPHC_ADDR_02:
|
||||
case LOWPAN_IPHC_SAM_10:
|
||||
case LOWPAN_IPHC_DAM_10:
|
||||
/* fe:80::ff:fe00:XXXX */
|
||||
ipaddr->s6_addr[0] = 0xFE;
|
||||
ipaddr->s6_addr[1] = 0x80;
|
||||
@ -86,38 +228,16 @@ static int uncompress_addr(struct sk_buff *skb,
|
||||
ipaddr->s6_addr[12] = 0xFE;
|
||||
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[14], 2);
|
||||
break;
|
||||
case LOWPAN_IPHC_ADDR_03:
|
||||
case LOWPAN_IPHC_SAM_11:
|
||||
case LOWPAN_IPHC_DAM_11:
|
||||
fail = false;
|
||||
switch (addr_type) {
|
||||
case IEEE802154_ADDR_LONG:
|
||||
/* fe:80::XXXX:XXXX:XXXX:XXXX
|
||||
* \_________________/
|
||||
* hwaddr
|
||||
*/
|
||||
ipaddr->s6_addr[0] = 0xFE;
|
||||
ipaddr->s6_addr[1] = 0x80;
|
||||
memcpy(&ipaddr->s6_addr[8], lladdr, addr_len);
|
||||
/* second bit-flip (Universe/Local)
|
||||
* is done according RFC2464
|
||||
*/
|
||||
ipaddr->s6_addr[8] ^= 0x02;
|
||||
break;
|
||||
case IEEE802154_ADDR_SHORT:
|
||||
/* fe:80::ff:fe00:XXXX
|
||||
* \__/
|
||||
* short_addr
|
||||
*
|
||||
* Universe/Local bit is zero.
|
||||
*/
|
||||
ipaddr->s6_addr[0] = 0xFE;
|
||||
ipaddr->s6_addr[1] = 0x80;
|
||||
ipaddr->s6_addr[11] = 0xFF;
|
||||
ipaddr->s6_addr[12] = 0xFE;
|
||||
ipaddr->s6_addr16[7] = htons(*((u16 *)lladdr));
|
||||
switch (lowpan_priv(dev)->lltype) {
|
||||
case LOWPAN_LLTYPE_IEEE802154:
|
||||
iphc_uncompress_802154_lladdr(ipaddr, lladdr);
|
||||
break;
|
||||
default:
|
||||
pr_debug("Invalid addr_type set\n");
|
||||
return -EINVAL;
|
||||
iphc_uncompress_eui64_lladdr(ipaddr, lladdr);
|
||||
break;
|
||||
}
|
||||
break;
|
||||
default:
|
||||
@ -141,24 +261,25 @@ static int uncompress_addr(struct sk_buff *skb,
|
||||
*/
|
||||
static int uncompress_context_based_src_addr(struct sk_buff *skb,
|
||||
struct in6_addr *ipaddr,
|
||||
const u8 sam)
|
||||
u8 address_mode)
|
||||
{
|
||||
switch (sam) {
|
||||
case LOWPAN_IPHC_ADDR_00:
|
||||
switch (address_mode) {
|
||||
case LOWPAN_IPHC_SAM_00:
|
||||
/* unspec address ::
|
||||
* Do nothing, address is already ::
|
||||
*/
|
||||
break;
|
||||
case LOWPAN_IPHC_ADDR_01:
|
||||
case LOWPAN_IPHC_SAM_01:
|
||||
/* TODO */
|
||||
case LOWPAN_IPHC_ADDR_02:
|
||||
case LOWPAN_IPHC_SAM_10:
|
||||
/* TODO */
|
||||
case LOWPAN_IPHC_ADDR_03:
|
||||
case LOWPAN_IPHC_SAM_11:
|
||||
/* TODO */
|
||||
netdev_warn(skb->dev, "SAM value 0x%x not supported\n", sam);
|
||||
netdev_warn(skb->dev, "SAM value 0x%x not supported\n",
|
||||
address_mode);
|
||||
return -EINVAL;
|
||||
default:
|
||||
pr_debug("Invalid sam value: 0x%x\n", sam);
|
||||
pr_debug("Invalid sam value: 0x%x\n", address_mode);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -174,11 +295,11 @@ static int uncompress_context_based_src_addr(struct sk_buff *skb,
|
||||
*/
|
||||
static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
|
||||
struct in6_addr *ipaddr,
|
||||
const u8 dam)
|
||||
u8 address_mode)
|
||||
{
|
||||
bool fail;
|
||||
|
||||
switch (dam) {
|
||||
switch (address_mode) {
|
||||
case LOWPAN_IPHC_DAM_00:
|
||||
/* 00: 128 bits. The full address
|
||||
* is carried in-line.
|
||||
@ -210,7 +331,7 @@ static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
|
||||
fail = lowpan_fetch_skb(skb, &ipaddr->s6_addr[15], 1);
|
||||
break;
|
||||
default:
|
||||
pr_debug("DAM value has a wrong value: 0x%x\n", dam);
|
||||
pr_debug("DAM value has a wrong value: 0x%x\n", address_mode);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
@ -225,77 +346,142 @@ static int lowpan_uncompress_multicast_daddr(struct sk_buff *skb,
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* TTL uncompression values */
static const u8 lowpan_ttl_values[] = { 0, 1, 64, 255 };
/* get the ecn values from iphc tf format and set it to ipv6hdr */
static inline void lowpan_iphc_tf_set_ecn(struct ipv6hdr *hdr, const u8 *tf)
{
/* get the two higher bits which is ecn */
u8 ecn = tf[0] & 0xc0;

int
lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
const u8 *saddr, const u8 saddr_type,
const u8 saddr_len, const u8 *daddr,
const u8 daddr_type, const u8 daddr_len,
u8 iphc0, u8 iphc1)
/* ECN takes 0x30 in hdr->flow_lbl[0] */
hdr->flow_lbl[0] |= (ecn >> 2);
}

/* get the dscp values from iphc tf format and set it to ipv6hdr */
static inline void lowpan_iphc_tf_set_dscp(struct ipv6hdr *hdr, const u8 *tf)
{
/* DSCP is at place after ECN */
u8 dscp = tf[0] & 0x3f;

/* The four highest bits need to be set at hdr->priority */
hdr->priority |= ((dscp & 0x3c) >> 2);
/* The two lower bits is part of hdr->flow_lbl[0] */
hdr->flow_lbl[0] |= ((dscp & 0x03) << 6);
}

/* get the flow label values from iphc tf format and set it to ipv6hdr */
static inline void lowpan_iphc_tf_set_lbl(struct ipv6hdr *hdr, const u8 *lbl)
{
/* flow label is always some array started with lower nibble of
 * flow_lbl[0] and followed with two bytes afterwards. Inside inline
 * data the flow_lbl position can be different, which will be handled
 * by lbl pointer. E.g. case "01" vs "00" the traffic class is 8 bit
 * shifted, the different lbl pointer will handle that.
 *
 * The flow label will started at lower nibble of flow_lbl[0], the
 * higher nibbles are part of DSCP + ECN.
 */
hdr->flow_lbl[0] |= lbl[0] & 0x0f;
memcpy(&hdr->flow_lbl[1], &lbl[1], 2);
}

/* lowpan_iphc_tf_decompress - decompress the traffic class.
 * This function will return zero on success, a value lower than zero if
 * failed.
 */
static int lowpan_iphc_tf_decompress(struct sk_buff *skb, struct ipv6hdr *hdr,
u8 val)
{
u8 tf[4];

/* Traffic Class and Flow Label */
switch (val) {
case LOWPAN_IPHC_TF_00:
/* ECN + DSCP + 4-bit Pad + Flow Label (4 bytes) */
if (lowpan_fetch_skb(skb, tf, 4))
return -EINVAL;

/* 1 2 3
 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |ECN| DSCP | rsv | Flow Label |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
lowpan_iphc_tf_set_ecn(hdr, tf);
lowpan_iphc_tf_set_dscp(hdr, tf);
lowpan_iphc_tf_set_lbl(hdr, &tf[1]);
break;
case LOWPAN_IPHC_TF_01:
/* ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided. */
if (lowpan_fetch_skb(skb, tf, 3))
return -EINVAL;

/* 1 2
 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |ECN|rsv| Flow Label |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
lowpan_iphc_tf_set_ecn(hdr, tf);
lowpan_iphc_tf_set_lbl(hdr, &tf[0]);
break;
case LOWPAN_IPHC_TF_10:
/* ECN + DSCP (1 byte), Flow Label is elided. */
if (lowpan_fetch_skb(skb, tf, 1))
return -EINVAL;

/* 0 1 2 3 4 5 6 7
 * +-+-+-+-+-+-+-+-+
 * |ECN| DSCP |
 * +-+-+-+-+-+-+-+-+
 */
lowpan_iphc_tf_set_ecn(hdr, tf);
lowpan_iphc_tf_set_dscp(hdr, tf);
break;
case LOWPAN_IPHC_TF_11:
/* Traffic Class and Flow Label are elided */
break;
default:
WARN_ON_ONCE(1);
return -EINVAL;
}

return 0;
}

/* TTL uncompression values */
static const u8 lowpan_ttl_values[] = {
[LOWPAN_IPHC_HLIM_01] = 1,
[LOWPAN_IPHC_HLIM_10] = 64,
[LOWPAN_IPHC_HLIM_11] = 255,
};

int lowpan_header_decompress(struct sk_buff *skb, const struct net_device *dev,
const void *daddr, const void *saddr)
{
struct ipv6hdr hdr = {};
u8 tmp, num_context = 0;
u8 iphc0, iphc1;
int err;

raw_dump_table(__func__, "raw skb data dump uncompressed",
skb->data, skb->len);

if (lowpan_fetch_skb(skb, &iphc0, sizeof(iphc0)) ||
lowpan_fetch_skb(skb, &iphc1, sizeof(iphc1)))
return -EINVAL;

/* another if the CID flag is set */
if (iphc1 & LOWPAN_IPHC_CID) {
pr_debug("CID flag is set, increase header with one\n");
if (lowpan_fetch_skb(skb, &num_context, sizeof(num_context)))
return -EINVAL;
}
if (iphc1 & LOWPAN_IPHC_CID)
return -ENOTSUPP;

hdr.version = 6;

/* Traffic Class and Flow Label */
switch ((iphc0 & LOWPAN_IPHC_TF) >> 3) {
/* Traffic Class and FLow Label carried in-line
 * ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
 */
case 0: /* 00b */
if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
return -EINVAL;

memcpy(&hdr.flow_lbl, &skb->data[0], 3);
skb_pull(skb, 3);
hdr.priority = ((tmp >> 2) & 0x0f);
hdr.flow_lbl[0] = ((tmp >> 2) & 0x30) | (tmp << 6) |
(hdr.flow_lbl[0] & 0x0f);
break;
/* Traffic class carried in-line
 * ECN + DSCP (1 byte), Flow Label is elided
 */
case 2: /* 10b */
if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
return -EINVAL;

hdr.priority = ((tmp >> 2) & 0x0f);
hdr.flow_lbl[0] = ((tmp << 6) & 0xC0) | ((tmp >> 2) & 0x30);
break;
/* Flow Label carried in-line
 * ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
 */
case 1: /* 01b */
if (lowpan_fetch_skb(skb, &tmp, sizeof(tmp)))
return -EINVAL;

hdr.flow_lbl[0] = (tmp & 0x0F) | ((tmp >> 2) & 0x30);
memcpy(&hdr.flow_lbl[1], &skb->data[0], 2);
skb_pull(skb, 2);
break;
/* Traffic Class and Flow Label are elided */
case 3: /* 11b */
break;
default:
break;
}
err = lowpan_iphc_tf_decompress(skb, &hdr,
iphc0 & LOWPAN_IPHC_TF_MASK);
if (err < 0)
return err;

/* Next Header */
if ((iphc0 & LOWPAN_IPHC_NH_C) == 0) {
if (!(iphc0 & LOWPAN_IPHC_NH)) {
/* Next header is carried inline */
if (lowpan_fetch_skb(skb, &hdr.nexthdr, sizeof(hdr.nexthdr)))
return -EINVAL;
@ -305,35 +491,30 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
}

/* Hop Limit */
if ((iphc0 & 0x03) != LOWPAN_IPHC_TTL_I) {
hdr.hop_limit = lowpan_ttl_values[iphc0 & 0x03];
if ((iphc0 & LOWPAN_IPHC_HLIM_MASK) != LOWPAN_IPHC_HLIM_00) {
hdr.hop_limit = lowpan_ttl_values[iphc0 & LOWPAN_IPHC_HLIM_MASK];
} else {
if (lowpan_fetch_skb(skb, &hdr.hop_limit,
sizeof(hdr.hop_limit)))
return -EINVAL;
}

/* Extract SAM to the tmp variable */
tmp = ((iphc1 & LOWPAN_IPHC_SAM) >> LOWPAN_IPHC_SAM_BIT) & 0x03;

if (iphc1 & LOWPAN_IPHC_SAC) {
/* Source address context based uncompression */
pr_debug("SAC bit is set. Handle context based source address.\n");
err = uncompress_context_based_src_addr(skb, &hdr.saddr, tmp);
err = uncompress_context_based_src_addr(skb, &hdr.saddr,
iphc1 & LOWPAN_IPHC_SAM_MASK);
} else {
/* Source address uncompression */
pr_debug("source address stateless compression\n");
err = uncompress_addr(skb, &hdr.saddr, tmp, saddr,
saddr_type, saddr_len);
err = uncompress_addr(skb, dev, &hdr.saddr,
iphc1 & LOWPAN_IPHC_SAM_MASK, saddr);
}

/* Check on error of previous branch */
if (err)
return -EINVAL;

/* Extract DAM to the tmp variable */
tmp = ((iphc1 & LOWPAN_IPHC_DAM_11) >> LOWPAN_IPHC_DAM_BIT) & 0x03;

/* check for Multicast Compression */
if (iphc1 & LOWPAN_IPHC_M) {
if (iphc1 & LOWPAN_IPHC_DAC) {
@ -341,22 +522,22 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
/* TODO: implement this */
} else {
err = lowpan_uncompress_multicast_daddr(skb, &hdr.daddr,
tmp);
iphc1 & LOWPAN_IPHC_DAM_MASK);

if (err)
return -EINVAL;
}
} else {
err = uncompress_addr(skb, &hdr.daddr, tmp, daddr,
daddr_type, daddr_len);
err = uncompress_addr(skb, dev, &hdr.daddr,
iphc1 & LOWPAN_IPHC_DAM_MASK, daddr);
pr_debug("dest: stateless compression mode %d dest %pI6c\n",
tmp, &hdr.daddr);
iphc1 & LOWPAN_IPHC_DAM_MASK, &hdr.daddr);
if (err)
return -EINVAL;
}

/* Next header data uncompression */
if (iphc0 & LOWPAN_IPHC_NH_C) {
if (iphc0 & LOWPAN_IPHC_NH) {
err = lowpan_nhc_do_uncompression(skb, dev, &hdr);
if (err < 0)
return err;
@ -397,42 +578,176 @@ lowpan_header_decompress(struct sk_buff *skb, struct net_device *dev,
}
EXPORT_SYMBOL_GPL(lowpan_header_decompress);

static u8 lowpan_compress_addr_64(u8 **hc_ptr, u8 shift,
const struct in6_addr *ipaddr,
const unsigned char *lladdr)
static const u8 lowpan_iphc_dam_to_sam_value[] = {
[LOWPAN_IPHC_DAM_00] = LOWPAN_IPHC_SAM_00,
[LOWPAN_IPHC_DAM_01] = LOWPAN_IPHC_SAM_01,
[LOWPAN_IPHC_DAM_10] = LOWPAN_IPHC_SAM_10,
[LOWPAN_IPHC_DAM_11] = LOWPAN_IPHC_SAM_11,
};

static u8 lowpan_compress_addr_64(u8 **hc_ptr, const struct in6_addr *ipaddr,
const unsigned char *lladdr, bool sam)
{
u8 val = 0;
u8 dam = LOWPAN_IPHC_DAM_00;

if (is_addr_mac_addr_based(ipaddr, lladdr)) {
val = 3; /* 0-bits */
dam = LOWPAN_IPHC_DAM_11; /* 0-bits */
pr_debug("address compression 0 bits\n");
} else if (lowpan_is_iid_16_bit_compressable(ipaddr)) {
/* compress IID to 16 bits xxxx::XXXX */
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[7], 2);
val = 2; /* 16-bits */
dam = LOWPAN_IPHC_DAM_10; /* 16-bits */
raw_dump_inline(NULL, "Compressed ipv6 addr is (16 bits)",
*hc_ptr - 2, 2);
} else {
/* do not compress IID => xxxx::IID */
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr16[4], 8);
val = 1; /* 64-bits */
dam = LOWPAN_IPHC_DAM_01; /* 64-bits */
raw_dump_inline(NULL, "Compressed ipv6 addr is (64 bits)",
*hc_ptr - 8, 8);
}

return rol8(val, shift);
if (sam)
return lowpan_iphc_dam_to_sam_value[dam];
else
return dam;
}

int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
unsigned short type, const void *_daddr,
const void *_saddr, unsigned int len)
/* lowpan_iphc_get_tc - get the ECN + DCSP fields in hc format */
static inline u8 lowpan_iphc_get_tc(const struct ipv6hdr *hdr)
{
u8 tmp, iphc0, iphc1, *hc_ptr;
u8 dscp, ecn;

/* hdr->priority contains the higher bits of dscp, lower are part of
 * flow_lbl[0]. Note ECN, DCSP is swapped in ipv6 hdr.
 */
dscp = (hdr->priority << 2) | ((hdr->flow_lbl[0] & 0xc0) >> 6);
/* ECN is at the two lower bits from first nibble of flow_lbl[0] */
ecn = (hdr->flow_lbl[0] & 0x30);
/* for pretty debug output, also shift ecn to get the ecn value */
pr_debug("ecn 0x%02x dscp 0x%02x\n", ecn >> 4, dscp);
/* ECN is at 0x30 now, shift it to have ECN + DCSP */
return (ecn << 2) | dscp;
}

/* lowpan_iphc_is_flow_lbl_zero - check if flow label is zero */
static inline bool lowpan_iphc_is_flow_lbl_zero(const struct ipv6hdr *hdr)
{
return ((!(hdr->flow_lbl[0] & 0x0f)) &&
!hdr->flow_lbl[1] && !hdr->flow_lbl[2]);
}

/* lowpan_iphc_tf_compress - compress the traffic class which is set by
 * ipv6hdr. Return the corresponding format identifier which is used.
 */
static u8 lowpan_iphc_tf_compress(u8 **hc_ptr, const struct ipv6hdr *hdr)
{
/* get ecn dscp data in a byteformat as: ECN(hi) + DSCP(lo) */
u8 tc = lowpan_iphc_get_tc(hdr), tf[4], val;

/* printout the traffic class in hc format */
pr_debug("tc 0x%02x\n", tc);

if (lowpan_iphc_is_flow_lbl_zero(hdr)) {
if (!tc) {
/* 11: Traffic Class and Flow Label are elided. */
val = LOWPAN_IPHC_TF_11;
} else {
/* 10: ECN + DSCP (1 byte), Flow Label is elided.
 *
 * 0 1 2 3 4 5 6 7
 * +-+-+-+-+-+-+-+-+
 * |ECN| DSCP |
 * +-+-+-+-+-+-+-+-+
 */
lowpan_push_hc_data(hc_ptr, &tc, sizeof(tc));
val = LOWPAN_IPHC_TF_10;
}
} else {
/* check if dscp is zero, it's after the first two bit */
if (!(tc & 0x3f)) {
/* 01: ECN + 2-bit Pad + Flow Label (3 bytes), DSCP is elided
 *
 * 1 2
 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |ECN|rsv| Flow Label |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
memcpy(&tf[0], &hdr->flow_lbl[0], 3);
/* zero the highest 4-bits, contains DCSP + ECN */
tf[0] &= ~0xf0;
/* set ECN */
tf[0] |= (tc & 0xc0);

lowpan_push_hc_data(hc_ptr, tf, 3);
val = LOWPAN_IPHC_TF_01;
} else {
/* 00: ECN + DSCP + 4-bit Pad + Flow Label (4 bytes)
 *
 * 1 2 3
 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 * |ECN| DSCP | rsv | Flow Label |
 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
 */
memcpy(&tf[0], &tc, sizeof(tc));
/* highest nibble of flow_lbl[0] is part of DSCP + ECN
 * which will be the 4-bit pad and will be filled with
 * zeros afterwards.
 */
memcpy(&tf[1], &hdr->flow_lbl[0], 3);
/* zero the 4-bit pad, which is reserved */
tf[1] &= ~0xf0;

lowpan_push_hc_data(hc_ptr, tf, 4);
val = LOWPAN_IPHC_TF_00;
}
}

return val;
}

static u8 lowpan_iphc_mcast_addr_compress(u8 **hc_ptr,
const struct in6_addr *ipaddr)
{
u8 val;

if (lowpan_is_mcast_addr_compressable8(ipaddr)) {
pr_debug("compressed to 1 octet\n");
/* use last byte */
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[15], 1);
val = LOWPAN_IPHC_DAM_11;
} else if (lowpan_is_mcast_addr_compressable32(ipaddr)) {
pr_debug("compressed to 4 octets\n");
/* second byte + the last three */
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[1], 1);
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[13], 3);
val = LOWPAN_IPHC_DAM_10;
} else if (lowpan_is_mcast_addr_compressable48(ipaddr)) {
pr_debug("compressed to 6 octets\n");
/* second byte + the last five */
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[1], 1);
lowpan_push_hc_data(hc_ptr, &ipaddr->s6_addr[11], 5);
val = LOWPAN_IPHC_DAM_01;
} else {
pr_debug("using full address\n");
lowpan_push_hc_data(hc_ptr, ipaddr->s6_addr, 16);
val = LOWPAN_IPHC_DAM_00;
}

return val;
}

int lowpan_header_compress(struct sk_buff *skb, const struct net_device *dev,
const void *daddr, const void *saddr)
{
u8 iphc0, iphc1, *hc_ptr;
struct ipv6hdr *hdr;
u8 head[100] = {};
u8 head[LOWPAN_IPHC_MAX_HC_BUF_LEN] = {};
int ret, addr_type;

if (type != ETH_P_IPV6)
if (skb->protocol != htons(ETH_P_IPV6))
return -EINVAL;

hdr = ipv6_hdr(skb);
@ -456,63 +771,26 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,

/* TODO: context lookup */

raw_dump_inline(__func__, "saddr",
(unsigned char *)_saddr, IEEE802154_ADDR_LEN);
raw_dump_inline(__func__, "daddr",
(unsigned char *)_daddr, IEEE802154_ADDR_LEN);
raw_dump_inline(__func__, "saddr", saddr, EUI64_ADDR_LEN);
raw_dump_inline(__func__, "daddr", daddr, EUI64_ADDR_LEN);

raw_dump_table(__func__, "sending raw skb network uncompressed packet",
skb->data, skb->len);

/* Traffic class, flow label
 * If flow label is 0, compress it. If traffic class is 0, compress it
 * We have to process both in the same time as the offset of traffic
 * class depends on the presence of version and flow label
 */

/* hc format of TC is ECN | DSCP , original one is DSCP | ECN */
tmp = (hdr->priority << 4) | (hdr->flow_lbl[0] >> 4);
tmp = ((tmp & 0x03) << 6) | (tmp >> 2);

if (((hdr->flow_lbl[0] & 0x0F) == 0) &&
(hdr->flow_lbl[1] == 0) && (hdr->flow_lbl[2] == 0)) {
/* flow label can be compressed */
iphc0 |= LOWPAN_IPHC_FL_C;
if ((hdr->priority == 0) &&
((hdr->flow_lbl[0] & 0xF0) == 0)) {
/* compress (elide) all */
iphc0 |= LOWPAN_IPHC_TC_C;
} else {
/* compress only the flow label */
*hc_ptr = tmp;
hc_ptr += 1;
}
} else {
/* Flow label cannot be compressed */
if ((hdr->priority == 0) &&
((hdr->flow_lbl[0] & 0xF0) == 0)) {
/* compress only traffic class */
iphc0 |= LOWPAN_IPHC_TC_C;
*hc_ptr = (tmp & 0xc0) | (hdr->flow_lbl[0] & 0x0F);
memcpy(hc_ptr + 1, &hdr->flow_lbl[1], 2);
hc_ptr += 3;
} else {
/* compress nothing */
memcpy(hc_ptr, hdr, 4);
/* replace the top byte with new ECN | DSCP format */
*hc_ptr = tmp;
hc_ptr += 4;
}
}
/* Traffic Class, Flow Label compression */
iphc0 |= lowpan_iphc_tf_compress(&hc_ptr, hdr);

/* NOTE: payload length is always compressed */

/* Check if we provide the nhc format for nexthdr and compression
 * functionality. If not nexthdr is handled inline and not compressed.
 */
ret = lowpan_nhc_check_compression(skb, hdr, &hc_ptr, &iphc0);
if (ret < 0)
return ret;
ret = lowpan_nhc_check_compression(skb, hdr, &hc_ptr);
if (ret == -ENOENT)
lowpan_push_hc_data(&hc_ptr, &hdr->nexthdr,
sizeof(hdr->nexthdr));
else
iphc0 |= LOWPAN_IPHC_NH;

/* Hop limit
 * if 1: compress, encoding is 01
@ -522,13 +800,13 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
 */
switch (hdr->hop_limit) {
case 1:
iphc0 |= LOWPAN_IPHC_TTL_1;
iphc0 |= LOWPAN_IPHC_HLIM_01;
break;
case 64:
iphc0 |= LOWPAN_IPHC_TTL_64;
iphc0 |= LOWPAN_IPHC_HLIM_10;
break;
case 255:
iphc0 |= LOWPAN_IPHC_TTL_255;
iphc0 |= LOWPAN_IPHC_HLIM_11;
break;
default:
lowpan_push_hc_data(&hc_ptr, &hdr->hop_limit,
@ -542,9 +820,8 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
iphc1 |= LOWPAN_IPHC_SAC;
} else {
if (addr_type & IPV6_ADDR_LINKLOCAL) {
iphc1 |= lowpan_compress_addr_64(&hc_ptr,
LOWPAN_IPHC_SAM_BIT,
&hdr->saddr, _saddr);
iphc1 |= lowpan_compress_addr_64(&hc_ptr, &hdr->saddr,
saddr, true);
pr_debug("source address unicast link-local %pI6c iphc1 0x%02x\n",
&hdr->saddr, iphc1);
} else {
@ -558,38 +835,12 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
if (addr_type & IPV6_ADDR_MULTICAST) {
pr_debug("destination address is multicast: ");
iphc1 |= LOWPAN_IPHC_M;
if (lowpan_is_mcast_addr_compressable8(&hdr->daddr)) {
pr_debug("compressed to 1 octet\n");
iphc1 |= LOWPAN_IPHC_DAM_11;
/* use last byte */
lowpan_push_hc_data(&hc_ptr,
&hdr->daddr.s6_addr[15], 1);
} else if (lowpan_is_mcast_addr_compressable32(&hdr->daddr)) {
pr_debug("compressed to 4 octets\n");
iphc1 |= LOWPAN_IPHC_DAM_10;
/* second byte + the last three */
lowpan_push_hc_data(&hc_ptr,
&hdr->daddr.s6_addr[1], 1);
lowpan_push_hc_data(&hc_ptr,
&hdr->daddr.s6_addr[13], 3);
} else if (lowpan_is_mcast_addr_compressable48(&hdr->daddr)) {
pr_debug("compressed to 6 octets\n");
iphc1 |= LOWPAN_IPHC_DAM_01;
/* second byte + the last five */
lowpan_push_hc_data(&hc_ptr,
&hdr->daddr.s6_addr[1], 1);
lowpan_push_hc_data(&hc_ptr,
&hdr->daddr.s6_addr[11], 5);
} else {
pr_debug("using full address\n");
iphc1 |= LOWPAN_IPHC_DAM_00;
lowpan_push_hc_data(&hc_ptr, hdr->daddr.s6_addr, 16);
}
iphc1 |= lowpan_iphc_mcast_addr_compress(&hc_ptr, &hdr->daddr);
} else {
if (addr_type & IPV6_ADDR_LINKLOCAL) {
/* TODO: context lookup */
iphc1 |= lowpan_compress_addr_64(&hc_ptr,
LOWPAN_IPHC_DAM_BIT, &hdr->daddr, _daddr);
iphc1 |= lowpan_compress_addr_64(&hc_ptr, &hdr->daddr,
daddr, false);
pr_debug("dest address unicast link-local %pI6c "
"iphc1 0x%02x\n", &hdr->daddr, iphc1);
} else {
@ -599,7 +850,7 @@ int lowpan_header_compress(struct sk_buff *skb, struct net_device *dev,
}

/* next header compression */
if (iphc0 & LOWPAN_IPHC_NH_C) {
if (iphc0 & LOWPAN_IPHC_NH) {
ret = lowpan_nhc_do_compression(skb, hdr, &hc_ptr);
if (ret < 0)
return ret;

@ -95,23 +95,20 @@ static struct lowpan_nhc *lowpan_nhc_by_nhcid(const struct sk_buff *skb)
}

int lowpan_nhc_check_compression(struct sk_buff *skb,
const struct ipv6hdr *hdr, u8 **hc_ptr,
u8 *iphc0)
const struct ipv6hdr *hdr, u8 **hc_ptr)
{
struct lowpan_nhc *nhc;
int ret = 0;

spin_lock_bh(&lowpan_nhc_lock);

nhc = lowpan_nexthdr_nhcs[hdr->nexthdr];
if (nhc && nhc->compress)
*iphc0 |= LOWPAN_IPHC_NH_C;
else
lowpan_push_hc_data(hc_ptr, &hdr->nexthdr,
sizeof(hdr->nexthdr));
if (!(nhc && nhc->compress))
ret = -ENOENT;

spin_unlock_bh(&lowpan_nhc_lock);

return 0;
return ret;
}

int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
@ -157,7 +154,8 @@ int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
return ret;
}

int lowpan_nhc_do_uncompression(struct sk_buff *skb, struct net_device *dev,
int lowpan_nhc_do_uncompression(struct sk_buff *skb,
const struct net_device *dev,
struct ipv6hdr *hdr)
{
struct lowpan_nhc *nhc;

@ -86,19 +86,16 @@ struct lowpan_nhc *lowpan_nhc_by_nexthdr(u8 nexthdr);

/**
 * lowpan_nhc_check_compression - checks if we support compression format. If
 * we support the nhc by nexthdr field, the 6LoWPAN iphc NHC bit will be
 * set. If we don't support nexthdr will be added as inline data to the
 * 6LoWPAN header.
 * we support the nhc by nexthdr field, the function will return 0. If we
 * don't support the nhc by nexthdr this function will return -ENOENT.
 *
 * @skb: skb of 6LoWPAN header to read nhc and replace header.
 * @hdr: ipv6hdr to check the nexthdr value
 * @hc_ptr: pointer for 6LoWPAN header which should increment at the end of
 * replaced header.
 * @iphc0: iphc0 pointer to set the 6LoWPAN NHC bit
 */
int lowpan_nhc_check_compression(struct sk_buff *skb,
const struct ipv6hdr *hdr, u8 **hc_ptr,
u8 *iphc0);
const struct ipv6hdr *hdr, u8 **hc_ptr);

/**
 * lowpan_nhc_do_compression - calling compress callback for nhc
@ -119,7 +116,8 @@ int lowpan_nhc_do_compression(struct sk_buff *skb, const struct ipv6hdr *hdr,
 * @dev: netdevice for print logging information.
 * @hdr: ipv6hdr for setting nexthdr value.
 */
int lowpan_nhc_do_uncompression(struct sk_buff *skb, struct net_device *dev,
int lowpan_nhc_do_uncompression(struct sk_buff *skb,
const struct net_device *dev,
struct ipv6hdr *hdr);

/**

@ -17,7 +17,27 @@

#include "nhc.h"

#define LOWPAN_NHC_UDP_IDLEN 1
#define LOWPAN_NHC_UDP_MASK 0xF8
#define LOWPAN_NHC_UDP_ID 0xF0
#define LOWPAN_NHC_UDP_IDLEN 1

#define LOWPAN_NHC_UDP_4BIT_PORT 0xF0B0
#define LOWPAN_NHC_UDP_4BIT_MASK 0xFFF0
#define LOWPAN_NHC_UDP_8BIT_PORT 0xF000
#define LOWPAN_NHC_UDP_8BIT_MASK 0xFF00

/* values for port compression, _with checksum_ ie bit 5 set to 0 */

/* all inline */
#define LOWPAN_NHC_UDP_CS_P_00 0xF0
/* source 16bit inline, dest = 0xF0 + 8 bit inline */
#define LOWPAN_NHC_UDP_CS_P_01 0xF1
/* source = 0xF0 + 8bit inline, dest = 16 bit inline */
#define LOWPAN_NHC_UDP_CS_P_10 0xF2
/* source & dest = 0xF0B + 4bit inline */
#define LOWPAN_NHC_UDP_CS_P_11 0xF3
/* checksum elided */
#define LOWPAN_NHC_UDP_CS_C 0x04

static int udp_uncompress(struct sk_buff *skb, size_t needed)
{

@ -21,8 +21,6 @@
#include <net/ip6_route.h>
#include <net/addrconf.h>

#include <net/af_ieee802154.h> /* to get the address type */

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>
@ -272,7 +270,6 @@ static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
struct l2cap_chan *chan)
{
const u8 *saddr, *daddr;
u8 iphc0, iphc1;
struct lowpan_dev *dev;
struct lowpan_peer *peer;

@ -287,22 +284,7 @@ static int iphc_decompress(struct sk_buff *skb, struct net_device *netdev,
saddr = peer->eui64_addr;
daddr = dev->netdev->dev_addr;

/* at least two bytes will be used for the encoding */
if (skb->len < 2)
return -EINVAL;

if (lowpan_fetch_skb_u8(skb, &iphc0))
return -EINVAL;

if (lowpan_fetch_skb_u8(skb, &iphc1))
return -EINVAL;

return lowpan_header_decompress(skb, netdev,
saddr, IEEE802154_ADDR_LONG,
EUI64_ADDR_LEN, daddr,
IEEE802154_ADDR_LONG, EUI64_ADDR_LEN,
iphc0, iphc1);

return lowpan_header_decompress(skb, netdev, daddr, saddr);
}

static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
@ -314,15 +296,17 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
if (!netif_running(dev))
goto drop;

if (dev->type != ARPHRD_6LOWPAN)
if (dev->type != ARPHRD_6LOWPAN || !skb->len)
goto drop;

skb_reset_network_header(skb);

skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
goto drop;

/* check that it's our buffer */
if (skb->data[0] == LOWPAN_DISPATCH_IPV6) {
if (lowpan_is_ipv6(*skb_network_header(skb))) {
/* Copy the packet so that the IPv6 header is
 * properly aligned.
 */
@ -334,7 +318,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
local_skb->protocol = htons(ETH_P_IPV6);
local_skb->pkt_type = PACKET_HOST;

skb_reset_network_header(local_skb);
skb_set_transport_header(local_skb, sizeof(struct ipv6hdr));

if (give_skb_to_upper(local_skb, dev) != NET_RX_SUCCESS) {
@ -347,38 +330,34 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,

consume_skb(local_skb);
consume_skb(skb);
} else {
switch (skb->data[0] & 0xe0) {
case LOWPAN_DISPATCH_IPHC: /* ipv6 datagram */
local_skb = skb_clone(skb, GFP_ATOMIC);
if (!local_skb)
goto drop;
} else if (lowpan_is_iphc(*skb_network_header(skb))) {
local_skb = skb_clone(skb, GFP_ATOMIC);
if (!local_skb)
goto drop;

ret = iphc_decompress(local_skb, dev, chan);
if (ret < 0) {
kfree_skb(local_skb);
goto drop;
}

local_skb->protocol = htons(ETH_P_IPV6);
local_skb->pkt_type = PACKET_HOST;
local_skb->dev = dev;

if (give_skb_to_upper(local_skb, dev)
!= NET_RX_SUCCESS) {
kfree_skb(local_skb);
goto drop;
}

dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;

consume_skb(local_skb);
consume_skb(skb);
break;
default:
break;
ret = iphc_decompress(local_skb, dev, chan);
if (ret < 0) {
kfree_skb(local_skb);
goto drop;
}

local_skb->protocol = htons(ETH_P_IPV6);
local_skb->pkt_type = PACKET_HOST;
local_skb->dev = dev;

if (give_skb_to_upper(local_skb, dev)
!= NET_RX_SUCCESS) {
kfree_skb(local_skb);
goto drop;
}

dev->stats.rx_bytes += skb->len;
dev->stats.rx_packets++;

consume_skb(local_skb);
consume_skb(skb);
} else {
goto drop;
}

return NET_RX_SUCCESS;
@ -492,8 +471,7 @@ static int setup_header(struct sk_buff *skb, struct net_device *netdev,
status = 1;
}

lowpan_header_compress(skb, netdev, ETH_P_IPV6, daddr,
dev->netdev->dev_addr, skb->len);
lowpan_header_compress(skb, netdev, daddr, dev->netdev->dev_addr);

err = dev_hard_header(skb, netdev, ETH_P_IPV6, NULL, NULL, 0);
if (err < 0)
@ -1135,7 +1113,7 @@ static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
return -ENOENT;

hci_dev_lock(hdev);
hcon = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
hcon = hci_conn_hash_lookup_le(hdev, addr, *addr_type);
hci_dev_unlock(hdev);

if (!hcon)

@ -33,7 +33,7 @@

#include "selftest.h"

#define VERSION "2.20"
#define VERSION "2.21"

/* Bluetooth sockets */
#define BT_MAX_PROTO 8

@ -59,15 +59,11 @@ static const struct sco_param esco_param_msbc[] = {
{ EDR_ESCO_MASK | ESCO_EV3, 0x0008, 0x02 }, /* T1 */
};

static void hci_le_create_connection_cancel(struct hci_conn *conn)
{
hci_send_cmd(conn->hdev, HCI_OP_LE_CREATE_CONN_CANCEL, 0, NULL);
}

/* This function requires the caller holds hdev->lock */
static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
{
struct hci_conn_params *params;
struct hci_dev *hdev = conn->hdev;
struct smp_irk *irk;
bdaddr_t *bdaddr;
u8 bdaddr_type;
@ -76,14 +72,15 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)
bdaddr_type = conn->dst_type;

/* Check if we need to convert to identity address */
irk = hci_get_irk(conn->hdev, bdaddr, bdaddr_type);
irk = hci_get_irk(hdev, bdaddr, bdaddr_type);
if (irk) {
bdaddr = &irk->bdaddr;
bdaddr_type = irk->addr_type;
}

params = hci_explicit_connect_lookup(conn->hdev, bdaddr, bdaddr_type);
if (!params)
params = hci_pend_le_action_lookup(&hdev->pend_le_conns, bdaddr,
bdaddr_type);
if (!params || !params->explicit_connect)
return;

/* The connection attempt was doing scan for new RPA, and is
@ -97,21 +94,21 @@ static void hci_connect_le_scan_cleanup(struct hci_conn *conn)

switch (params->auto_connect) {
case HCI_AUTO_CONN_EXPLICIT:
hci_conn_params_del(conn->hdev, bdaddr, bdaddr_type);
hci_conn_params_del(hdev, bdaddr, bdaddr_type);
/* return instead of break to avoid duplicate scan update */
return;
case HCI_AUTO_CONN_DIRECT:
case HCI_AUTO_CONN_ALWAYS:
list_add(&params->action, &conn->hdev->pend_le_conns);
list_add(&params->action, &hdev->pend_le_conns);
break;
case HCI_AUTO_CONN_REPORT:
list_add(&params->action, &conn->hdev->pend_le_reports);
list_add(&params->action, &hdev->pend_le_reports);
break;
default:
break;
}

hci_update_background_scan(conn->hdev);
hci_update_background_scan(hdev);
}

static void hci_conn_cleanup(struct hci_conn *conn)
@ -137,18 +134,51 @@ static void hci_conn_cleanup(struct hci_conn *conn)
hci_conn_put(conn);
}

/* This function requires the caller holds hdev->lock */
static void le_scan_cleanup(struct work_struct *work)
{
struct hci_conn *conn = container_of(work, struct hci_conn,
le_scan_cleanup);
struct hci_dev *hdev = conn->hdev;
struct hci_conn *c = NULL;

BT_DBG("%s hcon %p", hdev->name, conn);

hci_dev_lock(hdev);

/* Check that the hci_conn is still around */
rcu_read_lock();
list_for_each_entry_rcu(c, &hdev->conn_hash.list, list) {
if (c == conn)
break;
}
rcu_read_unlock();

if (c == conn) {
hci_connect_le_scan_cleanup(conn);
hci_conn_cleanup(conn);
}

hci_dev_unlock(hdev);
hci_dev_put(hdev);
hci_conn_put(conn);
}

static void hci_connect_le_scan_remove(struct hci_conn *conn)
{
hci_connect_le_scan_cleanup(conn);
BT_DBG("%s hcon %p", conn->hdev->name, conn);

/* We can't call hci_conn_del here since that would deadlock
 * with trying to call cancel_delayed_work_sync(&conn->disc_work).
 * Instead, call just hci_conn_cleanup() which contains the bare
 * minimum cleanup operations needed for a connection in this
 * state.
/* We can't call hci_conn_del/hci_conn_cleanup here since that
 * could deadlock with another hci_conn_del() call that's holding
 * hci_dev_lock and doing cancel_delayed_work_sync(&conn->disc_work).
 * Instead, grab temporary extra references to the hci_dev and
 * hci_conn and perform the necessary cleanup in a separate work
 * callback.
 */
hci_conn_cleanup(conn);

hci_dev_hold(conn->hdev);
hci_conn_get(conn);

schedule_work(&conn->le_scan_cleanup);
}

static void hci_acl_create_connection(struct hci_conn *conn)
@ -194,33 +224,8 @@ static void hci_acl_create_connection(struct hci_conn *conn)
hci_send_cmd(hdev, HCI_OP_CREATE_CONN, sizeof(cp), &cp);
}

static void hci_acl_create_connection_cancel(struct hci_conn *conn)
{
struct hci_cp_create_conn_cancel cp;

BT_DBG("hcon %p", conn);

if (conn->hdev->hci_ver < BLUETOOTH_VER_1_2)
return;

bacpy(&cp.bdaddr, &conn->dst);
hci_send_cmd(conn->hdev, HCI_OP_CREATE_CONN_CANCEL, sizeof(cp), &cp);
}

static void hci_reject_sco(struct hci_conn *conn)
{
struct hci_cp_reject_sync_conn_req cp;

cp.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
bacpy(&cp.bdaddr, &conn->dst);

hci_send_cmd(conn->hdev, HCI_OP_REJECT_SYNC_CONN_REQ, sizeof(cp), &cp);
}

int hci_disconnect(struct hci_conn *conn, __u8 reason)
{
struct hci_cp_disconnect cp;

BT_DBG("hcon %p", conn);

/* When we are master of an established connection and it enters
@ -228,7 +233,8 @@ int hci_disconnect(struct hci_conn *conn, __u8 reason)
 * current clock offset. Processing of the result is done
 * within the event handling and hci_clock_offset_evt function.
 */
if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER) {
if (conn->type == ACL_LINK && conn->role == HCI_ROLE_MASTER &&
(conn->state == BT_CONNECTED || conn->state == BT_CONFIG)) {
struct hci_dev *hdev = conn->hdev;
struct hci_cp_read_clock_offset clkoff_cp;

@ -237,25 +243,7 @@ int hci_disconnect(struct hci_conn *conn, __u8 reason)
&clkoff_cp);
}

conn->state = BT_DISCONN;

cp.handle = cpu_to_le16(conn->handle);
cp.reason = reason;
return hci_send_cmd(conn->hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp);
}

static void hci_amp_disconn(struct hci_conn *conn)
{
struct hci_cp_disconn_phy_link cp;

BT_DBG("hcon %p", conn);

conn->state = BT_DISCONN;

cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
cp.reason = hci_proto_disconn_ind(conn);
hci_send_cmd(conn->hdev, HCI_OP_DISCONN_PHY_LINK,
sizeof(cp), &cp);
return hci_abort_conn(conn, reason);
}

static void hci_add_sco(struct hci_conn *conn, __u16 handle)
@ -421,35 +409,14 @@ static void hci_conn_timeout(struct work_struct *work)
if (refcnt > 0)
return;

switch (conn->state) {
case BT_CONNECT:
case BT_CONNECT2:
if (conn->out) {
if (conn->type == ACL_LINK)
hci_acl_create_connection_cancel(conn);
else if (conn->type == LE_LINK) {
if (test_bit(HCI_CONN_SCANNING, &conn->flags))
hci_connect_le_scan_remove(conn);
else
hci_le_create_connection_cancel(conn);
}
} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
hci_reject_sco(conn);
}
break;
case BT_CONFIG:
case BT_CONNECTED:
if (conn->type == AMP_LINK) {
hci_amp_disconn(conn);
} else {
__u8 reason = hci_proto_disconn_ind(conn);
hci_disconnect(conn, reason);
}
break;
default:
conn->state = BT_CLOSED;
break;
/* LE connections in scanning state need special handling */
if (conn->state == BT_CONNECT && conn->type == LE_LINK &&
test_bit(HCI_CONN_SCANNING, &conn->flags)) {
hci_connect_le_scan_remove(conn);
return;
}

hci_abort_conn(conn, hci_proto_disconn_ind(conn));
}

/* Enter sniff mode */
@ -517,7 +484,7 @@ static void le_conn_timeout(struct work_struct *work)
return;
}

hci_le_create_connection_cancel(conn);
hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
}

struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
@ -580,6 +547,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst,
INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept);
INIT_DELAYED_WORK(&conn->idle_work, hci_conn_idle);
INIT_DELAYED_WORK(&conn->le_conn_timeout, le_conn_timeout);
INIT_WORK(&conn->le_scan_cleanup, le_scan_cleanup);

atomic_set(&conn->refcnt, 0);

@ -835,7 +803,7 @@ struct hci_conn *hci_connect_le(struct hci_dev *hdev, bdaddr_t *dst,
 * attempt, we simply update pending_sec_level and auth_type fields
 * and return the object found.
 */
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
conn_unfinished = NULL;
if (conn) {
if (conn->state == BT_CONNECT &&
@ -985,13 +953,10 @@ static bool is_connected(struct hci_dev *hdev, bdaddr_t *addr, u8 type)
{
struct hci_conn *conn;

conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, addr);
conn = hci_conn_hash_lookup_le(hdev, addr, type);
if (!conn)
return false;

if (conn->dst_type != type)
return false;

if (conn->state != BT_CONNECTED)
return false;

@ -1064,7 +1029,7 @@ struct hci_conn *hci_connect_le_scan(struct hci_dev *hdev, bdaddr_t *dst,
 * attempt, we simply update pending_sec_level and auth_type fields
 * and return the object found.
 */
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, dst);
conn = hci_conn_hash_lookup_le(hdev, dst, dst_type);
if (conn) {
if (conn->pending_sec_level < sec_level)
conn->pending_sec_level = sec_level;
@ -162,6 +162,16 @@ static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
if (strtobool(buf, &enable))
return -EINVAL;

/* When the diagnostic flags are not persistent and the transport
 * is not active, then there is no need for the vendor callback.
 *
 * Instead just store the desired value. If needed the setting
 * will be programmed when the controller gets powered on.
 */
if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
!test_bit(HCI_RUNNING, &hdev->flags))
goto done;

hci_req_lock(hdev);
err = hdev->set_diag(hdev, enable);
hci_req_unlock(hdev);
@ -169,6 +179,7 @@ static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf,
if (err < 0)
return err;

done:
if (enable)
hci_dev_set_flag(hdev, HCI_VENDOR_DIAG);
else
@ -1450,6 +1461,8 @@ static int hci_dev_do_open(struct hci_dev *hdev)
set_bit(HCI_INIT, &hdev->flags);

if (hci_dev_test_flag(hdev, HCI_SETUP)) {
hci_sock_dev_event(hdev, HCI_DEV_SETUP);

if (hdev->setup)
ret = hdev->setup(hdev);

@ -1490,10 +1503,21 @@ static int hci_dev_do_open(struct hci_dev *hdev)

if (!ret) {
if (!hci_dev_test_flag(hdev, HCI_UNCONFIGURED) &&
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
!hci_dev_test_flag(hdev, HCI_USER_CHANNEL)) {
ret = __hci_init(hdev);
if (!ret && hdev->post_init)
ret = hdev->post_init(hdev);
}
}

/* If the HCI Reset command is clearing all diagnostic settings,
 * then they need to be reprogrammed after the init procedure
 * completed.
 */
if (test_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks) &&
hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) && hdev->set_diag)
ret = hdev->set_diag(hdev, true);

clear_bit(HCI_INIT, &hdev->flags);

if (!ret) {
@ -2916,23 +2940,6 @@ struct hci_conn_params *hci_pend_le_action_lookup(struct list_head *list,
return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_explicit_connect_lookup(struct hci_dev *hdev,
bdaddr_t *addr,
u8 addr_type)
{
struct hci_conn_params *param;

list_for_each_entry(param, &hdev->pend_le_conns, action) {
if (bacmp(&param->addr, addr) == 0 &&
param->addr_type == addr_type &&
param->explicit_connect)
return param;
}

return NULL;
}

/* This function requires the caller holds hdev->lock */
struct hci_conn_params *hci_conn_params_add(struct hci_dev *hdev,
bdaddr_t *addr, u8 addr_type)
@ -3555,14 +3562,15 @@ EXPORT_SYMBOL(hci_recv_frame);
/* Receive diagnostic message from HCI drivers */
int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb)
{
/* Mark as diagnostic packet */
bt_cb(skb)->pkt_type = HCI_DIAG_PKT;

/* Time stamp */
__net_timestamp(skb);

/* Mark as diagnostic packet and send to monitor */
bt_cb(skb)->pkt_type = HCI_DIAG_PKT;
hci_send_to_monitor(hdev, skb);
skb_queue_tail(&hdev->rx_q, skb);
queue_work(hdev->workqueue, &hdev->rx_work);

kfree_skb(skb);
return 0;
}
EXPORT_SYMBOL(hci_recv_diag);

@ -1915,7 +1915,8 @@ static void hci_cs_le_create_conn(struct hci_dev *hdev, u8 status)

hci_dev_lock(hdev);

conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->peer_addr);
conn = hci_conn_hash_lookup_le(hdev, &cp->peer_addr,
cp->peer_addr_type);
if (!conn)
goto unlock;

@ -564,3 +564,96 @@ void hci_update_background_scan(struct hci_dev *hdev)
if (err && err != -ENODATA)
BT_ERR("Failed to run HCI request: err %d", err);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
u8 reason)
{
switch (conn->state) {
case BT_CONNECTED:
case BT_CONFIG:
if (conn->type == AMP_LINK) {
struct hci_cp_disconn_phy_link cp;

cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
cp.reason = reason;
hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
&cp);
} else {
struct hci_cp_disconnect dc;

dc.handle = cpu_to_le16(conn->handle);
dc.reason = reason;
hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
}

conn->state = BT_DISCONN;

break;
case BT_CONNECT:
if (conn->type == LE_LINK) {
if (test_bit(HCI_CONN_SCANNING, &conn->flags))
break;
hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
0, NULL);
} else if (conn->type == ACL_LINK) {
if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
break;
hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
6, &conn->dst);
}
break;
case BT_CONNECT2:
if (conn->type == ACL_LINK) {
struct hci_cp_reject_conn_req rej;

bacpy(&rej.bdaddr, &conn->dst);
rej.reason = reason;

hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
sizeof(rej), &rej);
} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
struct hci_cp_reject_sync_conn_req rej;

bacpy(&rej.bdaddr, &conn->dst);

/* SCO rejection has its own limited set of
 * allowed error values (0x0D-0x0F) which isn't
 * compatible with most values passed to this
 * function. To be safe hard-code one of the
 * values that's suitable for SCO.
 */
rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
sizeof(rej), &rej);
}
break;
default:
conn->state = BT_CLOSED;
break;
}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
if (status)
BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
struct hci_request req;
int err;

hci_req_init(&req, conn->hdev);

__hci_abort_conn(&req, conn, reason);

err = hci_req_run(&req, abort_conn_complete);
if (err && err != -ENODATA) {
BT_ERR("Failed to run HCI request: err %d", err);
return err;
}

return 0;
}

@ -55,3 +55,7 @@ int hci_update_random_address(struct hci_request *req, bool require_privacy,

void hci_update_background_scan(struct hci_dev *hdev);
void __hci_update_background_scan(struct hci_request *req);

int hci_abort_conn(struct hci_conn *conn, u8 reason);
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
u8 reason);

@ -120,10 +120,7 @@ static bool is_filtered_packet(struct sock *sk, struct sk_buff *skb)
/* Apply filter */
flt = &hci_pi(sk)->filter;

if (bt_cb(skb)->pkt_type == HCI_VENDOR_PKT)
flt_type = 0;
else
flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;
flt_type = bt_cb(skb)->pkt_type & HCI_FLT_TYPE_BITS;

if (!test_bit(flt_type, &flt->type_mask))
return true;
@ -173,6 +170,11 @@ void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb)
continue;

if (hci_pi(sk)->channel == HCI_CHANNEL_RAW) {
if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
bt_cb(skb)->pkt_type != HCI_EVENT_PKT &&
bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
bt_cb(skb)->pkt_type != HCI_SCODATA_PKT)
continue;
if (is_filtered_packet(sk, skb))
continue;
} else if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
@ -333,6 +335,12 @@ static struct sk_buff *create_monitor_event(struct hci_dev *hdev, int event)
opcode = cpu_to_le16(HCI_MON_DEL_INDEX);
break;

case HCI_DEV_SETUP:
if (hdev->manufacturer == 0xffff)
return NULL;

/* fall through */

case HCI_DEV_UP:
skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC);
if (!skb)
@ -401,15 +409,17 @@ static void send_monitor_replay(struct sock *sk)
if (sock_queue_rcv_skb(sk, skb))
kfree_skb(skb);

if (!test_bit(HCI_UP, &hdev->flags))
continue;
if (test_bit(HCI_UP, &hdev->flags))
skb = create_monitor_event(hdev, HCI_DEV_UP);
else if (hci_dev_test_flag(hdev, HCI_SETUP))
skb = create_monitor_event(hdev, HCI_DEV_SETUP);
else
skb = NULL;

skb = create_monitor_event(hdev, HCI_DEV_UP);
if (!skb)
continue;

if (sock_queue_rcv_skb(sk, skb))
kfree_skb(skb);
if (skb) {
if (sock_queue_rcv_skb(sk, skb))
kfree_skb(skb);
}
}

read_unlock(&hci_dev_list_lock);
@ -1250,6 +1260,12 @@ static int hci_sock_sendmsg(struct socket *sock, struct msghdr *msg,
goto drop;
}

if (bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
err = -EINVAL;
goto drop;
}

skb_queue_tail(&hdev->raw_q, skb);
queue_work(hdev->workqueue, &hdev->tx_work);
}
@ -401,6 +401,20 @@ static void hidp_idle_timeout(unsigned long arg)
{
struct hidp_session *session = (struct hidp_session *) arg;

/* The HIDP user-space API only contains calls to add and remove
 * devices. There is no way to forward events of any kind. Therefore,
 * we have to forcefully disconnect a device on idle-timeouts. This is
 * unfortunate and weird API design, but it is spec-compliant and
 * required for backwards-compatibility. Hence, on idle-timeout, we
 * signal driver-detach events, so poll() will be woken up with an
 * error-condition on both sockets.
 */

session->intr_sock->sk->sk_err = EUNATCH;
session->ctrl_sock->sk->sk_err = EUNATCH;
wake_up_interruptible(sk_sleep(session->intr_sock->sk));
wake_up_interruptible(sk_sleep(session->ctrl_sock->sk));

hidp_session_terminate(session);
}

@ -1111,53 +1111,76 @@ static int l2cap_sock_shutdown(struct socket *sock, int how)
if (!sk)
return 0;

lock_sock(sk);

if (sk->sk_shutdown)
goto shutdown_already;

BT_DBG("Handling sock shutdown");

/* prevent sk structure from being freed whilst unlocked */
sock_hold(sk);

chan = l2cap_pi(sk)->chan;
/* prevent chan structure from being freed whilst unlocked */
l2cap_chan_hold(chan);
conn = chan->conn;

BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

if (chan->mode == L2CAP_MODE_ERTM &&
chan->unacked_frames > 0 &&
chan->state == BT_CONNECTED) {
err = __l2cap_wait_ack(sk, chan);

/* After waiting for ACKs, check whether shutdown
 * has already been actioned to close the L2CAP
 * link such as by l2cap_disconnection_req().
 */
if (sk->sk_shutdown)
goto has_shutdown;
}

sk->sk_shutdown = SHUTDOWN_MASK;
release_sock(sk);

l2cap_chan_lock(chan);
conn = chan->conn;
if (conn)
/* prevent conn structure from being freed */
l2cap_conn_get(conn);
l2cap_chan_unlock(chan);

if (conn)
/* mutex lock must be taken before l2cap_chan_lock() */
mutex_lock(&conn->chan_lock);

l2cap_chan_lock(chan);
lock_sock(sk);
l2cap_chan_close(chan, 0);
l2cap_chan_unlock(chan);

if (!sk->sk_shutdown) {
if (chan->mode == L2CAP_MODE_ERTM &&
chan->unacked_frames > 0 &&
chan->state == BT_CONNECTED)
err = __l2cap_wait_ack(sk, chan);

sk->sk_shutdown = SHUTDOWN_MASK;

release_sock(sk);
l2cap_chan_close(chan, 0);
lock_sock(sk);

if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
!(current->flags & PF_EXITING))
err = bt_sock_wait_state(sk, BT_CLOSED,
sk->sk_lingertime);
if (conn) {
mutex_unlock(&conn->chan_lock);
l2cap_conn_put(conn);
}

lock_sock(sk);

if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime &&
!(current->flags & PF_EXITING))
err = bt_sock_wait_state(sk, BT_CLOSED,
sk->sk_lingertime);

has_shutdown:
l2cap_chan_put(chan);
sock_put(sk);

shutdown_already:
if (!err && sk->sk_err)
err = -sk->sk_err;

release_sock(sk);
l2cap_chan_unlock(chan);

if (conn)
mutex_unlock(&conn->chan_lock);

l2cap_chan_put(chan);
sock_put(sk);

BT_DBG("err: %d", err);
BT_DBG("Sock shutdown complete err: %d", err);

return err;
}
@ -268,6 +268,14 @@ static int mgmt_event(u16 event, struct hci_dev *hdev, void *data, u16 len,
|
||||
HCI_SOCK_TRUSTED, skip_sk);
|
||||
}
|
||||
|
||||
static u8 le_addr_type(u8 mgmt_addr_type)
|
||||
{
|
||||
if (mgmt_addr_type == BDADDR_LE_PUBLIC)
|
||||
return ADDR_LE_DEV_PUBLIC;
|
||||
else
|
||||
return ADDR_LE_DEV_RANDOM;
|
||||
}
|
||||
|
||||
static int read_version(struct sock *sk, struct hci_dev *hdev, void *data,
|
||||
u16 data_len)
|
||||
{
|
||||
@ -1631,35 +1639,8 @@ static int clean_up_hci_state(struct hci_dev *hdev)
|
||||
discov_stopped = hci_stop_discovery(&req);
|
||||
|
||||
list_for_each_entry(conn, &hdev->conn_hash.list, list) {
|
||||
struct hci_cp_disconnect dc;
|
||||
struct hci_cp_reject_conn_req rej;
|
||||
|
||||
switch (conn->state) {
|
||||
case BT_CONNECTED:
|
||||
case BT_CONFIG:
|
||||
dc.handle = cpu_to_le16(conn->handle);
|
||||
dc.reason = 0x15; /* Terminated due to Power Off */
|
||||
hci_req_add(&req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
|
||||
break;
|
||||
case BT_CONNECT:
|
||||
if (conn->type == LE_LINK)
|
||||
hci_req_add(&req, HCI_OP_LE_CREATE_CONN_CANCEL,
|
||||
0, NULL);
|
||||
else if (conn->type == ACL_LINK)
|
||||
hci_req_add(&req, HCI_OP_CREATE_CONN_CANCEL,
|
||||
6, &conn->dst);
|
||||
break;
|
||||
case BT_CONNECT2:
|
||||
bacpy(&rej.bdaddr, &conn->dst);
|
||||
rej.reason = 0x15; /* Terminated due to Power Off */
|
||||
if (conn->type == ACL_LINK)
|
||||
hci_req_add(&req, HCI_OP_REJECT_CONN_REQ,
|
||||
sizeof(rej), &rej);
|
||||
else if (conn->type == SCO_LINK)
|
||||
hci_req_add(&req, HCI_OP_REJECT_SYNC_CONN_REQ,
|
||||
sizeof(rej), &rej);
|
||||
break;
|
||||
}
|
||||
/* 0x15 == Terminated due to Power Off */
|
||||
__hci_abort_conn(&req, conn, 0x15);
|
||||
}
|
||||
|
||||
err = hci_req_run(&req, clean_up_hci_complete);
|
||||
@ -3044,9 +3025,10 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
{
struct mgmt_cp_unpair_device *cp = data;
struct mgmt_rp_unpair_device rp;
struct hci_cp_disconnect dc;
struct hci_conn_params *params;
struct mgmt_pending_cmd *cmd;
struct hci_conn *conn;
u8 addr_type;
int err;

memset(&rp, 0, sizeof(rp));
@ -3087,36 +3069,23 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
conn = NULL;

err = hci_remove_link_key(hdev, &cp->addr.bdaddr);
} else {
u8 addr_type;

conn = hci_conn_hash_lookup_ba(hdev, LE_LINK,
&cp->addr.bdaddr);
if (conn) {
/* Defer clearing up the connection parameters
* until closing to give a chance of keeping
* them if a repairing happens.
*/
set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

/* If disconnection is not requested, then
* clear the connection variable so that the
* link is not terminated.
*/
if (!cp->disconnect)
conn = NULL;
if (err < 0) {
err = mgmt_cmd_complete(sk, hdev->id,
MGMT_OP_UNPAIR_DEVICE,
MGMT_STATUS_NOT_PAIRED, &rp,
sizeof(rp));
goto unlock;
}

if (cp->addr.type == BDADDR_LE_PUBLIC)
addr_type = ADDR_LE_DEV_PUBLIC;
else
addr_type = ADDR_LE_DEV_RANDOM;

hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
goto done;
}

/* LE address type */
addr_type = le_addr_type(cp->addr.type);

hci_remove_irk(hdev, &cp->addr.bdaddr, addr_type);

err = hci_remove_ltk(hdev, &cp->addr.bdaddr, addr_type);
if (err < 0) {
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_UNPAIR_DEVICE,
MGMT_STATUS_NOT_PAIRED, &rp,
@ -3124,6 +3093,36 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,
goto unlock;
}

conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr, addr_type);
if (!conn) {
hci_conn_params_del(hdev, &cp->addr.bdaddr, addr_type);
goto done;
}

/* Abort any ongoing SMP pairing */
smp_cancel_pairing(conn);

/* Defer clearing up the connection parameters until closing to
* give a chance of keeping them if a repairing happens.
*/
set_bit(HCI_CONN_PARAM_REMOVAL_PEND, &conn->flags);

/* Disable auto-connection parameters if present */
params = hci_conn_params_lookup(hdev, &cp->addr.bdaddr, addr_type);
if (params) {
if (params->explicit_connect)
params->auto_connect = HCI_AUTO_CONN_EXPLICIT;
else
params->auto_connect = HCI_AUTO_CONN_DISABLED;
}

/* If disconnection is not requested, then clear the connection
* variable so that the link is not terminated.
*/
if (!cp->disconnect)
conn = NULL;

done:
/* If the connection variable is set, then termination of the
* link is requested.
*/
@ -3143,9 +3142,7 @@ static int unpair_device(struct sock *sk, struct hci_dev *hdev, void *data,

cmd->cmd_complete = addr_cmd_complete;

dc.handle = cpu_to_le16(conn->handle);
dc.reason = 0x13; /* Remote User Terminated Connection */
err = hci_send_cmd(hdev, HCI_OP_DISCONNECT, sizeof(dc), &dc);
err = hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
if (err < 0)
mgmt_pending_remove(cmd);

@ -3193,7 +3190,8 @@ static int disconnect(struct sock *sk, struct hci_dev *hdev, void *data,
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK,
&cp->addr.bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &cp->addr.bdaddr);
conn = hci_conn_hash_lookup_le(hdev, &cp->addr.bdaddr,
le_addr_type(cp->addr.type));

if (!conn || conn->state == BT_OPEN || conn->state == BT_CLOSED) {
err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_DISCONNECT,
@ -3544,16 +3542,9 @@ static int pair_device(struct sock *sk, struct hci_dev *hdev, void *data,
conn = hci_connect_acl(hdev, &cp->addr.bdaddr, sec_level,
auth_type);
} else {
u8 addr_type;
u8 addr_type = le_addr_type(cp->addr.type);
struct hci_conn_params *p;

/* Convert from L2CAP channel address type to HCI address type
*/
if (cp->addr.type == BDADDR_LE_PUBLIC)
addr_type = ADDR_LE_DEV_PUBLIC;
else
addr_type = ADDR_LE_DEV_RANDOM;

/* When pairing a new device, it is expected to remember
* this device for future connections. Adding the connection
* parameter information ahead of time allows tracking
@ -3697,7 +3688,8 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
if (addr->type == BDADDR_BREDR)
conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &addr->bdaddr);
else
conn = hci_conn_hash_lookup_ba(hdev, LE_LINK, &addr->bdaddr);
conn = hci_conn_hash_lookup_le(hdev, &addr->bdaddr,
le_addr_type(addr->type));

if (!conn) {
err = mgmt_cmd_complete(sk, hdev->id, mgmt_op,
@ -5600,14 +5592,9 @@ static int load_irks(struct sock *sk, struct hci_dev *hdev, void *cp_data,

for (i = 0; i < irk_count; i++) {
struct mgmt_irk_info *irk = &cp->irks[i];
u8 addr_type;

if (irk->addr.type == BDADDR_LE_PUBLIC)
addr_type = ADDR_LE_DEV_PUBLIC;
else
addr_type = ADDR_LE_DEV_RANDOM;

hci_add_irk(hdev, &irk->addr.bdaddr, addr_type, irk->val,
hci_add_irk(hdev, &irk->addr.bdaddr,
le_addr_type(irk->addr.type), irk->val,
BDADDR_ANY);
}

@ -5687,12 +5674,7 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,

for (i = 0; i < key_count; i++) {
struct mgmt_ltk_info *key = &cp->keys[i];
u8 type, addr_type, authenticated;

if (key->addr.type == BDADDR_LE_PUBLIC)
addr_type = ADDR_LE_DEV_PUBLIC;
else
addr_type = ADDR_LE_DEV_RANDOM;
u8 type, authenticated;

switch (key->type) {
case MGMT_LTK_UNAUTHENTICATED:
@ -5718,9 +5700,9 @@ static int load_long_term_keys(struct sock *sk, struct hci_dev *hdev,
continue;
}

hci_add_ltk(hdev, &key->addr.bdaddr, addr_type, type,
authenticated, key->val, key->enc_size, key->ediv,
key->rand);
hci_add_ltk(hdev, &key->addr.bdaddr,
le_addr_type(key->addr.type), type, authenticated,
key->val, key->enc_size, key->ediv, key->rand);
}

err = mgmt_cmd_complete(sk, hdev->id, MGMT_OP_LOAD_LONG_TERM_KEYS, 0,
@ -6232,10 +6214,7 @@ static int add_device(struct sock *sk, struct hci_dev *hdev,
goto added;
}

if (cp->addr.type == BDADDR_LE_PUBLIC)
addr_type = ADDR_LE_DEV_PUBLIC;
else
addr_type = ADDR_LE_DEV_RANDOM;
addr_type = le_addr_type(cp->addr.type);

if (cp->action == 0x02)
auto_conn = HCI_AUTO_CONN_ALWAYS;
@ -6364,10 +6343,7 @@ static int remove_device(struct sock *sk, struct hci_dev *hdev,
goto complete;
}

if (cp->addr.type == BDADDR_LE_PUBLIC)
addr_type = ADDR_LE_DEV_PUBLIC;
else
addr_type = ADDR_LE_DEV_RANDOM;
addr_type = le_addr_type(cp->addr.type);

/* Kernel internally uses conn_params with resolvable private
* address, but Remove Device allows only identity addresses.
@ -7873,27 +7849,13 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
mgmt_event(MGMT_EV_NEW_LONG_TERM_KEY, hdev, &ev, sizeof(ev), NULL);
}

void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk)
void mgmt_new_irk(struct hci_dev *hdev, struct smp_irk *irk, bool persistent)
{
struct mgmt_ev_new_irk ev;

memset(&ev, 0, sizeof(ev));

/* For identity resolving keys from devices that are already
* using a public address or static random address, do not
* ask for storing this key. The identity resolving key really
* is only mandatory for devices using resolvable random
* addresses.
*
* Storing all identity resolving keys has the downside that
* they will be also loaded on next boot of they system. More
* identity resolving keys, means more time during scanning is
* needed to actually resolve these addresses.
*/
if (bacmp(&irk->rpa, BDADDR_ANY))
ev.store_hint = 0x01;
else
ev.store_hint = 0x00;
ev.store_hint = persistent;

bacpy(&ev.rpa, &irk->rpa);
bacpy(&ev.irk.addr.bdaddr, &irk->bdaddr);

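mgmt_new_irk() now takes a persistent flag supplied by the caller and forwards it as ev.store_hint, instead of deriving the hint from whether the device was using a resolvable private address. A minimal sketch of the new calling convention (example_report_irk is a hypothetical wrapper; the real caller is smp_notify_keys() in the smp.c hunks below, which also handles the BR/EDR case):

static void example_report_irk(struct hci_dev *hdev, struct smp_irk *irk,
			       u8 req_auth, u8 rsp_auth)
{
	/* LE keys persist only if both sides requested bonding */
	bool persistent = !!((req_auth & rsp_auth) & SMP_AUTH_BONDING);

	mgmt_new_irk(hdev, irk, persistent);
}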
@ -811,7 +811,6 @@ static void smp_failure(struct l2cap_conn *conn, u8 reason)
smp_send_cmd(conn, SMP_CMD_PAIRING_FAIL, sizeof(reason),
&reason);

clear_bit(HCI_CONN_ENCRYPT_PEND, &hcon->flags);
mgmt_auth_failed(hcon, HCI_ERROR_AUTH_FAILURE);

if (chan->data)
@ -1046,8 +1045,24 @@ static void smp_notify_keys(struct l2cap_conn *conn)
struct smp_cmd_pairing *rsp = (void *) &smp->prsp[1];
bool persistent;

if (hcon->type == ACL_LINK) {
if (hcon->key_type == HCI_LK_DEBUG_COMBINATION)
persistent = false;
else
persistent = !test_bit(HCI_CONN_FLUSH_KEY,
&hcon->flags);
} else {
/* The LTKs, IRKs and CSRKs should be persistent only if
* both sides had the bonding bit set in their
* authentication requests.
*/
persistent = !!((req->auth_req & rsp->auth_req) &
SMP_AUTH_BONDING);
}

if (smp->remote_irk) {
mgmt_new_irk(hdev, smp->remote_irk);
mgmt_new_irk(hdev, smp->remote_irk, persistent);

/* Now that user space can be considered to know the
* identity address track the connection based on it
* from now on (assuming this is an LE link).
@ -1075,21 +1090,6 @@ static void smp_notify_keys(struct l2cap_conn *conn)
}
}

if (hcon->type == ACL_LINK) {
if (hcon->key_type == HCI_LK_DEBUG_COMBINATION)
persistent = false;
else
persistent = !test_bit(HCI_CONN_FLUSH_KEY,
&hcon->flags);
} else {
/* The LTKs and CSRKs should be persistent only if both sides
* had the bonding bit set in their authentication requests.
*/
persistent = !!((req->auth_req & rsp->auth_req) &
SMP_AUTH_BONDING);
}


if (smp->csrk) {
smp->csrk->bdaddr_type = hcon->dst_type;
bacpy(&smp->csrk->bdaddr, &hcon->dst);
@ -2380,6 +2380,32 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
return ret;
}

void smp_cancel_pairing(struct hci_conn *hcon)
{
struct l2cap_conn *conn = hcon->l2cap_data;
struct l2cap_chan *chan;
struct smp_chan *smp;

if (!conn)
return;

chan = conn->smp;
if (!chan)
return;

l2cap_chan_lock(chan);

smp = chan->data;
if (smp) {
if (test_bit(SMP_FLAG_COMPLETE, &smp->flags))
smp_failure(conn, 0);
else
smp_failure(conn, SMP_UNSPECIFIED);
}

l2cap_chan_unlock(chan);
}

static int smp_cmd_encrypt_info(struct l2cap_conn *conn, struct sk_buff *skb)
{
struct smp_cmd_encrypt_info *rp = (void *) skb->data;

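The new smp_cancel_pairing() gives the management layer a way to abort an in-flight SMP procedure: it bails out early when there is no L2CAP connection or no SMP channel, takes the channel lock, and then signals failure with reason 0 if pairing had already completed or SMP_UNSPECIFIED otherwise. A minimal usage sketch (example_unpair_cleanup is a hypothetical helper; the real caller is unpair_device() in the mgmt.c hunks above):

static void example_unpair_cleanup(struct hci_dev *hdev, bdaddr_t *bdaddr,
				   u8 addr_type)
{
	struct hci_conn *conn;

	conn = hci_conn_hash_lookup_le(hdev, bdaddr, addr_type);
	if (!conn)
		return;

	/* Abort any ongoing SMP pairing before the link itself goes away */
	smp_cancel_pairing(conn);
}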
@ -180,6 +180,7 @@ enum smp_key_pref {
};

/* SMP Commands */
void smp_cancel_pairing(struct hci_conn *hcon);
bool smp_sufficient_security(struct hci_conn *hcon, u8 sec_level,
enum smp_key_pref key_pref);
int smp_conn_security(struct hci_conn *hcon, __u8 sec_level);

@ -90,36 +90,12 @@ static lowpan_rx_result lowpan_rx_h_frag(struct sk_buff *skb)

int lowpan_iphc_decompress(struct sk_buff *skb)
{
struct ieee802154_addr_sa sa, da;
struct ieee802154_hdr hdr;
u8 iphc0, iphc1;
void *sap, *dap;

if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0)
return -EINVAL;

raw_dump_table(__func__, "raw skb data dump", skb->data, skb->len);

if (lowpan_fetch_skb_u8(skb, &iphc0) ||
lowpan_fetch_skb_u8(skb, &iphc1))
return -EINVAL;

ieee802154_addr_to_sa(&sa, &hdr.source);
ieee802154_addr_to_sa(&da, &hdr.dest);

if (sa.addr_type == IEEE802154_ADDR_SHORT)
sap = &sa.short_addr;
else
sap = &sa.hwaddr;

if (da.addr_type == IEEE802154_ADDR_SHORT)
dap = &da.short_addr;
else
dap = &da.hwaddr;

return lowpan_header_decompress(skb, skb->dev, sap, sa.addr_type,
IEEE802154_ADDR_LEN, dap, da.addr_type,
IEEE802154_ADDR_LEN, iphc0, iphc1);
return lowpan_header_decompress(skb, skb->dev, &hdr.dest, &hdr.source);
}

static lowpan_rx_result lowpan_rx_h_iphc(struct sk_buff *skb)
@ -308,16 +284,16 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev,
if (wdev->type != ARPHRD_IEEE802154 ||
skb->pkt_type == PACKET_OTHERHOST ||
!lowpan_rx_h_check(skb))
return NET_RX_DROP;
goto drop;

ldev = wdev->ieee802154_ptr->lowpan_dev;
if (!ldev || !netif_running(ldev))
return NET_RX_DROP;
goto drop;

/* Replacing skb->dev and followed rx handlers will manipulate skb. */
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
return NET_RX_DROP;
goto out;
skb->dev = ldev;

/* When receive frag1 it's likely that we manipulate the buffer.
@ -328,10 +304,15 @@ static int lowpan_rcv(struct sk_buff *skb, struct net_device *wdev,
lowpan_is_iphc(*skb_network_header(skb))) {
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb)
return NET_RX_DROP;
goto out;
}

return lowpan_invoke_rx_handlers(skb);

drop:
kfree_skb(skb);
out:
return NET_RX_DROP;
}

static struct packet_type lowpan_packet_type = {

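The lowpan_rcv() hunks above replace the scattered return NET_RX_DROP statements with goto drop and goto out labels, so the skb is freed in exactly one place and is not freed at all when skb_share_check() or skb_unshare() has already consumed it. A standalone sketch of the same error-path pattern (example_check and example_handle are hypothetical helpers used only for illustration):

static bool example_check(struct sk_buff *skb);
static int example_handle(struct sk_buff *skb);

static int example_rcv(struct sk_buff *skb)
{
	if (!example_check(skb))
		goto drop;	/* we still own the skb, free it below */

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb)
		goto out;	/* failure already freed the buffer */

	return example_handle(skb);

drop:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}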
@ -14,6 +14,9 @@

#include "6lowpan_i.h"

#define LOWPAN_FRAG1_HEAD_SIZE 0x4
#define LOWPAN_FRAGN_HEAD_SIZE 0x5

/* don't save pan id, it's intra pan */
struct lowpan_addr {
u8 mode;
@ -218,7 +221,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
saddr = &info.saddr.u.extended_addr;

*dgram_size = skb->len;
lowpan_header_compress(skb, ldev, ETH_P_IPV6, daddr, saddr, skb->len);
lowpan_header_compress(skb, ldev, daddr, saddr);
/* dgram_offset = (saved bytes after compression) + lowpan header len */
*dgram_offset = (*dgram_size - skb->len) + skb_network_header_len(skb);

@ -235,7 +238,7 @@ static int lowpan_header(struct sk_buff *skb, struct net_device *ldev,
/* if the destination address is the broadcast address, use the
* corresponding short address
*/
if (lowpan_is_addr_broadcast((const u8 *)daddr)) {
if (!memcmp(daddr, ldev->broadcast, EUI64_ADDR_LEN)) {
da.mode = IEEE802154_ADDR_SHORT;
da.short_addr = cpu_to_le16(IEEE802154_ADDR_BROADCAST);
cb->ackreq = false;

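The dgram_offset computation above expresses how much of the original, uncompressed datagram the compressed header stands for: the bytes saved by IPHC compression plus the length of the compressed header that remains in the skb. A worked example with assumed numbers (the values are illustrative, not taken from this patch):

static unsigned int example_dgram_offset(unsigned int dgram_size,
					 unsigned int skb_len_after,
					 unsigned int net_hdr_len)
{
	/* (saved bytes after compression) + lowpan header length */
	return (dgram_size - skb_len_after) + net_hdr_len;
}

/* example_dgram_offset(120, 90, 10) == 40: compression saved 30 bytes
 * and the compressed header occupies 10, so fragmentation continues at
 * offset 40 of the uncompressed datagram, right after the original
 * 40-byte IPv6 header.
 */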
@ -55,7 +55,7 @@ void mac802154_llsec_destroy(struct mac802154_llsec *sec)

msl = container_of(sl, struct mac802154_llsec_seclevel, level);
list_del(&sl->list);
kfree(msl);
kzfree(msl);
}

list_for_each_entry_safe(dev, dn, &sec->table.devices, list) {
@ -72,7 +72,7 @@ void mac802154_llsec_destroy(struct mac802154_llsec *sec)
mkey = container_of(key->key, struct mac802154_llsec_key, key);
list_del(&key->list);
llsec_key_put(mkey);
kfree(key);
kzfree(key);
}
}

@ -161,7 +161,7 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template)
if (key->tfm[i])
crypto_free_aead(key->tfm[i]);

kfree(key);
kzfree(key);
return NULL;
}

@ -176,7 +176,7 @@ static void llsec_key_release(struct kref *ref)
crypto_free_aead(key->tfm[i]);

crypto_free_blkcipher(key->tfm0);
kfree(key);
kzfree(key);
}

static struct mac802154_llsec_key*
@ -267,7 +267,7 @@ int mac802154_llsec_key_add(struct mac802154_llsec *sec,
return 0;

fail:
kfree(new);
kzfree(new);
return -ENOMEM;
}

@ -347,10 +347,10 @@ static void llsec_dev_free(struct mac802154_llsec_device *dev)
devkey);

list_del(&pos->list);
kfree(devkey);
kzfree(devkey);
}

kfree(dev);
kzfree(dev);
}

int mac802154_llsec_dev_add(struct mac802154_llsec *sec,
@ -681,7 +681,7 @@ llsec_do_encrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,

rc = crypto_aead_encrypt(req);

kfree(req);
kzfree(req);

return rc;
}
@ -881,7 +881,7 @@ llsec_do_decrypt_auth(struct sk_buff *skb, const struct mac802154_llsec *sec,

rc = crypto_aead_decrypt(req);

kfree(req);
kzfree(req);
skb_trim(skb, skb->len - authlen);

return rc;
@ -921,7 +921,7 @@ llsec_update_devkey_record(struct mac802154_llsec_device *dev,
if (!devkey)
list_add_rcu(&next->devkey.list, &dev->dev.keys);
else
kfree(next);
kzfree(next);

spin_unlock_bh(&dev->lock);
}

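Throughout the llsec changes above, kfree() is replaced with kzfree() for allocations that can hold key material, so the memory is cleared before it is returned to the allocator and freed buffers do not keep stale copies of cryptographic keys. A minimal sketch of the behaviour this buys over a plain kfree() (illustrative only: the in-kernel kzfree() takes just the pointer and determines the size itself, whereas this hypothetical helper is passed the length explicitly):

static void example_secure_free(void *objp, size_t len)
{
	if (!objp)
		return;

	memzero_explicit(objp, len);	/* wipe the secret material */
	kfree(objp);			/* then release the allocation */
}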