Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next

Johan Hedberg says:

====================
pull request: bluetooth-next 2015-10-08

Here's another set of Bluetooth & 802.15.4 patches for the 4.4 kernel.

802.15.4:
- Many improvements & fixes to the mrf24j40 driver
- Fixes and cleanups to nl802154, mac802154 & ieee802154 code

Bluetooth:
- New chipset support in btmrvl driver
- Fixes & cleanups to btbcm, btmrvl, bpa10x & btintel drivers
- Support for vendor specific diagnostic data through common API
- Cleanups to the 6lowpan code
- New events & message types for monitor channel

Please let me know if there are any issues pulling. Thanks.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>

+4001 -1123
+20
Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt
··· 1 + * MRF24J40 IEEE 802.15.4 * 2 + 3 + Required properties: 4 + - compatible: should be "microchip,mrf24j40", "microchip,mrf24j40ma", 5 + or "microchip,mrf24j40mc" depends on your transceiver 6 + board 7 + - spi-max-frequency: maximal bus speed, should be set something under or equal 8 + 10000000 9 + - reg: the chipselect index 10 + - interrupts: the interrupt generated by the device. 11 + 12 + Example: 13 + 14 + mrf24j40ma@0 { 15 + compatible = "microchip,mrf24j40ma"; 16 + spi-max-frequency = <8500000>; 17 + reg = <0>; 18 + interrupts = <19 8>; 19 + interrupt-parent = <&gpio3>; 20 + };
+1
MAINTAINERS
··· 6978 6978 L: linux-wpan@vger.kernel.org 6979 6979 S: Maintained 6980 6980 F: drivers/net/ieee802154/mrf24j40.c 6981 + F: Documentation/devicetree/bindings/net/ieee802154/mrf24j40.txt 6981 6982 6982 6983 MSI LAPTOP SUPPORT 6983 6984 M: "Lee, Chun-Yi" <jlee@suse.com>
+4 -2
drivers/bluetooth/Kconfig
··· 4 4 5 5 config BT_INTEL 6 6 tristate 7 + select REGMAP 7 8 8 9 config BT_BCM 9 10 tristate ··· 184 183 config BT_HCIBPA10X 185 184 tristate "HCI BPA10x USB driver" 186 185 depends on USB 186 + select BT_HCIUART_H4 187 187 help 188 188 Bluetooth HCI BPA10x USB driver. 189 189 This driver provides support for the Digianswer BPA 100/105 Bluetooth ··· 277 275 The core driver to support Marvell Bluetooth devices. 278 276 279 277 This driver is required if you want to support 280 - Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897. 278 + Marvell Bluetooth devices, such as 8688/8787/8797/8887/8897/8997. 281 279 282 280 Say Y here to compile Marvell Bluetooth driver 283 281 into the kernel or say M to compile it as module. ··· 291 289 The driver for Marvell Bluetooth chipsets with SDIO interface. 292 290 293 291 This driver is required if you want to use Marvell Bluetooth 294 - devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897 292 + devices with SDIO interface. Currently SD8688/SD8787/SD8797/SD8887/SD8897/SD8997 295 293 chipsets are supported. 296 294 297 295 Say Y here to compile support for Marvell BT-over-SDIO driver
-11
drivers/bluetooth/bfusb.c
··· 422 422 423 423 BT_DBG("hdev %p bfusb %p", hdev, data); 424 424 425 - if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) 426 - return 0; 427 - 428 425 write_lock_irqsave(&data->lock, flags); 429 426 430 427 err = bfusb_rx_submit(data, NULL); 431 428 if (!err) { 432 429 for (i = 1; i < BFUSB_MAX_BULK_RX; i++) 433 430 bfusb_rx_submit(data, NULL); 434 - } else { 435 - clear_bit(HCI_RUNNING, &hdev->flags); 436 431 } 437 432 438 433 write_unlock_irqrestore(&data->lock, flags); ··· 453 458 454 459 BT_DBG("hdev %p bfusb %p", hdev, data); 455 460 456 - if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 457 - return 0; 458 - 459 461 write_lock_irqsave(&data->lock, flags); 460 462 write_unlock_irqrestore(&data->lock, flags); 461 463 ··· 470 478 int sent = 0, size, count; 471 479 472 480 BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len); 473 - 474 - if (!test_bit(HCI_RUNNING, &hdev->flags)) 475 - return -EBUSY; 476 481 477 482 switch (bt_cb(skb)->pkt_type) { 478 483 case HCI_COMMAND_PKT:
+1 -7
drivers/bluetooth/bluecard_cs.c
··· 390 390 for (i = 0; i < len; i++) { 391 391 392 392 /* Allocate packet */ 393 - if (info->rx_skb == NULL) { 393 + if (!info->rx_skb) { 394 394 info->rx_state = RECV_WAIT_PACKET_TYPE; 395 395 info->rx_count = 0; 396 396 info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); ··· 628 628 if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) 629 629 bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE); 630 630 631 - if (test_and_set_bit(HCI_RUNNING, &(hdev->flags))) 632 - return 0; 633 - 634 631 if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) { 635 632 unsigned int iobase = info->p_dev->resource[0]->start; 636 633 ··· 642 645 static int bluecard_hci_close(struct hci_dev *hdev) 643 646 { 644 647 struct bluecard_info *info = hci_get_drvdata(hdev); 645 - 646 - if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags))) 647 - return 0; 648 648 649 649 bluecard_hci_flush(hdev); 650 650
+66 -120
drivers/bluetooth/bpa10x.c
··· 35 35 #include <net/bluetooth/bluetooth.h> 36 36 #include <net/bluetooth/hci_core.h> 37 37 38 - #define VERSION "0.10" 38 + #include "hci_uart.h" 39 + 40 + #define VERSION "0.11" 39 41 40 42 static const struct usb_device_id bpa10x_table[] = { 41 43 /* Tektronix BPA 100/105 (Digianswer) */ ··· 57 55 58 56 struct sk_buff *rx_skb[2]; 59 57 }; 60 - 61 - #define HCI_VENDOR_HDR_SIZE 5 62 - 63 - struct hci_vendor_hdr { 64 - __u8 type; 65 - __le16 snum; 66 - __le16 dlen; 67 - } __packed; 68 - 69 - static int bpa10x_recv(struct hci_dev *hdev, int queue, void *buf, int count) 70 - { 71 - struct bpa10x_data *data = hci_get_drvdata(hdev); 72 - 73 - BT_DBG("%s queue %d buffer %p count %d", hdev->name, 74 - queue, buf, count); 75 - 76 - if (queue < 0 || queue > 1) 77 - return -EILSEQ; 78 - 79 - hdev->stat.byte_rx += count; 80 - 81 - while (count) { 82 - struct sk_buff *skb = data->rx_skb[queue]; 83 - struct { __u8 type; int expect; } *scb; 84 - int type, len = 0; 85 - 86 - if (!skb) { 87 - /* Start of the frame */ 88 - 89 - type = *((__u8 *) buf); 90 - count--; buf++; 91 - 92 - switch (type) { 93 - case HCI_EVENT_PKT: 94 - if (count >= HCI_EVENT_HDR_SIZE) { 95 - struct hci_event_hdr *h = buf; 96 - len = HCI_EVENT_HDR_SIZE + h->plen; 97 - } else 98 - return -EILSEQ; 99 - break; 100 - 101 - case HCI_ACLDATA_PKT: 102 - if (count >= HCI_ACL_HDR_SIZE) { 103 - struct hci_acl_hdr *h = buf; 104 - len = HCI_ACL_HDR_SIZE + 105 - __le16_to_cpu(h->dlen); 106 - } else 107 - return -EILSEQ; 108 - break; 109 - 110 - case HCI_SCODATA_PKT: 111 - if (count >= HCI_SCO_HDR_SIZE) { 112 - struct hci_sco_hdr *h = buf; 113 - len = HCI_SCO_HDR_SIZE + h->dlen; 114 - } else 115 - return -EILSEQ; 116 - break; 117 - 118 - case HCI_VENDOR_PKT: 119 - if (count >= HCI_VENDOR_HDR_SIZE) { 120 - struct hci_vendor_hdr *h = buf; 121 - len = HCI_VENDOR_HDR_SIZE + 122 - __le16_to_cpu(h->dlen); 123 - } else 124 - return -EILSEQ; 125 - break; 126 - } 127 - 128 - skb = bt_skb_alloc(len, GFP_ATOMIC); 129 - if (!skb) 
{ 130 - BT_ERR("%s no memory for packet", hdev->name); 131 - return -ENOMEM; 132 - } 133 - 134 - data->rx_skb[queue] = skb; 135 - 136 - scb = (void *) skb->cb; 137 - scb->type = type; 138 - scb->expect = len; 139 - } else { 140 - /* Continuation */ 141 - 142 - scb = (void *) skb->cb; 143 - len = scb->expect; 144 - } 145 - 146 - len = min(len, count); 147 - 148 - memcpy(skb_put(skb, len), buf, len); 149 - 150 - scb->expect -= len; 151 - 152 - if (scb->expect == 0) { 153 - /* Complete frame */ 154 - 155 - data->rx_skb[queue] = NULL; 156 - 157 - bt_cb(skb)->pkt_type = scb->type; 158 - hci_recv_frame(hdev, skb); 159 - } 160 - 161 - count -= len; buf += len; 162 - } 163 - 164 - return 0; 165 - } 166 58 167 59 static void bpa10x_tx_complete(struct urb *urb) 168 60 { ··· 80 184 kfree_skb(skb); 81 185 } 82 186 187 + #define HCI_VENDOR_HDR_SIZE 5 188 + 189 + #define HCI_RECV_VENDOR \ 190 + .type = HCI_VENDOR_PKT, \ 191 + .hlen = HCI_VENDOR_HDR_SIZE, \ 192 + .loff = 3, \ 193 + .lsize = 2, \ 194 + .maxlen = HCI_MAX_FRAME_SIZE 195 + 196 + static const struct h4_recv_pkt bpa10x_recv_pkts[] = { 197 + { H4_RECV_ACL, .recv = hci_recv_frame }, 198 + { H4_RECV_SCO, .recv = hci_recv_frame }, 199 + { H4_RECV_EVENT, .recv = hci_recv_frame }, 200 + { HCI_RECV_VENDOR, .recv = hci_recv_diag }, 201 + }; 202 + 83 203 static void bpa10x_rx_complete(struct urb *urb) 84 204 { 85 205 struct hci_dev *hdev = urb->context; ··· 109 197 return; 110 198 111 199 if (urb->status == 0) { 112 - if (bpa10x_recv(hdev, usb_pipebulk(urb->pipe), 200 + bool idx = usb_pipebulk(urb->pipe); 201 + 202 + data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx], 113 203 urb->transfer_buffer, 114 - urb->actual_length) < 0) { 204 + urb->actual_length, 205 + bpa10x_recv_pkts, 206 + ARRAY_SIZE(bpa10x_recv_pkts)); 207 + if (IS_ERR(data->rx_skb[idx])) { 115 208 BT_ERR("%s corrupted event packet", hdev->name); 116 209 hdev->stat.err_rx++; 210 + data->rx_skb[idx] = NULL; 117 211 } 118 212 } 119 213 ··· 222 304 223 305 
BT_DBG("%s", hdev->name); 224 306 225 - if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) 226 - return 0; 227 - 228 307 err = bpa10x_submit_intr_urb(hdev); 229 308 if (err < 0) 230 309 goto error; ··· 235 320 error: 236 321 usb_kill_anchored_urbs(&data->rx_anchor); 237 322 238 - clear_bit(HCI_RUNNING, &hdev->flags); 239 - 240 323 return err; 241 324 } 242 325 ··· 243 330 struct bpa10x_data *data = hci_get_drvdata(hdev); 244 331 245 332 BT_DBG("%s", hdev->name); 246 - 247 - if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 248 - return 0; 249 333 250 334 usb_kill_anchored_urbs(&data->rx_anchor); 251 335 ··· 260 350 return 0; 261 351 } 262 352 353 + static int bpa10x_setup(struct hci_dev *hdev) 354 + { 355 + const u8 req[] = { 0x07 }; 356 + struct sk_buff *skb; 357 + 358 + BT_DBG("%s", hdev->name); 359 + 360 + /* Read revision string */ 361 + skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT); 362 + if (IS_ERR(skb)) 363 + return PTR_ERR(skb); 364 + 365 + BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1)); 366 + 367 + kfree_skb(skb); 368 + return 0; 369 + } 370 + 263 371 static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb) 264 372 { 265 373 struct bpa10x_data *data = hci_get_drvdata(hdev); ··· 287 359 int err; 288 360 289 361 BT_DBG("%s", hdev->name); 290 - 291 - if (!test_bit(HCI_RUNNING, &hdev->flags)) 292 - return -EBUSY; 293 362 294 363 skb->dev = (void *) hdev; 295 364 ··· 356 431 return 0; 357 432 } 358 433 434 + static int bpa10x_set_diag(struct hci_dev *hdev, bool enable) 435 + { 436 + const u8 req[] = { 0x00, enable }; 437 + struct sk_buff *skb; 438 + 439 + BT_DBG("%s", hdev->name); 440 + 441 + if (!test_bit(HCI_RUNNING, &hdev->flags)) 442 + return -ENETDOWN; 443 + 444 + /* Enable sniffer operation */ 445 + skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT); 446 + if (IS_ERR(skb)) 447 + return PTR_ERR(skb); 448 + 449 + kfree_skb(skb); 450 + return 0; 451 + } 452 + 359 453 static int 
bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id) 360 454 { 361 455 struct bpa10x_data *data; ··· 409 465 hdev->open = bpa10x_open; 410 466 hdev->close = bpa10x_close; 411 467 hdev->flush = bpa10x_flush; 468 + hdev->setup = bpa10x_setup; 412 469 hdev->send = bpa10x_send_frame; 470 + hdev->set_diag = bpa10x_set_diag; 413 471 414 472 set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); 415 473
+1 -7
drivers/bluetooth/bt3c_cs.c
··· 233 233 info->hdev->stat.byte_rx++; 234 234 235 235 /* Allocate packet */ 236 - if (info->rx_skb == NULL) { 236 + if (!info->rx_skb) { 237 237 info->rx_state = RECV_WAIT_PACKET_TYPE; 238 238 info->rx_count = 0; 239 239 info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); ··· 270 270 /* Unknown packet */ 271 271 BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type); 272 272 info->hdev->stat.err_rx++; 273 - clear_bit(HCI_RUNNING, &(info->hdev->flags)); 274 273 275 274 kfree_skb(info->rx_skb); 276 275 info->rx_skb = NULL; ··· 394 395 395 396 static int bt3c_hci_open(struct hci_dev *hdev) 396 397 { 397 - set_bit(HCI_RUNNING, &(hdev->flags)); 398 - 399 398 return 0; 400 399 } 401 400 402 401 403 402 static int bt3c_hci_close(struct hci_dev *hdev) 404 403 { 405 - if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags))) 406 - return 0; 407 - 408 404 bt3c_hci_flush(hdev); 409 405 410 406 return 0;
+52 -2
drivers/bluetooth/btbcm.c
··· 181 181 return 0; 182 182 } 183 183 184 + static struct sk_buff *btbcm_read_local_name(struct hci_dev *hdev) 185 + { 186 + struct sk_buff *skb; 187 + 188 + skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL, 189 + HCI_INIT_TIMEOUT); 190 + if (IS_ERR(skb)) { 191 + BT_ERR("%s: BCM: Reading local name failed (%ld)", 192 + hdev->name, PTR_ERR(skb)); 193 + return skb; 194 + } 195 + 196 + if (skb->len != sizeof(struct hci_rp_read_local_name)) { 197 + BT_ERR("%s: BCM: Local name length mismatch", hdev->name); 198 + kfree_skb(skb); 199 + return ERR_PTR(-EIO); 200 + } 201 + 202 + return skb; 203 + } 204 + 184 205 static struct sk_buff *btbcm_read_local_version(struct hci_dev *hdev) 185 206 { 186 207 struct sk_buff *skb; ··· 414 393 BT_INFO("%s: BCM: chip id %u", hdev->name, skb->data[1]); 415 394 kfree_skb(skb); 416 395 396 + /* Read Local Name */ 397 + skb = btbcm_read_local_name(hdev); 398 + if (IS_ERR(skb)) 399 + return PTR_ERR(skb); 400 + 401 + BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1)); 402 + kfree_skb(skb); 403 + 417 404 switch ((rev & 0xf000) >> 12) { 418 405 case 0: 419 406 case 3: ··· 493 464 hw_name ? 
: "BCM", (subver & 0x7000) >> 13, 494 465 (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); 495 466 467 + /* Read Local Name */ 468 + skb = btbcm_read_local_name(hdev); 469 + if (IS_ERR(skb)) 470 + return PTR_ERR(skb); 471 + 472 + BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1)); 473 + kfree_skb(skb); 474 + 496 475 btbcm_check_bdaddr(hdev); 497 476 498 477 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); ··· 512 475 int btbcm_setup_apple(struct hci_dev *hdev) 513 476 { 514 477 struct sk_buff *skb; 478 + int err; 479 + 480 + /* Reset */ 481 + err = btbcm_reset(hdev); 482 + if (err) 483 + return err; 515 484 516 485 /* Read Verbose Config Version Info */ 517 486 skb = btbcm_read_verbose_config(hdev); 518 487 if (!IS_ERR(skb)) { 519 - BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], 520 - get_unaligned_le16(skb->data + 5)); 488 + BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, 489 + skb->data[1], get_unaligned_le16(skb->data + 5)); 490 + kfree_skb(skb); 491 + } 492 + 493 + /* Read Local Name */ 494 + skb = btbcm_read_local_name(hdev); 495 + if (!IS_ERR(skb)) { 496 + BT_INFO("%s: %s", hdev->name, (char *)(skb->data + 1)); 521 497 kfree_skb(skb); 522 498 } 523 499
+196
drivers/bluetooth/btintel.c
··· 23 23 24 24 #include <linux/module.h> 25 25 #include <linux/firmware.h> 26 + #include <linux/regmap.h> 26 27 27 28 #include <net/bluetooth/bluetooth.h> 28 29 #include <net/bluetooth/hci_core.h> ··· 215 214 return 0; 216 215 } 217 216 EXPORT_SYMBOL_GPL(btintel_load_ddc_config); 217 + 218 + /* ------- REGMAP IBT SUPPORT ------- */ 219 + 220 + #define IBT_REG_MODE_8BIT 0x00 221 + #define IBT_REG_MODE_16BIT 0x01 222 + #define IBT_REG_MODE_32BIT 0x02 223 + 224 + struct regmap_ibt_context { 225 + struct hci_dev *hdev; 226 + __u16 op_write; 227 + __u16 op_read; 228 + }; 229 + 230 + struct ibt_cp_reg_access { 231 + __le32 addr; 232 + __u8 mode; 233 + __u8 len; 234 + __u8 data[0]; 235 + } __packed; 236 + 237 + struct ibt_rp_reg_access { 238 + __u8 status; 239 + __le32 addr; 240 + __u8 data[0]; 241 + } __packed; 242 + 243 + static int regmap_ibt_read(void *context, const void *addr, size_t reg_size, 244 + void *val, size_t val_size) 245 + { 246 + struct regmap_ibt_context *ctx = context; 247 + struct ibt_cp_reg_access cp; 248 + struct ibt_rp_reg_access *rp; 249 + struct sk_buff *skb; 250 + int err = 0; 251 + 252 + if (reg_size != sizeof(__le32)) 253 + return -EINVAL; 254 + 255 + switch (val_size) { 256 + case 1: 257 + cp.mode = IBT_REG_MODE_8BIT; 258 + break; 259 + case 2: 260 + cp.mode = IBT_REG_MODE_16BIT; 261 + break; 262 + case 4: 263 + cp.mode = IBT_REG_MODE_32BIT; 264 + break; 265 + default: 266 + return -EINVAL; 267 + } 268 + 269 + /* regmap provides a little-endian formatted addr */ 270 + cp.addr = *(__le32 *)addr; 271 + cp.len = val_size; 272 + 273 + bt_dev_dbg(ctx->hdev, "Register (0x%x) read", le32_to_cpu(cp.addr)); 274 + 275 + skb = hci_cmd_sync(ctx->hdev, ctx->op_read, sizeof(cp), &cp, 276 + HCI_CMD_TIMEOUT); 277 + if (IS_ERR(skb)) { 278 + err = PTR_ERR(skb); 279 + bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error (%d)", 280 + le32_to_cpu(cp.addr), err); 281 + return err; 282 + } 283 + 284 + if (skb->len != sizeof(*rp) + val_size) { 285 + 
bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad len", 286 + le32_to_cpu(cp.addr)); 287 + err = -EINVAL; 288 + goto done; 289 + } 290 + 291 + rp = (struct ibt_rp_reg_access *)skb->data; 292 + 293 + if (rp->addr != cp.addr) { 294 + bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad addr", 295 + le32_to_cpu(rp->addr)); 296 + err = -EINVAL; 297 + goto done; 298 + } 299 + 300 + memcpy(val, rp->data, val_size); 301 + 302 + done: 303 + kfree_skb(skb); 304 + return err; 305 + } 306 + 307 + static int regmap_ibt_gather_write(void *context, 308 + const void *addr, size_t reg_size, 309 + const void *val, size_t val_size) 310 + { 311 + struct regmap_ibt_context *ctx = context; 312 + struct ibt_cp_reg_access *cp; 313 + struct sk_buff *skb; 314 + int plen = sizeof(*cp) + val_size; 315 + u8 mode; 316 + int err = 0; 317 + 318 + if (reg_size != sizeof(__le32)) 319 + return -EINVAL; 320 + 321 + switch (val_size) { 322 + case 1: 323 + mode = IBT_REG_MODE_8BIT; 324 + break; 325 + case 2: 326 + mode = IBT_REG_MODE_16BIT; 327 + break; 328 + case 4: 329 + mode = IBT_REG_MODE_32BIT; 330 + break; 331 + default: 332 + return -EINVAL; 333 + } 334 + 335 + cp = kmalloc(plen, GFP_KERNEL); 336 + if (!cp) 337 + return -ENOMEM; 338 + 339 + /* regmap provides a little-endian formatted addr/value */ 340 + cp->addr = *(__le32 *)addr; 341 + cp->mode = mode; 342 + cp->len = val_size; 343 + memcpy(&cp->data, val, val_size); 344 + 345 + bt_dev_dbg(ctx->hdev, "Register (0x%x) write", le32_to_cpu(cp->addr)); 346 + 347 + skb = hci_cmd_sync(ctx->hdev, ctx->op_write, plen, cp, HCI_CMD_TIMEOUT); 348 + if (IS_ERR(skb)) { 349 + err = PTR_ERR(skb); 350 + bt_dev_err(ctx->hdev, "regmap: Register (0x%x) write error (%d)", 351 + le32_to_cpu(cp->addr), err); 352 + goto done; 353 + } 354 + kfree_skb(skb); 355 + 356 + done: 357 + kfree(cp); 358 + return err; 359 + } 360 + 361 + static int regmap_ibt_write(void *context, const void *data, size_t count) 362 + { 363 + /* data contains 
register+value, since we only support 32bit addr, 364 + * minimum data size is 4 bytes. 365 + */ 366 + if (WARN_ONCE(count < 4, "Invalid register access")) 367 + return -EINVAL; 368 + 369 + return regmap_ibt_gather_write(context, data, 4, data + 4, count - 4); 370 + } 371 + 372 + static void regmap_ibt_free_context(void *context) 373 + { 374 + kfree(context); 375 + } 376 + 377 + static struct regmap_bus regmap_ibt = { 378 + .read = regmap_ibt_read, 379 + .write = regmap_ibt_write, 380 + .gather_write = regmap_ibt_gather_write, 381 + .free_context = regmap_ibt_free_context, 382 + .reg_format_endian_default = REGMAP_ENDIAN_LITTLE, 383 + .val_format_endian_default = REGMAP_ENDIAN_LITTLE, 384 + }; 385 + 386 + /* Config is the same for all register regions */ 387 + static const struct regmap_config regmap_ibt_cfg = { 388 + .name = "btintel_regmap", 389 + .reg_bits = 32, 390 + .val_bits = 32, 391 + }; 392 + 393 + struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read, 394 + u16 opcode_write) 395 + { 396 + struct regmap_ibt_context *ctx; 397 + 398 + bt_dev_info(hdev, "regmap: Init R%x-W%x region", opcode_read, 399 + opcode_write); 400 + 401 + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); 402 + if (!ctx) 403 + return ERR_PTR(-ENOMEM); 404 + 405 + ctx->op_read = opcode_read; 406 + ctx->op_write = opcode_write; 407 + ctx->hdev = hdev; 408 + 409 + return regmap_init(&hdev->dev, &regmap_ibt, ctx, &regmap_ibt_cfg); 410 + } 411 + EXPORT_SYMBOL_GPL(btintel_regmap_init); 218 412 219 413 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); 220 414 MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION);
+9
drivers/bluetooth/btintel.h
··· 80 80 const void *param); 81 81 int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name); 82 82 83 + struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read, 84 + u16 opcode_write); 85 + 83 86 #else 84 87 85 88 static inline int btintel_check_bdaddr(struct hci_dev *hdev) ··· 116 113 return -EOPNOTSUPP; 117 114 } 118 115 116 + static inline struct regmap *btintel_regmap_init(struct hci_dev *hdev, 117 + u16 opcode_read, 118 + u16 opcode_write) 119 + { 120 + return ERR_PTR(-EINVAL); 121 + } 119 122 #endif
+1 -13
drivers/bluetooth/btmrvl_main.c
··· 184 184 } 185 185 186 186 skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_ATOMIC); 187 - if (skb == NULL) { 187 + if (!skb) { 188 188 BT_ERR("No free skb"); 189 189 return -ENOMEM; 190 190 } ··· 436 436 437 437 BT_DBG("type=%d, len=%d", skb->pkt_type, skb->len); 438 438 439 - if (!test_bit(HCI_RUNNING, &hdev->flags)) { 440 - BT_ERR("Failed testing HCI_RUNING, flags=%lx", hdev->flags); 441 - print_hex_dump_bytes("data: ", DUMP_PREFIX_OFFSET, 442 - skb->data, skb->len); 443 - return -EBUSY; 444 - } 445 - 446 439 switch (bt_cb(skb)->pkt_type) { 447 440 case HCI_COMMAND_PKT: 448 441 hdev->stat.cmd_tx++; ··· 470 477 { 471 478 struct btmrvl_private *priv = hci_get_drvdata(hdev); 472 479 473 - if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 474 - return 0; 475 - 476 480 skb_queue_purge(&priv->adapter->tx_queue); 477 481 478 482 return 0; ··· 477 487 478 488 static int btmrvl_open(struct hci_dev *hdev) 479 489 { 480 - set_bit(HCI_RUNNING, &hdev->flags); 481 - 482 490 return 0; 483 491 } 484 492
+48 -6
drivers/bluetooth/btmrvl_sdio.c
··· 146 146 .fw_dump_end = 0xea, 147 147 }; 148 148 149 + static const struct btmrvl_sdio_card_reg btmrvl_reg_8997 = { 150 + .cfg = 0x00, 151 + .host_int_mask = 0x08, 152 + .host_intstatus = 0x0c, 153 + .card_status = 0x5c, 154 + .sq_read_base_addr_a0 = 0xf8, 155 + .sq_read_base_addr_a1 = 0xf9, 156 + .card_revision = 0xc8, 157 + .card_fw_status0 = 0xe8, 158 + .card_fw_status1 = 0xe9, 159 + .card_rx_len = 0xea, 160 + .card_rx_unit = 0xeb, 161 + .io_port_0 = 0xe4, 162 + .io_port_1 = 0xe5, 163 + .io_port_2 = 0xe6, 164 + .int_read_to_clear = true, 165 + .host_int_rsr = 0x04, 166 + .card_misc_cfg = 0xD8, 167 + .fw_dump_ctrl = 0xf0, 168 + .fw_dump_start = 0xf1, 169 + .fw_dump_end = 0xf8, 170 + }; 171 + 149 172 static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = { 150 173 .helper = "mrvl/sd8688_helper.bin", 151 174 .firmware = "mrvl/sd8688.bin", ··· 214 191 .supports_fw_dump = true, 215 192 }; 216 193 194 + static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = { 195 + .helper = NULL, 196 + .firmware = "mrvl/sd8997_uapsta.bin", 197 + .reg = &btmrvl_reg_8997, 198 + .support_pscan_win_report = true, 199 + .sd_blksz_fw_dl = 256, 200 + .supports_fw_dump = true, 201 + }; 202 + 217 203 static const struct sdio_device_id btmrvl_sdio_ids[] = { 218 204 /* Marvell SD8688 Bluetooth device */ 219 205 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9105), 220 - .driver_data = (unsigned long) &btmrvl_sdio_sd8688 }, 206 + .driver_data = (unsigned long)&btmrvl_sdio_sd8688 }, 221 207 /* Marvell SD8787 Bluetooth device */ 222 208 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911A), 223 - .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, 209 + .driver_data = (unsigned long)&btmrvl_sdio_sd8787 }, 224 210 /* Marvell SD8787 Bluetooth AMP device */ 225 211 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x911B), 226 - .driver_data = (unsigned long) &btmrvl_sdio_sd8787 }, 212 + .driver_data = (unsigned long)&btmrvl_sdio_sd8787 }, 227 213 /* Marvell SD8797 Bluetooth device */ 228 214 { 
SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912A), 229 - .driver_data = (unsigned long) &btmrvl_sdio_sd8797 }, 215 + .driver_data = (unsigned long)&btmrvl_sdio_sd8797 }, 230 216 /* Marvell SD8887 Bluetooth device */ 231 217 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9136), 232 218 .driver_data = (unsigned long)&btmrvl_sdio_sd8887 }, 233 219 /* Marvell SD8897 Bluetooth device */ 234 220 { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x912E), 235 - .driver_data = (unsigned long) &btmrvl_sdio_sd8897 }, 221 + .driver_data = (unsigned long)&btmrvl_sdio_sd8897 }, 222 + /* Marvell SD8997 Bluetooth device */ 223 + { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, 0x9142), 224 + .driver_data = (unsigned long)&btmrvl_sdio_sd8997 }, 236 225 237 226 { } /* Terminating entry */ 238 227 }; ··· 654 619 655 620 /* Allocate buffer */ 656 621 skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_ATOMIC); 657 - if (skb == NULL) { 622 + if (!skb) { 658 623 BT_ERR("No free skb"); 659 624 ret = -ENOMEM; 660 625 goto exit; ··· 1313 1278 1314 1279 if (memory_size == 0) { 1315 1280 BT_INFO("Firmware dump finished!"); 1281 + sdio_writeb(card->func, FW_DUMP_READ_DONE, 1282 + card->reg->fw_dump_ctrl, &ret); 1283 + if (ret) { 1284 + BT_ERR("SDIO Write MEMDUMP_FINISH ERR"); 1285 + goto done; 1286 + } 1316 1287 break; 1317 1288 } 1318 1289 ··· 1657 1616 MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); 1658 1617 MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin"); 1659 1618 MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin"); 1619 + MODULE_FIRMWARE("mrvl/sd8997_uapsta.bin");
+1 -13
drivers/bluetooth/btsdio.c
··· 194 194 195 195 BT_DBG("%s", hdev->name); 196 196 197 - if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) 198 - return 0; 199 - 200 197 sdio_claim_host(data->func); 201 198 202 199 err = sdio_enable_func(data->func); 203 - if (err < 0) { 204 - clear_bit(HCI_RUNNING, &hdev->flags); 200 + if (err < 0) 205 201 goto release; 206 - } 207 202 208 203 err = sdio_claim_irq(data->func, btsdio_interrupt); 209 204 if (err < 0) { 210 205 sdio_disable_func(data->func); 211 - clear_bit(HCI_RUNNING, &hdev->flags); 212 206 goto release; 213 207 } 214 208 ··· 222 228 struct btsdio_data *data = hci_get_drvdata(hdev); 223 229 224 230 BT_DBG("%s", hdev->name); 225 - 226 - if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 227 - return 0; 228 231 229 232 sdio_claim_host(data->func); 230 233 ··· 251 260 struct btsdio_data *data = hci_get_drvdata(hdev); 252 261 253 262 BT_DBG("%s", hdev->name); 254 - 255 - if (!test_bit(HCI_RUNNING, &hdev->flags)) 256 - return -EBUSY; 257 263 258 264 switch (bt_cb(skb)->pkt_type) { 259 265 case HCI_COMMAND_PKT:
+2 -8
drivers/bluetooth/btuart_cs.c
··· 38 38 #include <linux/serial.h> 39 39 #include <linux/serial_reg.h> 40 40 #include <linux/bitops.h> 41 - #include <asm/io.h> 41 + #include <linux/io.h> 42 42 43 43 #include <pcmcia/cistpl.h> 44 44 #include <pcmcia/ciscode.h> ··· 188 188 info->hdev->stat.byte_rx++; 189 189 190 190 /* Allocate packet */ 191 - if (info->rx_skb == NULL) { 191 + if (!info->rx_skb) { 192 192 info->rx_state = RECV_WAIT_PACKET_TYPE; 193 193 info->rx_count = 0; 194 194 info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); ··· 223 223 /* Unknown packet */ 224 224 BT_ERR("Unknown HCI packet with type 0x%02x received", bt_cb(info->rx_skb)->pkt_type); 225 225 info->hdev->stat.err_rx++; 226 - clear_bit(HCI_RUNNING, &(info->hdev->flags)); 227 226 228 227 kfree_skb(info->rx_skb); 229 228 info->rx_skb = NULL; ··· 408 409 409 410 static int btuart_hci_open(struct hci_dev *hdev) 410 411 { 411 - set_bit(HCI_RUNNING, &(hdev->flags)); 412 - 413 412 return 0; 414 413 } 415 414 416 415 417 416 static int btuart_hci_close(struct hci_dev *hdev) 418 417 { 419 - if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags))) 420 - return 0; 421 - 422 418 btuart_hci_flush(hdev); 423 419 424 420 return 0;
-13
drivers/bluetooth/btusb.c
··· 940 940 941 941 data->intf->needs_remote_wakeup = 1; 942 942 943 - if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) 944 - goto done; 945 - 946 943 if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags)) 947 944 goto done; 948 945 ··· 962 965 963 966 failed: 964 967 clear_bit(BTUSB_INTR_RUNNING, &data->flags); 965 - clear_bit(HCI_RUNNING, &hdev->flags); 966 968 usb_autopm_put_interface(data->intf); 967 969 return err; 968 970 } ··· 979 983 int err; 980 984 981 985 BT_DBG("%s", hdev->name); 982 - 983 - if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 984 - return 0; 985 986 986 987 cancel_work_sync(&data->work); 987 988 cancel_work_sync(&data->waker); ··· 1148 1155 struct urb *urb; 1149 1156 1150 1157 BT_DBG("%s", hdev->name); 1151 - 1152 - if (!test_bit(HCI_RUNNING, &hdev->flags)) 1153 - return -EBUSY; 1154 1158 1155 1159 switch (bt_cb(skb)->pkt_type) { 1156 1160 case HCI_COMMAND_PKT: ··· 1832 1842 struct urb *urb; 1833 1843 1834 1844 BT_DBG("%s", hdev->name); 1835 - 1836 - if (!test_bit(HCI_RUNNING, &hdev->flags)) 1837 - return -EBUSY; 1838 1845 1839 1846 switch (bt_cb(skb)->pkt_type) { 1840 1847 case HCI_COMMAND_PKT:
-13
drivers/bluetooth/btwilink.c
··· 155 155 156 156 BT_DBG("%s %p", hdev->name, hdev); 157 157 158 - if (test_and_set_bit(HCI_RUNNING, &hdev->flags)) 159 - return -EBUSY; 160 - 161 158 /* provide contexts for callbacks from ST */ 162 159 hst = hci_get_drvdata(hdev); 163 160 ··· 178 181 goto done; 179 182 180 183 if (err != -EINPROGRESS) { 181 - clear_bit(HCI_RUNNING, &hdev->flags); 182 184 BT_ERR("st_register failed %d", err); 183 185 return err; 184 186 } ··· 191 195 (&hst->wait_reg_completion, 192 196 msecs_to_jiffies(BT_REGISTER_TIMEOUT)); 193 197 if (!timeleft) { 194 - clear_bit(HCI_RUNNING, &hdev->flags); 195 198 BT_ERR("Timeout(%d sec),didn't get reg " 196 199 "completion signal from ST", 197 200 BT_REGISTER_TIMEOUT / 1000); ··· 200 205 /* Is ST registration callback 201 206 * called with ERROR status? */ 202 207 if (hst->reg_status != 0) { 203 - clear_bit(HCI_RUNNING, &hdev->flags); 204 208 BT_ERR("ST registration completed with invalid " 205 209 "status %d", hst->reg_status); 206 210 return -EAGAIN; ··· 209 215 hst->st_write = ti_st_proto[i].write; 210 216 if (!hst->st_write) { 211 217 BT_ERR("undefined ST write function"); 212 - clear_bit(HCI_RUNNING, &hdev->flags); 213 218 for (i = 0; i < MAX_BT_CHNL_IDS; i++) { 214 219 /* Undo registration with ST */ 215 220 err = st_unregister(&ti_st_proto[i]); ··· 229 236 int err, i; 230 237 struct ti_st *hst = hci_get_drvdata(hdev); 231 238 232 - if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 233 - return 0; 234 - 235 239 for (i = MAX_BT_CHNL_IDS-1; i >= 0; i--) { 236 240 err = st_unregister(&ti_st_proto[i]); 237 241 if (err) ··· 245 255 { 246 256 struct ti_st *hst; 247 257 long len; 248 - 249 - if (!test_bit(HCI_RUNNING, &hdev->flags)) 250 - return -EBUSY; 251 258 252 259 hst = hci_get_drvdata(hdev); 253 260
-5
drivers/bluetooth/dtl1_cs.c
··· 357 357 358 358 static int dtl1_hci_open(struct hci_dev *hdev) 359 359 { 360 - set_bit(HCI_RUNNING, &(hdev->flags)); 361 - 362 360 return 0; 363 361 } 364 362 ··· 374 376 375 377 static int dtl1_hci_close(struct hci_dev *hdev) 376 378 { 377 - if (!test_and_clear_bit(HCI_RUNNING, &(hdev->flags))) 378 - return 0; 379 - 380 379 dtl1_hci_flush(hdev); 381 380 382 381 return 0;
+186 -55
drivers/bluetooth/hci_bcm.c
··· 32 32 #include <linux/gpio/consumer.h> 33 33 #include <linux/tty.h> 34 34 #include <linux/interrupt.h> 35 + #include <linux/dmi.h> 36 + #include <linux/pm_runtime.h> 35 37 36 38 #include <net/bluetooth/bluetooth.h> 37 39 #include <net/bluetooth/hci_core.h> 38 40 39 41 #include "btbcm.h" 40 42 #include "hci_uart.h" 43 + 44 + #define BCM_LM_DIAG_PKT 0x07 45 + #define BCM_LM_DIAG_SIZE 63 46 + 47 + #define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */ 41 48 42 49 struct bcm_device { 43 50 struct list_head list; ··· 62 55 int irq; 63 56 u8 irq_polarity; 64 57 65 - #ifdef CONFIG_PM_SLEEP 58 + #ifdef CONFIG_PM 66 59 struct hci_uart *hu; 67 60 bool is_suspended; /* suspend/resume flag */ 68 61 #endif ··· 159 152 return 0; 160 153 } 161 154 162 - #ifdef CONFIG_PM_SLEEP 155 + #ifdef CONFIG_PM 163 156 static irqreturn_t bcm_host_wake(int irq, void *data) 164 157 { 165 158 struct bcm_device *bdev = data; 166 159 167 160 bt_dev_dbg(bdev, "Host wake IRQ"); 161 + 162 + pm_runtime_get(&bdev->pdev->dev); 163 + pm_runtime_mark_last_busy(&bdev->pdev->dev); 164 + pm_runtime_put_autosuspend(&bdev->pdev->dev); 168 165 169 166 return IRQ_HANDLED; 170 167 } ··· 193 182 goto unlock; 194 183 195 184 device_init_wakeup(&bdev->pdev->dev, true); 185 + 186 + pm_runtime_set_autosuspend_delay(&bdev->pdev->dev, 187 + BCM_AUTOSUSPEND_DELAY); 188 + pm_runtime_use_autosuspend(&bdev->pdev->dev); 189 + pm_runtime_set_active(&bdev->pdev->dev); 190 + pm_runtime_enable(&bdev->pdev->dev); 196 191 } 197 192 198 193 unlock: ··· 214 197 .bt_wake_active = 1, /* BT_WAKE active mode: 1 = high, 0 = low */ 215 198 .host_wake_active = 0, /* HOST_WAKE active mode: 1 = high, 0 = low */ 216 199 .allow_host_sleep = 1, /* Allow host sleep in SCO flag */ 217 - .combine_modes = 0, /* Combine sleep and LPM flag */ 200 + .combine_modes = 1, /* Combine sleep and LPM flag */ 218 201 .tristate_control = 0, /* Allow tri-state control of UART tx flag */ 219 202 /* Irrelevant USB flags */ 220 203 .usb_auto_sleep = 
0, ··· 249 232 static inline int bcm_setup_sleep(struct hci_uart *hu) { return 0; } 250 233 #endif 251 234 235 + static int bcm_set_diag(struct hci_dev *hdev, bool enable) 236 + { 237 + struct hci_uart *hu = hci_get_drvdata(hdev); 238 + struct bcm_data *bcm = hu->priv; 239 + struct sk_buff *skb; 240 + 241 + if (!test_bit(HCI_RUNNING, &hdev->flags)) 242 + return -ENETDOWN; 243 + 244 + skb = bt_skb_alloc(3, GFP_KERNEL); 245 + if (IS_ERR(skb)) 246 + return PTR_ERR(skb); 247 + 248 + *skb_put(skb, 1) = BCM_LM_DIAG_PKT; 249 + *skb_put(skb, 1) = 0xf0; 250 + *skb_put(skb, 1) = enable; 251 + 252 + skb_queue_tail(&bcm->txq, skb); 253 + hci_uart_tx_wakeup(hu); 254 + 255 + return 0; 256 + } 257 + 252 258 static int bcm_open(struct hci_uart *hu) 253 259 { 254 260 struct bcm_data *bcm; ··· 298 258 if (hu->tty->dev->parent == dev->pdev->dev.parent) { 299 259 bcm->dev = dev; 300 260 hu->init_speed = dev->init_speed; 301 - #ifdef CONFIG_PM_SLEEP 261 + #ifdef CONFIG_PM 302 262 dev->hu = hu; 303 263 #endif 304 264 bcm_gpio_set_power(bcm->dev, true); ··· 322 282 mutex_lock(&bcm_device_lock); 323 283 if (bcm_device_exists(bdev)) { 324 284 bcm_gpio_set_power(bdev, false); 325 - #ifdef CONFIG_PM_SLEEP 285 + #ifdef CONFIG_PM 286 + pm_runtime_disable(&bdev->pdev->dev); 287 + pm_runtime_set_suspended(&bdev->pdev->dev); 288 + 326 289 if (device_can_wakeup(&bdev->pdev->dev)) { 327 290 devm_free_irq(&bdev->pdev->dev, bdev->irq, bdev); 328 291 device_init_wakeup(&bdev->pdev->dev, false); ··· 365 322 366 323 bt_dev_dbg(hu->hdev, "hu %p", hu); 367 324 325 + hu->hdev->set_diag = bcm_set_diag; 368 326 hu->hdev->set_bdaddr = btbcm_set_bdaddr; 369 327 370 328 err = btbcm_initialize(hu->hdev, fw_name, sizeof(fw_name)); ··· 423 379 return err; 424 380 } 425 381 382 + #define BCM_RECV_LM_DIAG \ 383 + .type = BCM_LM_DIAG_PKT, \ 384 + .hlen = BCM_LM_DIAG_SIZE, \ 385 + .loff = 0, \ 386 + .lsize = 0, \ 387 + .maxlen = BCM_LM_DIAG_SIZE 388 + 426 389 static const struct h4_recv_pkt bcm_recv_pkts[] = { 427 - { 
H4_RECV_ACL, .recv = hci_recv_frame }, 428 - { H4_RECV_SCO, .recv = hci_recv_frame }, 429 - { H4_RECV_EVENT, .recv = hci_recv_frame }, 390 + { H4_RECV_ACL, .recv = hci_recv_frame }, 391 + { H4_RECV_SCO, .recv = hci_recv_frame }, 392 + { H4_RECV_EVENT, .recv = hci_recv_frame }, 393 + { BCM_RECV_LM_DIAG, .recv = hci_recv_diag }, 430 394 }; 431 395 432 396 static int bcm_recv(struct hci_uart *hu, const void *data, int count) ··· 451 399 bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); 452 400 bcm->rx_skb = NULL; 453 401 return err; 402 + } else if (!bcm->rx_skb) { 403 + /* Delay auto-suspend when receiving completed packet */ 404 + mutex_lock(&bcm_device_lock); 405 + if (bcm->dev && bcm_device_exists(bcm->dev)) { 406 + pm_runtime_get(&bcm->dev->pdev->dev); 407 + pm_runtime_mark_last_busy(&bcm->dev->pdev->dev); 408 + pm_runtime_put_autosuspend(&bcm->dev->pdev->dev); 409 + } 410 + mutex_unlock(&bcm_device_lock); 454 411 } 455 412 456 413 return count; ··· 481 420 static struct sk_buff *bcm_dequeue(struct hci_uart *hu) 482 421 { 483 422 struct bcm_data *bcm = hu->priv; 423 + struct sk_buff *skb = NULL; 424 + struct bcm_device *bdev = NULL; 484 425 485 - return skb_dequeue(&bcm->txq); 426 + mutex_lock(&bcm_device_lock); 427 + 428 + if (bcm_device_exists(bcm->dev)) { 429 + bdev = bcm->dev; 430 + pm_runtime_get_sync(&bdev->pdev->dev); 431 + /* Shall be resumed here */ 432 + } 433 + 434 + skb = skb_dequeue(&bcm->txq); 435 + 436 + if (bdev) { 437 + pm_runtime_mark_last_busy(&bdev->pdev->dev); 438 + pm_runtime_put_autosuspend(&bdev->pdev->dev); 439 + } 440 + 441 + mutex_unlock(&bcm_device_lock); 442 + 443 + return skb; 486 444 } 445 + 446 + #ifdef CONFIG_PM 447 + static int bcm_suspend_device(struct device *dev) 448 + { 449 + struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev)); 450 + 451 + bt_dev_dbg(bdev, ""); 452 + 453 + if (!bdev->is_suspended && bdev->hu) { 454 + hci_uart_set_flow_control(bdev->hu, true); 455 + 456 + /* Once this returns, 
driver suspends BT via GPIO */ 457 + bdev->is_suspended = true; 458 + } 459 + 460 + /* Suspend the device */ 461 + if (bdev->device_wakeup) { 462 + gpiod_set_value(bdev->device_wakeup, false); 463 + bt_dev_dbg(bdev, "suspend, delaying 15 ms"); 464 + mdelay(15); 465 + } 466 + 467 + return 0; 468 + } 469 + 470 + static int bcm_resume_device(struct device *dev) 471 + { 472 + struct bcm_device *bdev = platform_get_drvdata(to_platform_device(dev)); 473 + 474 + bt_dev_dbg(bdev, ""); 475 + 476 + if (bdev->device_wakeup) { 477 + gpiod_set_value(bdev->device_wakeup, true); 478 + bt_dev_dbg(bdev, "resume, delaying 15 ms"); 479 + mdelay(15); 480 + } 481 + 482 + /* When this executes, the device has woken up already */ 483 + if (bdev->is_suspended && bdev->hu) { 484 + bdev->is_suspended = false; 485 + 486 + hci_uart_set_flow_control(bdev->hu, false); 487 + } 488 + 489 + return 0; 490 + } 491 + #endif 487 492 488 493 #ifdef CONFIG_PM_SLEEP 489 494 /* Platform suspend callback */ ··· 560 433 561 434 bt_dev_dbg(bdev, "suspend: is_suspended %d", bdev->is_suspended); 562 435 436 + /* bcm_suspend can be called at any time as long as platform device is 437 + * bound, so it should use bcm_device_lock to protect access to hci_uart 438 + * and device_wake-up GPIO. 
439 + */ 563 440 mutex_lock(&bcm_device_lock); 564 441 565 442 if (!bdev->hu) 566 443 goto unlock; 567 444 568 - if (!bdev->is_suspended) { 569 - hci_uart_set_flow_control(bdev->hu, true); 570 - 571 - /* Once this callback returns, driver suspends BT via GPIO */ 572 - bdev->is_suspended = true; 573 - } 574 - 575 - /* Suspend the device */ 576 - if (bdev->device_wakeup) { 577 - gpiod_set_value(bdev->device_wakeup, false); 578 - bt_dev_dbg(bdev, "suspend, delaying 15 ms"); 579 - mdelay(15); 580 - } 445 + if (pm_runtime_active(dev)) 446 + bcm_suspend_device(dev); 581 447 582 448 if (device_may_wakeup(&bdev->pdev->dev)) { 583 449 error = enable_irq_wake(bdev->irq); ··· 591 471 592 472 bt_dev_dbg(bdev, "resume: is_suspended %d", bdev->is_suspended); 593 473 474 + /* bcm_resume can be called at any time as long as platform device is 475 + * bound, so it should use bcm_device_lock to protect access to hci_uart 476 + * and device_wake-up GPIO. 477 + */ 594 478 mutex_lock(&bcm_device_lock); 595 479 596 480 if (!bdev->hu) ··· 605 481 bt_dev_dbg(bdev, "BCM irq: disabled"); 606 482 } 607 483 608 - if (bdev->device_wakeup) { 609 - gpiod_set_value(bdev->device_wakeup, true); 610 - bt_dev_dbg(bdev, "resume, delaying 15 ms"); 611 - mdelay(15); 612 - } 613 - 614 - /* When this callback executes, the device has woken up already */ 615 - if (bdev->is_suspended) { 616 - bdev->is_suspended = false; 617 - 618 - hci_uart_set_flow_control(bdev->hu, false); 619 - } 484 + bcm_resume_device(dev); 620 485 621 486 unlock: 622 487 mutex_unlock(&bcm_device_lock); 488 + 489 + pm_runtime_disable(dev); 490 + pm_runtime_set_active(dev); 491 + pm_runtime_enable(dev); 623 492 624 493 return 0; 625 494 } ··· 630 513 }; 631 514 632 515 #ifdef CONFIG_ACPI 516 + static u8 acpi_active_low = ACPI_ACTIVE_LOW; 517 + 518 + /* IRQ polarity of some chipsets are not defined correctly in ACPI table. 
*/ 519 + static const struct dmi_system_id bcm_wrong_irq_dmi_table[] = { 520 + { 521 + .ident = "Asus T100TA", 522 + .matches = { 523 + DMI_EXACT_MATCH(DMI_SYS_VENDOR, 524 + "ASUSTeK COMPUTER INC."), 525 + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T100TA"), 526 + }, 527 + .driver_data = &acpi_active_low, 528 + }, 529 + { } 530 + }; 531 + 633 532 static int bcm_resource(struct acpi_resource *ares, void *data) 634 533 { 635 534 struct bcm_device *dev = data; ··· 682 549 static int bcm_acpi_probe(struct bcm_device *dev) 683 550 { 684 551 struct platform_device *pdev = dev->pdev; 685 - const struct acpi_device_id *id; 686 - struct acpi_device *adev; 687 552 LIST_HEAD(resources); 553 + const struct dmi_system_id *dmi_id; 688 554 int ret; 689 - 690 - id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev); 691 - if (!id) 692 - return -ENODEV; 693 555 694 556 /* Retrieve GPIO data */ 695 557 dev->name = dev_name(&pdev->dev); ··· 730 602 } 731 603 732 604 /* Retrieve UART ACPI info */ 733 - adev = ACPI_COMPANION(&dev->pdev->dev); 734 - if (!adev) 735 - return 0; 605 + ret = acpi_dev_get_resources(ACPI_COMPANION(&dev->pdev->dev), 606 + &resources, bcm_resource, dev); 607 + if (ret < 0) 608 + return ret; 609 + acpi_dev_free_resource_list(&resources); 736 610 737 - acpi_dev_get_resources(adev, &resources, bcm_resource, dev); 611 + dmi_id = dmi_first_match(bcm_wrong_irq_dmi_table); 612 + if (dmi_id) { 613 + bt_dev_warn(dev, "%s: Overwriting IRQ polarity to active low", 614 + dmi_id->ident); 615 + dev->irq_polarity = *(u8 *)dmi_id->driver_data; 616 + } 738 617 739 618 return 0; 740 619 } ··· 755 620 static int bcm_probe(struct platform_device *pdev) 756 621 { 757 622 struct bcm_device *dev; 758 - struct acpi_device_id *pdata = pdev->dev.platform_data; 759 623 int ret; 760 624 761 625 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); ··· 763 629 764 630 dev->pdev = pdev; 765 631 766 - if (ACPI_HANDLE(&pdev->dev)) { 767 - ret = bcm_acpi_probe(dev); 768 - if (ret) 
769 - return ret; 770 - } else if (pdata) { 771 - dev->name = pdata->id; 772 - } else { 773 - return -ENODEV; 774 - } 632 + ret = bcm_acpi_probe(dev); 633 + if (ret) 634 + return ret; 775 635 776 636 platform_set_drvdata(pdev, dev); 777 637 ··· 821 693 #endif 822 694 823 695 /* Platform suspend and resume callbacks */ 824 - static SIMPLE_DEV_PM_OPS(bcm_pm_ops, bcm_suspend, bcm_resume); 696 + static const struct dev_pm_ops bcm_pm_ops = { 697 + SET_SYSTEM_SLEEP_PM_OPS(bcm_suspend, bcm_resume) 698 + SET_RUNTIME_PM_OPS(bcm_suspend_device, bcm_resume_device, NULL) 699 + }; 825 700 826 701 static struct platform_driver bcm_driver = { 827 702 .probe = bcm_probe,
+1
drivers/bluetooth/hci_h4.c
··· 266 266 267 267 return skb; 268 268 } 269 + EXPORT_SYMBOL_GPL(h4_recv_buf);
+5 -5
drivers/bluetooth/hci_h5.c
··· 128 128 { 129 129 const unsigned char sync_req[] = { 0x01, 0x7e }; 130 130 unsigned char conf_req[] = { 0x03, 0xfc, 0x01 }; 131 - struct hci_uart *hu = (struct hci_uart *) arg; 131 + struct hci_uart *hu = (struct hci_uart *)arg; 132 132 struct h5 *h5 = hu->priv; 133 133 struct sk_buff *skb; 134 134 unsigned long flags; ··· 210 210 211 211 init_timer(&h5->timer); 212 212 h5->timer.function = h5_timed_event; 213 - h5->timer.data = (unsigned long) hu; 213 + h5->timer.data = (unsigned long)hu; 214 214 215 215 h5->tx_win = H5_TX_WIN_MAX; 216 216 ··· 453 453 return -ENOMEM; 454 454 } 455 455 456 - h5->rx_skb->dev = (void *) hu->hdev; 456 + h5->rx_skb->dev = (void *)hu->hdev; 457 457 458 458 return 0; 459 459 } ··· 696 696 } 697 697 698 698 skb = skb_dequeue(&h5->unrel); 699 - if (skb != NULL) { 699 + if (skb) { 700 700 nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type, 701 701 skb->data, skb->len); 702 702 if (nskb) { ··· 714 714 goto unlock; 715 715 716 716 skb = skb_dequeue(&h5->rel); 717 - if (skb != NULL) { 717 + if (skb) { 718 718 nskb = h5_prepare_pkt(hu, bt_cb(skb)->pkt_type, 719 719 skb->data, skb->len); 720 720 if (nskb) {
-24
drivers/bluetooth/hci_intel.c
··· 1165 1165 { }, 1166 1166 }; 1167 1167 MODULE_DEVICE_TABLE(acpi, intel_acpi_match); 1168 - 1169 - static int intel_acpi_probe(struct intel_device *idev) 1170 - { 1171 - const struct acpi_device_id *id; 1172 - 1173 - id = acpi_match_device(intel_acpi_match, &idev->pdev->dev); 1174 - if (!id) 1175 - return -ENODEV; 1176 - 1177 - return 0; 1178 - } 1179 - #else 1180 - static int intel_acpi_probe(struct intel_device *idev) 1181 - { 1182 - return -ENODEV; 1183 - } 1184 1168 #endif 1185 1169 1186 1170 #ifdef CONFIG_PM ··· 1231 1247 mutex_init(&idev->hu_lock); 1232 1248 1233 1249 idev->pdev = pdev; 1234 - 1235 - if (ACPI_HANDLE(&pdev->dev)) { 1236 - int err = intel_acpi_probe(idev); 1237 - if (err) 1238 - return err; 1239 - } else { 1240 - return -ENODEV; 1241 - } 1242 1250 1243 1251 idev->reset = devm_gpiod_get_optional(&pdev->dev, "reset", 1244 1252 GPIOD_OUT_LOW);
+3 -14
drivers/bluetooth/hci_ldisc.c
··· 208 208 BT_DBG("%s %p", hdev->name, hdev); 209 209 210 210 /* Nothing to do for UART driver */ 211 - 212 - set_bit(HCI_RUNNING, &hdev->flags); 213 - 214 211 return 0; 215 212 } 216 213 ··· 238 241 { 239 242 BT_DBG("hdev %p", hdev); 240 243 241 - if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 242 - return 0; 243 - 244 244 hci_uart_flush(hdev); 245 245 hdev->flush = NULL; 246 246 return 0; ··· 247 253 static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) 248 254 { 249 255 struct hci_uart *hu = hci_get_drvdata(hdev); 250 - 251 - if (!test_bit(HCI_RUNNING, &hdev->flags)) 252 - return -EBUSY; 253 256 254 257 BT_DBG("%s: type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len); 255 258 ··· 461 470 INIT_WORK(&hu->init_ready, hci_uart_init_work); 462 471 INIT_WORK(&hu->write_work, hci_uart_write_work); 463 472 464 - spin_lock_init(&hu->rx_lock); 465 - 466 473 /* Flush any pending characters in the driver and line discipline. */ 467 474 468 475 /* FIXME: why is this needed. Note don't use ldisc_ref here as the ··· 558 569 if (!test_bit(HCI_UART_PROTO_SET, &hu->flags)) 559 570 return; 560 571 561 - spin_lock(&hu->rx_lock); 572 + /* It does not need a lock here as it is already protected by a mutex in 573 + * tty caller 574 + */ 562 575 hu->proto->recv(hu, data, count); 563 576 564 577 if (hu->hdev) 565 578 hu->hdev->stat.byte_rx += count; 566 - 567 - spin_unlock(&hu->rx_lock); 568 579 569 580 tty_unthrottle(tty); 570 581 }
+2 -2
drivers/bluetooth/hci_qca.c
··· 347 347 struct hci_uart *hu = (struct hci_uart *)arg; 348 348 struct qca_data *qca = hu->priv; 349 349 unsigned long flags, retrans_delay; 350 - unsigned long retransmit = 0; 350 + bool retransmit = false; 351 351 352 352 BT_DBG("hu %p wake retransmit timeout in %d state", 353 353 hu, qca->tx_ibs_state); ··· 358 358 switch (qca->tx_ibs_state) { 359 359 case HCI_IBS_TX_WAKING: 360 360 /* No WAKE_ACK, retransmit WAKE */ 361 - retransmit = 1; 361 + retransmit = true; 362 362 if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) { 363 363 BT_ERR("Failed to acknowledge device wake up"); 364 364 break;
-1
drivers/bluetooth/hci_uart.h
··· 85 85 86 86 struct sk_buff *tx_skb; 87 87 unsigned long tx_state; 88 - spinlock_t rx_lock; 89 88 90 89 unsigned int init_speed; 91 90 unsigned int oper_speed;
-8
drivers/bluetooth/hci_vhci.c
··· 55 55 56 56 static int vhci_open_dev(struct hci_dev *hdev) 57 57 { 58 - set_bit(HCI_RUNNING, &hdev->flags); 59 - 60 58 return 0; 61 59 } 62 60 63 61 static int vhci_close_dev(struct hci_dev *hdev) 64 62 { 65 63 struct vhci_data *data = hci_get_drvdata(hdev); 66 - 67 - if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) 68 - return 0; 69 64 70 65 skb_queue_purge(&data->readq); 71 66 ··· 79 84 static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) 80 85 { 81 86 struct vhci_data *data = hci_get_drvdata(hdev); 82 - 83 - if (!test_bit(HCI_RUNNING, &hdev->flags)) 84 - return -EBUSY; 85 87 86 88 memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); 87 89 skb_queue_tail(&data->readq, skb);
+1
drivers/net/ieee802154/Kconfig
··· 43 43 tristate "Microchip MRF24J40 transceiver driver" 44 44 depends on IEEE802154_DRIVERS && MAC802154 45 45 depends on SPI 46 + select REGMAP_SPI 46 47 ---help--- 47 48 Say Y here to enable the MRF24J20 SPI 802.15.4 wireless 48 49 controller.
+90 -123
drivers/net/ieee802154/at86rf230.c
··· 81 81 u8 from_state; 82 82 u8 to_state; 83 83 84 - bool irq_enable; 84 + bool free; 85 85 }; 86 86 87 87 struct at86rf230_trac { ··· 105 105 struct completion state_complete; 106 106 struct at86rf230_state_change state; 107 107 108 - struct at86rf230_state_change irq; 109 - 110 108 unsigned long cal_timeout; 111 109 bool is_tx; 112 110 bool is_tx_from_off; ··· 120 122 static void 121 123 at86rf230_async_state_change(struct at86rf230_local *lp, 122 124 struct at86rf230_state_change *ctx, 123 - const u8 state, void (*complete)(void *context), 124 - const bool irq_enable); 125 + const u8 state, void (*complete)(void *context)); 125 126 126 127 static inline void 127 128 at86rf230_sleep(struct at86rf230_local *lp) ··· 349 352 struct at86rf230_local *lp = ctx->lp; 350 353 351 354 lp->is_tx = 0; 352 - at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL, false); 355 + at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL); 353 356 ieee802154_wake_queue(lp->hw); 357 + if (ctx->free) 358 + kfree(ctx); 354 359 } 355 360 356 361 static inline void ··· 362 363 dev_err(&lp->spi->dev, "spi_async error %d\n", rc); 363 364 364 365 at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF, 365 - at86rf230_async_error_recover, false); 366 + at86rf230_async_error_recover); 366 367 } 367 368 368 369 /* Generic function to get some register value in async mode */ 369 370 static void 370 - at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg, 371 + at86rf230_async_read_reg(struct at86rf230_local *lp, u8 reg, 371 372 struct at86rf230_state_change *ctx, 372 - void (*complete)(void *context), 373 - const bool irq_enable) 373 + void (*complete)(void *context)) 374 374 { 375 375 int rc; 376 376 ··· 377 379 378 380 tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG; 379 381 ctx->msg.complete = complete; 380 - ctx->irq_enable = irq_enable; 381 382 rc = spi_async(lp->spi, &ctx->msg); 382 - if (rc) { 383 - if (irq_enable) 384 - enable_irq(ctx->irq); 385 - 383 + if (rc) 
386 384 at86rf230_async_error(lp, ctx, rc); 387 - } 385 + } 386 + 387 + static void 388 + at86rf230_async_write_reg(struct at86rf230_local *lp, u8 reg, u8 val, 389 + struct at86rf230_state_change *ctx, 390 + void (*complete)(void *context)) 391 + { 392 + int rc; 393 + 394 + ctx->buf[0] = (reg & CMD_REG_MASK) | CMD_REG | CMD_WRITE; 395 + ctx->buf[1] = val; 396 + ctx->msg.complete = complete; 397 + rc = spi_async(lp->spi, &ctx->msg); 398 + if (rc) 399 + at86rf230_async_error(lp, ctx, rc); 388 400 } 389 401 390 402 static void ··· 442 434 lp->tx_retry++; 443 435 444 436 at86rf230_async_state_change(lp, ctx, state, 445 - ctx->complete, 446 - ctx->irq_enable); 437 + ctx->complete); 447 438 return; 448 439 } 449 440 } ··· 463 456 struct at86rf230_local *lp = ctx->lp; 464 457 465 458 at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, 466 - at86rf230_async_state_assert, 467 - ctx->irq_enable); 459 + at86rf230_async_state_assert); 468 460 469 461 return HRTIMER_NORESTART; 470 462 } ··· 568 562 struct at86rf230_local *lp = ctx->lp; 569 563 u8 *buf = ctx->buf; 570 564 const u8 trx_state = buf[1] & TRX_STATE_MASK; 571 - int rc; 572 565 573 566 /* Check for "possible" STATE_TRANSITION_IN_PROGRESS */ 574 567 if (trx_state == STATE_TRANSITION_IN_PROGRESS) { 575 568 udelay(1); 576 569 at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, 577 - at86rf230_async_state_change_start, 578 - ctx->irq_enable); 570 + at86rf230_async_state_change_start); 579 571 return; 580 572 } 581 573 ··· 590 586 /* Going into the next step for a state change which do a timing 591 587 * relevant delay. 
592 588 */ 593 - buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE; 594 - buf[1] = ctx->to_state; 595 - ctx->msg.complete = at86rf230_async_state_delay; 596 - rc = spi_async(lp->spi, &ctx->msg); 597 - if (rc) { 598 - if (ctx->irq_enable) 599 - enable_irq(ctx->irq); 600 - 601 - at86rf230_async_error(lp, ctx, rc); 602 - } 589 + at86rf230_async_write_reg(lp, RG_TRX_STATE, ctx->to_state, ctx, 590 + at86rf230_async_state_delay); 603 591 } 604 592 605 593 static void 606 594 at86rf230_async_state_change(struct at86rf230_local *lp, 607 595 struct at86rf230_state_change *ctx, 608 - const u8 state, void (*complete)(void *context), 609 - const bool irq_enable) 596 + const u8 state, void (*complete)(void *context)) 610 597 { 611 598 /* Initialization for the state change context */ 612 599 ctx->to_state = state; 613 600 ctx->complete = complete; 614 - ctx->irq_enable = irq_enable; 615 601 at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx, 616 - at86rf230_async_state_change_start, 617 - irq_enable); 602 + at86rf230_async_state_change_start); 618 603 } 619 604 620 605 static void ··· 625 632 unsigned long rc; 626 633 627 634 at86rf230_async_state_change(lp, &lp->state, state, 628 - at86rf230_sync_state_change_complete, 629 - false); 635 + at86rf230_sync_state_change_complete); 630 636 631 637 rc = wait_for_completion_timeout(&lp->state_complete, 632 638 msecs_to_jiffies(100)); ··· 643 651 struct at86rf230_state_change *ctx = context; 644 652 struct at86rf230_local *lp = ctx->lp; 645 653 646 - enable_irq(ctx->irq); 647 - 648 654 ieee802154_xmit_complete(lp->hw, lp->tx_skb, false); 655 + kfree(ctx); 649 656 } 650 657 651 658 static void ··· 654 663 struct at86rf230_local *lp = ctx->lp; 655 664 656 665 at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, 657 - at86rf230_tx_complete, true); 666 + at86rf230_tx_complete); 658 667 } 659 668 660 669 static void ··· 688 697 } 689 698 } 690 699 691 - at86rf230_async_state_change(lp, &lp->irq, STATE_TX_ON, 692 - 
at86rf230_tx_on, true); 700 + at86rf230_async_state_change(lp, ctx, STATE_TX_ON, at86rf230_tx_on); 693 701 } 694 702 695 703 static void ··· 696 706 { 697 707 struct at86rf230_state_change *ctx = context; 698 708 struct at86rf230_local *lp = ctx->lp; 699 - u8 rx_local_buf[AT86RF2XX_MAX_BUF]; 700 709 const u8 *buf = ctx->buf; 701 710 struct sk_buff *skb; 702 711 u8 len, lqi; ··· 707 718 } 708 719 lqi = buf[2 + len]; 709 720 710 - memcpy(rx_local_buf, buf + 2, len); 711 - ctx->trx.len = 2; 712 - enable_irq(ctx->irq); 713 - 714 721 skb = dev_alloc_skb(IEEE802154_MTU); 715 722 if (!skb) { 716 723 dev_vdbg(&lp->spi->dev, "failed to allocate sk_buff\n"); 724 + kfree(ctx); 717 725 return; 718 726 } 719 727 720 - memcpy(skb_put(skb, len), rx_local_buf, len); 728 + memcpy(skb_put(skb, len), buf + 2, len); 721 729 ieee802154_rx_irqsafe(lp->hw, skb, lqi); 730 + kfree(ctx); 722 731 } 723 732 724 733 static void ··· 752 765 rc = spi_async(lp->spi, &ctx->msg); 753 766 if (rc) { 754 767 ctx->trx.len = 2; 755 - enable_irq(ctx->irq); 756 768 at86rf230_async_error(lp, ctx, rc); 757 769 } 758 770 } 759 771 760 772 static void 761 - at86rf230_irq_trx_end(struct at86rf230_local *lp) 773 + at86rf230_irq_trx_end(void *context) 762 774 { 775 + struct at86rf230_state_change *ctx = context; 776 + struct at86rf230_local *lp = ctx->lp; 777 + 763 778 if (lp->is_tx) { 764 779 lp->is_tx = 0; 765 - at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq, 766 - at86rf230_tx_trac_check, true); 780 + at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx, 781 + at86rf230_tx_trac_check); 767 782 } else { 768 - at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq, 769 - at86rf230_rx_trac_check, true); 783 + at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx, 784 + at86rf230_rx_trac_check); 770 785 } 771 786 } 772 787 ··· 778 789 struct at86rf230_state_change *ctx = context; 779 790 struct at86rf230_local *lp = ctx->lp; 780 791 const u8 *buf = ctx->buf; 781 - const u8 irq = buf[1]; 792 + u8 irq = buf[1]; 793 + 794 + 
enable_irq(lp->spi->irq); 782 795 783 796 if (irq & IRQ_TRX_END) { 784 - at86rf230_irq_trx_end(lp); 797 + at86rf230_irq_trx_end(ctx); 785 798 } else { 786 - enable_irq(ctx->irq); 787 799 dev_err(&lp->spi->dev, "not supported irq %02x received\n", 788 800 irq); 801 + kfree(ctx); 789 802 } 803 + } 804 + 805 + static void 806 + at86rf230_setup_spi_messages(struct at86rf230_local *lp, 807 + struct at86rf230_state_change *state) 808 + { 809 + state->lp = lp; 810 + state->irq = lp->spi->irq; 811 + spi_message_init(&state->msg); 812 + state->msg.context = state; 813 + state->trx.len = 2; 814 + state->trx.tx_buf = state->buf; 815 + state->trx.rx_buf = state->buf; 816 + spi_message_add_tail(&state->trx, &state->msg); 817 + hrtimer_init(&state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 818 + state->timer.function = at86rf230_async_state_timer; 790 819 } 791 820 792 821 static irqreturn_t at86rf230_isr(int irq, void *data) 793 822 { 794 823 struct at86rf230_local *lp = data; 795 - struct at86rf230_state_change *ctx = &lp->irq; 796 - u8 *buf = ctx->buf; 824 + struct at86rf230_state_change *ctx; 797 825 int rc; 798 826 799 827 disable_irq_nosync(irq); 800 828 801 - buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG; 829 + ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); 830 + if (!ctx) { 831 + enable_irq(irq); 832 + return IRQ_NONE; 833 + } 834 + 835 + at86rf230_setup_spi_messages(lp, ctx); 836 + /* tell on error handling to free ctx */ 837 + ctx->free = true; 838 + 839 + ctx->buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG; 802 840 ctx->msg.complete = at86rf230_irq_status; 803 841 rc = spi_async(lp->spi, &ctx->msg); 804 842 if (rc) { 805 - enable_irq(irq); 806 843 at86rf230_async_error(lp, ctx, rc); 844 + enable_irq(irq); 807 845 return IRQ_NONE; 808 846 } 809 847 ··· 842 826 { 843 827 struct at86rf230_state_change *ctx = context; 844 828 struct at86rf230_local *lp = ctx->lp; 845 - u8 *buf = ctx->buf; 846 - int rc; 847 829 848 830 ctx->trx.len = 2; 849 831 850 - if 
(gpio_is_valid(lp->slp_tr)) { 832 + if (gpio_is_valid(lp->slp_tr)) 851 833 at86rf230_slp_tr_rising_edge(lp); 852 - } else { 853 - buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE; 854 - buf[1] = STATE_BUSY_TX; 855 - ctx->msg.complete = NULL; 856 - rc = spi_async(lp->spi, &ctx->msg); 857 - if (rc) 858 - at86rf230_async_error(lp, ctx, rc); 859 - } 834 + else 835 + at86rf230_async_write_reg(lp, RG_TRX_STATE, STATE_BUSY_TX, ctx, 836 + NULL); 860 837 } 861 838 862 839 static void ··· 882 873 struct at86rf230_local *lp = ctx->lp; 883 874 884 875 at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, 885 - at86rf230_write_frame, false); 876 + at86rf230_write_frame); 886 877 } 887 878 888 879 static void ··· 895 886 if (lp->is_tx_from_off) { 896 887 lp->is_tx_from_off = false; 897 888 at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON, 898 - at86rf230_write_frame, 899 - false); 889 + at86rf230_write_frame); 900 890 } else { 901 891 at86rf230_async_state_change(lp, ctx, STATE_TX_ON, 902 - at86rf230_xmit_tx_on, 903 - false); 892 + at86rf230_xmit_tx_on); 904 893 } 905 894 } 906 895 ··· 921 914 if (time_is_before_jiffies(lp->cal_timeout)) { 922 915 lp->is_tx_from_off = true; 923 916 at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF, 924 - at86rf230_xmit_start, false); 917 + at86rf230_xmit_start); 925 918 } else { 926 919 at86rf230_xmit_start(ctx); 927 920 } ··· 1380 1373 return rc; 1381 1374 1382 1375 irq_type = irq_get_trigger_type(lp->spi->irq); 1383 - if (irq_type == IRQ_TYPE_EDGE_RISING || 1384 - irq_type == IRQ_TYPE_EDGE_FALLING) 1385 - dev_warn(&lp->spi->dev, 1386 - "Using edge triggered irq's are not recommended, because it can cause races and result in a non-functional driver!\n"); 1387 1376 if (irq_type == IRQ_TYPE_EDGE_FALLING || 1388 1377 irq_type == IRQ_TYPE_LEVEL_LOW) 1389 1378 irq_pol = IRQ_ACTIVE_LOW; ··· 1605 1602 return rc; 1606 1603 } 1607 1604 1608 - static void 1609 - at86rf230_setup_spi_messages(struct at86rf230_local *lp) 1610 - { 1611 
- lp->state.lp = lp; 1612 - lp->state.irq = lp->spi->irq; 1613 - spi_message_init(&lp->state.msg); 1614 - lp->state.msg.context = &lp->state; 1615 - lp->state.trx.len = 2; 1616 - lp->state.trx.tx_buf = lp->state.buf; 1617 - lp->state.trx.rx_buf = lp->state.buf; 1618 - spi_message_add_tail(&lp->state.trx, &lp->state.msg); 1619 - hrtimer_init(&lp->state.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1620 - lp->state.timer.function = at86rf230_async_state_timer; 1621 - 1622 - lp->irq.lp = lp; 1623 - lp->irq.irq = lp->spi->irq; 1624 - spi_message_init(&lp->irq.msg); 1625 - lp->irq.msg.context = &lp->irq; 1626 - lp->irq.trx.len = 2; 1627 - lp->irq.trx.tx_buf = lp->irq.buf; 1628 - lp->irq.trx.rx_buf = lp->irq.buf; 1629 - spi_message_add_tail(&lp->irq.trx, &lp->irq.msg); 1630 - hrtimer_init(&lp->irq.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1631 - lp->irq.timer.function = at86rf230_async_state_timer; 1632 - 1633 - lp->tx.lp = lp; 1634 - lp->tx.irq = lp->spi->irq; 1635 - spi_message_init(&lp->tx.msg); 1636 - lp->tx.msg.context = &lp->tx; 1637 - lp->tx.trx.len = 2; 1638 - lp->tx.trx.tx_buf = lp->tx.buf; 1639 - lp->tx.trx.rx_buf = lp->tx.buf; 1640 - spi_message_add_tail(&lp->tx.trx, &lp->tx.msg); 1641 - hrtimer_init(&lp->tx.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 1642 - lp->tx.timer.function = at86rf230_async_state_timer; 1643 - } 1644 - 1645 1605 #ifdef CONFIG_IEEE802154_AT86RF230_DEBUGFS 1646 1606 static struct dentry *at86rf230_debugfs_root; 1647 1607 ··· 1726 1760 goto free_dev; 1727 1761 } 1728 1762 1729 - at86rf230_setup_spi_messages(lp); 1763 + at86rf230_setup_spi_messages(lp, &lp->state); 1764 + at86rf230_setup_spi_messages(lp, &lp->tx); 1730 1765 1731 1766 rc = at86rf230_detect_device(lp); 1732 1767 if (rc < 0)
+1011 -441
drivers/net/ieee802154/mrf24j40.c
··· 18 18 #include <linux/spi/spi.h> 19 19 #include <linux/interrupt.h> 20 20 #include <linux/module.h> 21 + #include <linux/regmap.h> 21 22 #include <linux/ieee802154.h> 23 + #include <linux/irq.h> 22 24 #include <net/cfg802154.h> 23 25 #include <net/mac802154.h> 24 26 25 27 /* MRF24J40 Short Address Registers */ 26 - #define REG_RXMCR 0x00 /* Receive MAC control */ 27 - #define REG_PANIDL 0x01 /* PAN ID (low) */ 28 - #define REG_PANIDH 0x02 /* PAN ID (high) */ 29 - #define REG_SADRL 0x03 /* Short address (low) */ 30 - #define REG_SADRH 0x04 /* Short address (high) */ 31 - #define REG_EADR0 0x05 /* Long address (low) (high is EADR7) */ 32 - #define REG_TXMCR 0x11 /* Transmit MAC control */ 33 - #define REG_PACON0 0x16 /* Power Amplifier Control */ 34 - #define REG_PACON1 0x17 /* Power Amplifier Control */ 35 - #define REG_PACON2 0x18 /* Power Amplifier Control */ 36 - #define REG_TXNCON 0x1B /* Transmit Normal FIFO Control */ 37 - #define REG_TXSTAT 0x24 /* TX MAC Status Register */ 38 - #define REG_SOFTRST 0x2A /* Soft Reset */ 39 - #define REG_TXSTBL 0x2E /* TX Stabilization */ 40 - #define REG_INTSTAT 0x31 /* Interrupt Status */ 41 - #define REG_INTCON 0x32 /* Interrupt Control */ 42 - #define REG_GPIO 0x33 /* GPIO */ 43 - #define REG_TRISGPIO 0x34 /* GPIO direction */ 44 - #define REG_RFCTL 0x36 /* RF Control Mode Register */ 45 - #define REG_BBREG1 0x39 /* Baseband Registers */ 46 - #define REG_BBREG2 0x3A /* */ 47 - #define REG_BBREG6 0x3E /* */ 48 - #define REG_CCAEDTH 0x3F /* Energy Detection Threshold */ 28 + #define REG_RXMCR 0x00 /* Receive MAC control */ 29 + #define BIT_PROMI BIT(0) 30 + #define BIT_ERRPKT BIT(1) 31 + #define BIT_NOACKRSP BIT(5) 32 + #define BIT_PANCOORD BIT(3) 33 + 34 + #define REG_PANIDL 0x01 /* PAN ID (low) */ 35 + #define REG_PANIDH 0x02 /* PAN ID (high) */ 36 + #define REG_SADRL 0x03 /* Short address (low) */ 37 + #define REG_SADRH 0x04 /* Short address (high) */ 38 + #define REG_EADR0 0x05 /* Long address (low) (high is EADR7) 
*/ 39 + #define REG_EADR1 0x06 40 + #define REG_EADR2 0x07 41 + #define REG_EADR3 0x08 42 + #define REG_EADR4 0x09 43 + #define REG_EADR5 0x0A 44 + #define REG_EADR6 0x0B 45 + #define REG_EADR7 0x0C 46 + #define REG_RXFLUSH 0x0D 47 + #define REG_ORDER 0x10 48 + #define REG_TXMCR 0x11 /* Transmit MAC control */ 49 + #define TXMCR_MIN_BE_SHIFT 3 50 + #define TXMCR_MIN_BE_MASK 0x18 51 + #define TXMCR_CSMA_RETRIES_SHIFT 0 52 + #define TXMCR_CSMA_RETRIES_MASK 0x07 53 + 54 + #define REG_ACKTMOUT 0x12 55 + #define REG_ESLOTG1 0x13 56 + #define REG_SYMTICKL 0x14 57 + #define REG_SYMTICKH 0x15 58 + #define REG_PACON0 0x16 /* Power Amplifier Control */ 59 + #define REG_PACON1 0x17 /* Power Amplifier Control */ 60 + #define REG_PACON2 0x18 /* Power Amplifier Control */ 61 + #define REG_TXBCON0 0x1A 62 + #define REG_TXNCON 0x1B /* Transmit Normal FIFO Control */ 63 + #define BIT_TXNTRIG BIT(0) 64 + #define BIT_TXNACKREQ BIT(2) 65 + 66 + #define REG_TXG1CON 0x1C 67 + #define REG_TXG2CON 0x1D 68 + #define REG_ESLOTG23 0x1E 69 + #define REG_ESLOTG45 0x1F 70 + #define REG_ESLOTG67 0x20 71 + #define REG_TXPEND 0x21 72 + #define REG_WAKECON 0x22 73 + #define REG_FROMOFFSET 0x23 74 + #define REG_TXSTAT 0x24 /* TX MAC Status Register */ 75 + #define REG_TXBCON1 0x25 76 + #define REG_GATECLK 0x26 77 + #define REG_TXTIME 0x27 78 + #define REG_HSYMTMRL 0x28 79 + #define REG_HSYMTMRH 0x29 80 + #define REG_SOFTRST 0x2A /* Soft Reset */ 81 + #define REG_SECCON0 0x2C 82 + #define REG_SECCON1 0x2D 83 + #define REG_TXSTBL 0x2E /* TX Stabilization */ 84 + #define REG_RXSR 0x30 85 + #define REG_INTSTAT 0x31 /* Interrupt Status */ 86 + #define BIT_TXNIF BIT(0) 87 + #define BIT_RXIF BIT(3) 88 + 89 + #define REG_INTCON 0x32 /* Interrupt Control */ 90 + #define BIT_TXNIE BIT(0) 91 + #define BIT_RXIE BIT(3) 92 + 93 + #define REG_GPIO 0x33 /* GPIO */ 94 + #define REG_TRISGPIO 0x34 /* GPIO direction */ 95 + #define REG_SLPACK 0x35 96 + #define REG_RFCTL 0x36 /* RF Control Mode Register */ 97 + #define 
BIT_RFRST BIT(2) 98 + 99 + #define REG_SECCR2 0x37 100 + #define REG_BBREG0 0x38 101 + #define REG_BBREG1 0x39 /* Baseband Registers */ 102 + #define BIT_RXDECINV BIT(2) 103 + 104 + #define REG_BBREG2 0x3A /* */ 105 + #define BBREG2_CCA_MODE_SHIFT 6 106 + #define BBREG2_CCA_MODE_MASK 0xc0 107 + 108 + #define REG_BBREG3 0x3B 109 + #define REG_BBREG4 0x3C 110 + #define REG_BBREG6 0x3E /* */ 111 + #define REG_CCAEDTH 0x3F /* Energy Detection Threshold */ 49 112 50 113 /* MRF24J40 Long Address Registers */ 51 - #define REG_RFCON0 0x200 /* RF Control Registers */ 52 - #define REG_RFCON1 0x201 53 - #define REG_RFCON2 0x202 54 - #define REG_RFCON3 0x203 55 - #define REG_RFCON5 0x205 56 - #define REG_RFCON6 0x206 57 - #define REG_RFCON7 0x207 58 - #define REG_RFCON8 0x208 59 - #define REG_RSSI 0x210 60 - #define REG_SLPCON0 0x211 /* Sleep Clock Control Registers */ 61 - #define REG_SLPCON1 0x220 62 - #define REG_WAKETIMEL 0x222 /* Wake-up Time Match Value Low */ 63 - #define REG_WAKETIMEH 0x223 /* Wake-up Time Match Value High */ 64 - #define REG_TESTMODE 0x22F /* Test mode */ 65 - #define REG_RX_FIFO 0x300 /* Receive FIFO */ 114 + #define REG_RFCON0 0x200 /* RF Control Registers */ 115 + #define RFCON0_CH_SHIFT 4 116 + #define RFCON0_CH_MASK 0xf0 117 + #define RFOPT_RECOMMEND 3 118 + 119 + #define REG_RFCON1 0x201 120 + #define REG_RFCON2 0x202 121 + #define REG_RFCON3 0x203 122 + 123 + #define TXPWRL_MASK 0xc0 124 + #define TXPWRL_SHIFT 6 125 + #define TXPWRL_30 0x3 126 + #define TXPWRL_20 0x2 127 + #define TXPWRL_10 0x1 128 + #define TXPWRL_0 0x0 129 + 130 + #define TXPWRS_MASK 0x38 131 + #define TXPWRS_SHIFT 3 132 + #define TXPWRS_6_3 0x7 133 + #define TXPWRS_4_9 0x6 134 + #define TXPWRS_3_7 0x5 135 + #define TXPWRS_2_8 0x4 136 + #define TXPWRS_1_9 0x3 137 + #define TXPWRS_1_2 0x2 138 + #define TXPWRS_0_5 0x1 139 + #define TXPWRS_0 0x0 140 + 141 + #define REG_RFCON5 0x205 142 + #define REG_RFCON6 0x206 143 + #define REG_RFCON7 0x207 144 + #define REG_RFCON8 0x208 145 + 
#define REG_SLPCAL0 0x209 146 + #define REG_SLPCAL1 0x20A 147 + #define REG_SLPCAL2 0x20B 148 + #define REG_RFSTATE 0x20F 149 + #define REG_RSSI 0x210 150 + #define REG_SLPCON0 0x211 /* Sleep Clock Control Registers */ 151 + #define BIT_INTEDGE BIT(1) 152 + 153 + #define REG_SLPCON1 0x220 154 + #define REG_WAKETIMEL 0x222 /* Wake-up Time Match Value Low */ 155 + #define REG_WAKETIMEH 0x223 /* Wake-up Time Match Value High */ 156 + #define REG_REMCNTL 0x224 157 + #define REG_REMCNTH 0x225 158 + #define REG_MAINCNT0 0x226 159 + #define REG_MAINCNT1 0x227 160 + #define REG_MAINCNT2 0x228 161 + #define REG_MAINCNT3 0x229 162 + #define REG_TESTMODE 0x22F /* Test mode */ 163 + #define REG_ASSOEAR0 0x230 164 + #define REG_ASSOEAR1 0x231 165 + #define REG_ASSOEAR2 0x232 166 + #define REG_ASSOEAR3 0x233 167 + #define REG_ASSOEAR4 0x234 168 + #define REG_ASSOEAR5 0x235 169 + #define REG_ASSOEAR6 0x236 170 + #define REG_ASSOEAR7 0x237 171 + #define REG_ASSOSAR0 0x238 172 + #define REG_ASSOSAR1 0x239 173 + #define REG_UNONCE0 0x240 174 + #define REG_UNONCE1 0x241 175 + #define REG_UNONCE2 0x242 176 + #define REG_UNONCE3 0x243 177 + #define REG_UNONCE4 0x244 178 + #define REG_UNONCE5 0x245 179 + #define REG_UNONCE6 0x246 180 + #define REG_UNONCE7 0x247 181 + #define REG_UNONCE8 0x248 182 + #define REG_UNONCE9 0x249 183 + #define REG_UNONCE10 0x24A 184 + #define REG_UNONCE11 0x24B 185 + #define REG_UNONCE12 0x24C 186 + #define REG_RX_FIFO 0x300 /* Receive FIFO */ 66 187 67 188 /* Device configuration: Only channels 11-26 on page 0 are supported. */ 68 189 #define MRF24J40_CHAN_MIN 11 ··· 202 81 struct spi_device *spi; 203 82 struct ieee802154_hw *hw; 204 83 205 - struct mutex buffer_mutex; /* only used to protect buf */ 206 - struct completion tx_complete; 207 - u8 *buf; /* 3 bytes. Used for SPI single-register transfers. 
*/ 84 + struct regmap *regmap_short; 85 + struct regmap *regmap_long; 86 + 87 + /* for writing txfifo */ 88 + struct spi_message tx_msg; 89 + u8 tx_hdr_buf[2]; 90 + struct spi_transfer tx_hdr_trx; 91 + u8 tx_len_buf[2]; 92 + struct spi_transfer tx_len_trx; 93 + struct spi_transfer tx_buf_trx; 94 + struct sk_buff *tx_skb; 95 + 96 + /* post transmit message to send frame out */ 97 + struct spi_message tx_post_msg; 98 + u8 tx_post_buf[2]; 99 + struct spi_transfer tx_post_trx; 100 + 101 + /* for protect/unprotect/read length rxfifo */ 102 + struct spi_message rx_msg; 103 + u8 rx_buf[3]; 104 + struct spi_transfer rx_trx; 105 + 106 + /* receive handling */ 107 + struct spi_message rx_buf_msg; 108 + u8 rx_addr_buf[2]; 109 + struct spi_transfer rx_addr_trx; 110 + u8 rx_lqi_buf[2]; 111 + struct spi_transfer rx_lqi_trx; 112 + u8 rx_fifo_buf[RX_FIFO_SIZE]; 113 + struct spi_transfer rx_fifo_buf_trx; 114 + 115 + /* isr handling for reading intstat */ 116 + struct spi_message irq_msg; 117 + u8 irq_buf[2]; 118 + struct spi_transfer irq_trx; 208 119 }; 120 + 121 + /* regmap information for short address register access */ 122 + #define MRF24J40_SHORT_WRITE 0x01 123 + #define MRF24J40_SHORT_READ 0x00 124 + #define MRF24J40_SHORT_NUMREGS 0x3F 125 + 126 + /* regmap information for long address register access */ 127 + #define MRF24J40_LONG_ACCESS 0x80 128 + #define MRF24J40_LONG_NUMREGS 0x38F 209 129 210 130 /* Read/Write SPI Commands for Short and Long Address registers. 
*/ 211 131 #define MRF24J40_READSHORT(reg) ((reg) << 1) ··· 259 97 260 98 #define printdev(X) (&X->spi->dev) 261 99 262 - static int write_short_reg(struct mrf24j40 *devrec, u8 reg, u8 value) 100 + static bool 101 + mrf24j40_short_reg_writeable(struct device *dev, unsigned int reg) 263 102 { 264 - int ret; 265 - struct spi_message msg; 266 - struct spi_transfer xfer = { 267 - .len = 2, 268 - .tx_buf = devrec->buf, 269 - .rx_buf = devrec->buf, 270 - }; 271 - 272 - spi_message_init(&msg); 273 - spi_message_add_tail(&xfer, &msg); 274 - 275 - mutex_lock(&devrec->buffer_mutex); 276 - devrec->buf[0] = MRF24J40_WRITESHORT(reg); 277 - devrec->buf[1] = value; 278 - 279 - ret = spi_sync(devrec->spi, &msg); 280 - if (ret) 281 - dev_err(printdev(devrec), 282 - "SPI write Failed for short register 0x%hhx\n", reg); 283 - 284 - mutex_unlock(&devrec->buffer_mutex); 285 - return ret; 103 + switch (reg) { 104 + case REG_RXMCR: 105 + case REG_PANIDL: 106 + case REG_PANIDH: 107 + case REG_SADRL: 108 + case REG_SADRH: 109 + case REG_EADR0: 110 + case REG_EADR1: 111 + case REG_EADR2: 112 + case REG_EADR3: 113 + case REG_EADR4: 114 + case REG_EADR5: 115 + case REG_EADR6: 116 + case REG_EADR7: 117 + case REG_RXFLUSH: 118 + case REG_ORDER: 119 + case REG_TXMCR: 120 + case REG_ACKTMOUT: 121 + case REG_ESLOTG1: 122 + case REG_SYMTICKL: 123 + case REG_SYMTICKH: 124 + case REG_PACON0: 125 + case REG_PACON1: 126 + case REG_PACON2: 127 + case REG_TXBCON0: 128 + case REG_TXNCON: 129 + case REG_TXG1CON: 130 + case REG_TXG2CON: 131 + case REG_ESLOTG23: 132 + case REG_ESLOTG45: 133 + case REG_ESLOTG67: 134 + case REG_TXPEND: 135 + case REG_WAKECON: 136 + case REG_FROMOFFSET: 137 + case REG_TXBCON1: 138 + case REG_GATECLK: 139 + case REG_TXTIME: 140 + case REG_HSYMTMRL: 141 + case REG_HSYMTMRH: 142 + case REG_SOFTRST: 143 + case REG_SECCON0: 144 + case REG_SECCON1: 145 + case REG_TXSTBL: 146 + case REG_RXSR: 147 + case REG_INTCON: 148 + case REG_TRISGPIO: 149 + case REG_GPIO: 150 + case REG_RFCTL: 
151 + case REG_SLPACK: 152 + case REG_BBREG0: 153 + case REG_BBREG1: 154 + case REG_BBREG2: 155 + case REG_BBREG3: 156 + case REG_BBREG4: 157 + case REG_BBREG6: 158 + case REG_CCAEDTH: 159 + return true; 160 + default: 161 + return false; 162 + } 286 163 } 287 164 288 - static int read_short_reg(struct mrf24j40 *devrec, u8 reg, u8 *val) 165 + static bool 166 + mrf24j40_short_reg_readable(struct device *dev, unsigned int reg) 289 167 { 290 - int ret = -1; 291 - struct spi_message msg; 292 - struct spi_transfer xfer = { 293 - .len = 2, 294 - .tx_buf = devrec->buf, 295 - .rx_buf = devrec->buf, 296 - }; 168 + bool rc; 297 169 298 - spi_message_init(&msg); 299 - spi_message_add_tail(&xfer, &msg); 170 + /* all writeable are also readable */ 171 + rc = mrf24j40_short_reg_writeable(dev, reg); 172 + if (rc) 173 + return rc; 300 174 301 - mutex_lock(&devrec->buffer_mutex); 302 - devrec->buf[0] = MRF24J40_READSHORT(reg); 303 - devrec->buf[1] = 0; 304 - 305 - ret = spi_sync(devrec->spi, &msg); 306 - if (ret) 307 - dev_err(printdev(devrec), 308 - "SPI read Failed for short register 0x%hhx\n", reg); 309 - else 310 - *val = devrec->buf[1]; 311 - 312 - mutex_unlock(&devrec->buffer_mutex); 313 - return ret; 175 + /* readonly regs */ 176 + switch (reg) { 177 + case REG_TXSTAT: 178 + case REG_INTSTAT: 179 + return true; 180 + default: 181 + return false; 182 + } 314 183 } 315 184 316 - static int read_long_reg(struct mrf24j40 *devrec, u16 reg, u8 *value) 185 + static bool 186 + mrf24j40_short_reg_volatile(struct device *dev, unsigned int reg) 317 187 { 318 - int ret; 319 - u16 cmd; 320 - struct spi_message msg; 321 - struct spi_transfer xfer = { 322 - .len = 3, 323 - .tx_buf = devrec->buf, 324 - .rx_buf = devrec->buf, 325 - }; 326 - 327 - spi_message_init(&msg); 328 - spi_message_add_tail(&xfer, &msg); 329 - 330 - cmd = MRF24J40_READLONG(reg); 331 - mutex_lock(&devrec->buffer_mutex); 332 - devrec->buf[0] = cmd >> 8 & 0xff; 333 - devrec->buf[1] = cmd & 0xff; 334 - devrec->buf[2] = 0; 
335 - 336 - ret = spi_sync(devrec->spi, &msg); 337 - if (ret) 338 - dev_err(printdev(devrec), 339 - "SPI read Failed for long register 0x%hx\n", reg); 340 - else 341 - *value = devrec->buf[2]; 342 - 343 - mutex_unlock(&devrec->buffer_mutex); 344 - return ret; 188 + /* can be changed during runtime */ 189 + switch (reg) { 190 + case REG_TXSTAT: 191 + case REG_INTSTAT: 192 + case REG_RXFLUSH: 193 + case REG_TXNCON: 194 + case REG_SOFTRST: 195 + case REG_RFCTL: 196 + case REG_TXBCON0: 197 + case REG_TXG1CON: 198 + case REG_TXG2CON: 199 + case REG_TXBCON1: 200 + case REG_SECCON0: 201 + case REG_RXSR: 202 + case REG_SLPACK: 203 + case REG_SECCR2: 204 + case REG_BBREG6: 205 + /* use them in spi_async and regmap so it's volatile */ 206 + case REG_BBREG1: 207 + return true; 208 + default: 209 + return false; 210 + } 345 211 } 346 212 347 - static int write_long_reg(struct mrf24j40 *devrec, u16 reg, u8 val) 213 + static bool 214 + mrf24j40_short_reg_precious(struct device *dev, unsigned int reg) 348 215 { 216 + /* don't clear irq line on read */ 217 + switch (reg) { 218 + case REG_INTSTAT: 219 + return true; 220 + default: 221 + return false; 222 + } 223 + } 224 + 225 + static const struct regmap_config mrf24j40_short_regmap = { 226 + .name = "mrf24j40_short", 227 + .reg_bits = 7, 228 + .val_bits = 8, 229 + .pad_bits = 1, 230 + .write_flag_mask = MRF24J40_SHORT_WRITE, 231 + .read_flag_mask = MRF24J40_SHORT_READ, 232 + .cache_type = REGCACHE_RBTREE, 233 + .max_register = MRF24J40_SHORT_NUMREGS, 234 + .writeable_reg = mrf24j40_short_reg_writeable, 235 + .readable_reg = mrf24j40_short_reg_readable, 236 + .volatile_reg = mrf24j40_short_reg_volatile, 237 + .precious_reg = mrf24j40_short_reg_precious, 238 + }; 239 + 240 + static bool 241 + mrf24j40_long_reg_writeable(struct device *dev, unsigned int reg) 242 + { 243 + switch (reg) { 244 + case REG_RFCON0: 245 + case REG_RFCON1: 246 + case REG_RFCON2: 247 + case REG_RFCON3: 248 + case REG_RFCON5: 249 + case REG_RFCON6: 250 + case 
REG_RFCON7: 251 + case REG_RFCON8: 252 + case REG_SLPCAL2: 253 + case REG_SLPCON0: 254 + case REG_SLPCON1: 255 + case REG_WAKETIMEL: 256 + case REG_WAKETIMEH: 257 + case REG_REMCNTL: 258 + case REG_REMCNTH: 259 + case REG_MAINCNT0: 260 + case REG_MAINCNT1: 261 + case REG_MAINCNT2: 262 + case REG_MAINCNT3: 263 + case REG_TESTMODE: 264 + case REG_ASSOEAR0: 265 + case REG_ASSOEAR1: 266 + case REG_ASSOEAR2: 267 + case REG_ASSOEAR3: 268 + case REG_ASSOEAR4: 269 + case REG_ASSOEAR5: 270 + case REG_ASSOEAR6: 271 + case REG_ASSOEAR7: 272 + case REG_ASSOSAR0: 273 + case REG_ASSOSAR1: 274 + case REG_UNONCE0: 275 + case REG_UNONCE1: 276 + case REG_UNONCE2: 277 + case REG_UNONCE3: 278 + case REG_UNONCE4: 279 + case REG_UNONCE5: 280 + case REG_UNONCE6: 281 + case REG_UNONCE7: 282 + case REG_UNONCE8: 283 + case REG_UNONCE9: 284 + case REG_UNONCE10: 285 + case REG_UNONCE11: 286 + case REG_UNONCE12: 287 + return true; 288 + default: 289 + return false; 290 + } 291 + } 292 + 293 + static bool 294 + mrf24j40_long_reg_readable(struct device *dev, unsigned int reg) 295 + { 296 + bool rc; 297 + 298 + /* all writeable are also readable */ 299 + rc = mrf24j40_long_reg_writeable(dev, reg); 300 + if (rc) 301 + return rc; 302 + 303 + /* readonly regs */ 304 + switch (reg) { 305 + case REG_SLPCAL0: 306 + case REG_SLPCAL1: 307 + case REG_RFSTATE: 308 + case REG_RSSI: 309 + return true; 310 + default: 311 + return false; 312 + } 313 + } 314 + 315 + static bool 316 + mrf24j40_long_reg_volatile(struct device *dev, unsigned int reg) 317 + { 318 + /* can be changed during runtime */ 319 + switch (reg) { 320 + case REG_SLPCAL0: 321 + case REG_SLPCAL1: 322 + case REG_SLPCAL2: 323 + case REG_RFSTATE: 324 + case REG_RSSI: 325 + case REG_MAINCNT3: 326 + return true; 327 + default: 328 + return false; 329 + } 330 + } 331 + 332 + static const struct regmap_config mrf24j40_long_regmap = { 333 + .name = "mrf24j40_long", 334 + .reg_bits = 11, 335 + .val_bits = 8, 336 + .pad_bits = 5, 337 + .write_flag_mask 
= MRF24J40_LONG_ACCESS, 338 + .read_flag_mask = MRF24J40_LONG_ACCESS, 339 + .cache_type = REGCACHE_RBTREE, 340 + .max_register = MRF24J40_LONG_NUMREGS, 341 + .writeable_reg = mrf24j40_long_reg_writeable, 342 + .readable_reg = mrf24j40_long_reg_readable, 343 + .volatile_reg = mrf24j40_long_reg_volatile, 344 + }; 345 + 346 + static int mrf24j40_long_regmap_write(void *context, const void *data, 347 + size_t count) 348 + { 349 + struct spi_device *spi = context; 350 + u8 buf[3]; 351 + 352 + if (count > 3) 353 + return -EINVAL; 354 + 355 + /* regmap supports read/write mask only in first byte 356 + * long write access needs to set the 12th bit, so we 357 + * use special handling for writes. 358 + */ 359 + memcpy(buf, data, count); 360 + buf[1] |= (1 << 4); 361 + 362 + return spi_write(spi, buf, count); 363 + } 364 + 365 + static int 366 + mrf24j40_long_regmap_read(void *context, const void *reg, size_t reg_size, 367 + void *val, size_t val_size) 368 + { 369 + struct spi_device *spi = context; 370 + 371 + return spi_write_then_read(spi, reg, reg_size, val, val_size); 372 + } 373 + 374 + static const struct regmap_bus mrf24j40_long_regmap_bus = { 375 + .write = mrf24j40_long_regmap_write, 376 + .read = mrf24j40_long_regmap_read, 377 + .reg_format_endian_default = REGMAP_ENDIAN_BIG, 378 + .val_format_endian_default = REGMAP_ENDIAN_BIG, 379 + }; 380 + 381 + static void write_tx_buf_complete(void *context) 382 + { 383 + struct mrf24j40 *devrec = context; 384 + __le16 fc = ieee802154_get_fc_from_skb(devrec->tx_skb); 385 + u8 val = BIT_TXNTRIG; 349 386 int ret; 350 - u16 cmd; 351 - struct spi_message msg; 352 - struct spi_transfer xfer = { 353 - .len = 3, 354 - .tx_buf = devrec->buf, 355 - .rx_buf = devrec->buf, 356 - }; 357 387 358 - spi_message_init(&msg); 359 - spi_message_add_tail(&xfer, &msg); 388 + if (ieee802154_is_ackreq(fc)) 389 + val |= BIT_TXNACKREQ; 360 390 361 - cmd = MRF24J40_WRITELONG(reg); 362 - mutex_lock(&devrec->buffer_mutex); 363 - devrec->buf[0] = cmd >> 8 
& 0xff; 364 - devrec->buf[1] = cmd & 0xff; 365 - devrec->buf[2] = val; 391 + devrec->tx_post_msg.complete = NULL; 392 + devrec->tx_post_buf[0] = MRF24J40_WRITESHORT(REG_TXNCON); 393 + devrec->tx_post_buf[1] = val; 366 394 367 - ret = spi_sync(devrec->spi, &msg); 395 + ret = spi_async(devrec->spi, &devrec->tx_post_msg); 368 396 if (ret) 369 - dev_err(printdev(devrec), 370 - "SPI write Failed for long register 0x%hx\n", reg); 371 - 372 - mutex_unlock(&devrec->buffer_mutex); 373 - return ret; 397 + dev_err(printdev(devrec), "SPI write Failed for transmit buf\n"); 374 398 } 375 399 376 400 /* This function relies on an undocumented write method. Once a write command ··· 565 217 static int write_tx_buf(struct mrf24j40 *devrec, u16 reg, 566 218 const u8 *data, size_t length) 567 219 { 568 - int ret; 569 220 u16 cmd; 570 - u8 lengths[2]; 571 - struct spi_message msg; 572 - struct spi_transfer addr_xfer = { 573 - .len = 2, 574 - .tx_buf = devrec->buf, 575 - }; 576 - struct spi_transfer lengths_xfer = { 577 - .len = 2, 578 - .tx_buf = &lengths, /* TODO: Is DMA really required for SPI? */ 579 - }; 580 - struct spi_transfer data_xfer = { 581 - .len = length, 582 - .tx_buf = data, 583 - }; 221 + int ret; 584 222 585 223 /* Range check the length. 2 bytes are used for the length fields.*/ 586 224 if (length > TX_FIFO_SIZE-2) { ··· 574 240 length = TX_FIFO_SIZE-2; 575 241 } 576 242 577 - spi_message_init(&msg); 578 - spi_message_add_tail(&addr_xfer, &msg); 579 - spi_message_add_tail(&lengths_xfer, &msg); 580 - spi_message_add_tail(&data_xfer, &msg); 581 - 582 243 cmd = MRF24J40_WRITELONG(reg); 583 - mutex_lock(&devrec->buffer_mutex); 584 - devrec->buf[0] = cmd >> 8 & 0xff; 585 - devrec->buf[1] = cmd & 0xff; 586 - lengths[0] = 0x0; /* Header Length. Set to 0 for now. TODO */ 587 - lengths[1] = length; /* Total length */ 244 + devrec->tx_hdr_buf[0] = cmd >> 8 & 0xff; 245 + devrec->tx_hdr_buf[1] = cmd & 0xff; 246 + devrec->tx_len_buf[0] = 0x0; /* Header Length. Set to 0 for now. 
TODO */ 247 + devrec->tx_len_buf[1] = length; /* Total length */ 248 + devrec->tx_buf_trx.tx_buf = data; 249 + devrec->tx_buf_trx.len = length; 588 250 589 - ret = spi_sync(devrec->spi, &msg); 251 + ret = spi_async(devrec->spi, &devrec->tx_msg); 590 252 if (ret) 591 253 dev_err(printdev(devrec), "SPI write Failed for TX buf\n"); 592 254 593 - mutex_unlock(&devrec->buffer_mutex); 594 - return ret; 595 - } 596 - 597 - static int mrf24j40_read_rx_buf(struct mrf24j40 *devrec, 598 - u8 *data, u8 *len, u8 *lqi) 599 - { 600 - u8 rx_len; 601 - u8 addr[2]; 602 - u8 lqi_rssi[2]; 603 - u16 cmd; 604 - int ret; 605 - struct spi_message msg; 606 - struct spi_transfer addr_xfer = { 607 - .len = 2, 608 - .tx_buf = &addr, 609 - }; 610 - struct spi_transfer data_xfer = { 611 - .len = 0x0, /* set below */ 612 - .rx_buf = data, 613 - }; 614 - struct spi_transfer status_xfer = { 615 - .len = 2, 616 - .rx_buf = &lqi_rssi, 617 - }; 618 - 619 - /* Get the length of the data in the RX FIFO. The length in this 620 - * register exclues the 1-byte length field at the beginning. */ 621 - ret = read_long_reg(devrec, REG_RX_FIFO, &rx_len); 622 - if (ret) 623 - goto out; 624 - 625 - /* Range check the RX FIFO length, accounting for the one-byte 626 - * length field at the beginning. */ 627 - if (rx_len > RX_FIFO_SIZE-1) { 628 - dev_err(printdev(devrec), "Invalid length read from device. Performing short read.\n"); 629 - rx_len = RX_FIFO_SIZE-1; 630 - } 631 - 632 - if (rx_len > *len) { 633 - /* Passed in buffer wasn't big enough. Should never happen. */ 634 - dev_err(printdev(devrec), "Buffer not big enough. Performing short read\n"); 635 - rx_len = *len; 636 - } 637 - 638 - /* Set up the commands to read the data. 
*/ 639 - cmd = MRF24J40_READLONG(REG_RX_FIFO+1); 640 - addr[0] = cmd >> 8 & 0xff; 641 - addr[1] = cmd & 0xff; 642 - data_xfer.len = rx_len; 643 - 644 - spi_message_init(&msg); 645 - spi_message_add_tail(&addr_xfer, &msg); 646 - spi_message_add_tail(&data_xfer, &msg); 647 - spi_message_add_tail(&status_xfer, &msg); 648 - 649 - ret = spi_sync(devrec->spi, &msg); 650 - if (ret) { 651 - dev_err(printdev(devrec), "SPI RX Buffer Read Failed.\n"); 652 - goto out; 653 - } 654 - 655 - *lqi = lqi_rssi[0]; 656 - *len = rx_len; 657 - 658 - #ifdef DEBUG 659 - print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ", 660 - DUMP_PREFIX_OFFSET, 16, 1, data, *len, 0); 661 - pr_debug("mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n", 662 - lqi_rssi[0], lqi_rssi[1]); 663 - #endif 664 - 665 - out: 666 255 return ret; 667 256 } 668 257 669 258 static int mrf24j40_tx(struct ieee802154_hw *hw, struct sk_buff *skb) 670 259 { 671 260 struct mrf24j40 *devrec = hw->priv; 672 - u8 val; 673 - int ret = 0; 674 261 675 262 dev_dbg(printdev(devrec), "tx packet of %d bytes\n", skb->len); 263 + devrec->tx_skb = skb; 676 264 677 - ret = write_tx_buf(devrec, 0x000, skb->data, skb->len); 678 - if (ret) 679 - goto err; 680 - 681 - reinit_completion(&devrec->tx_complete); 682 - 683 - /* Set TXNTRIG bit of TXNCON to send packet */ 684 - ret = read_short_reg(devrec, REG_TXNCON, &val); 685 - if (ret) 686 - goto err; 687 - val |= 0x1; 688 - /* Set TXNACKREQ if the ACK bit is set in the packet. */ 689 - if (skb->data[0] & IEEE802154_FC_ACK_REQ) 690 - val |= 0x4; 691 - write_short_reg(devrec, REG_TXNCON, val); 692 - 693 - /* Wait for the device to send the TX complete interrupt. */ 694 - ret = wait_for_completion_interruptible_timeout( 695 - &devrec->tx_complete, 696 - 5 * HZ); 697 - if (ret == -ERESTARTSYS) 698 - goto err; 699 - if (ret == 0) { 700 - dev_warn(printdev(devrec), "Timeout waiting for TX interrupt\n"); 701 - ret = -ETIMEDOUT; 702 - goto err; 703 - } 704 - 705 - /* Check for send error from the device. 
*/ 706 - ret = read_short_reg(devrec, REG_TXSTAT, &val); 707 - if (ret) 708 - goto err; 709 - if (val & 0x1) { 710 - dev_dbg(printdev(devrec), "Error Sending. Retry count exceeded\n"); 711 - ret = -ECOMM; /* TODO: Better error code ? */ 712 - } else 713 - dev_dbg(printdev(devrec), "Packet Sent\n"); 714 - 715 - err: 716 - 717 - return ret; 265 + return write_tx_buf(devrec, 0x000, skb->data, skb->len); 718 266 } 719 267 720 268 static int mrf24j40_ed(struct ieee802154_hw *hw, u8 *level) ··· 610 394 static int mrf24j40_start(struct ieee802154_hw *hw) 611 395 { 612 396 struct mrf24j40 *devrec = hw->priv; 613 - u8 val; 614 - int ret; 615 397 616 398 dev_dbg(printdev(devrec), "start\n"); 617 399 618 - ret = read_short_reg(devrec, REG_INTCON, &val); 619 - if (ret) 620 - return ret; 621 - val &= ~(0x1|0x8); /* Clear TXNIE and RXIE. Enable interrupts */ 622 - write_short_reg(devrec, REG_INTCON, val); 623 - 624 - return 0; 400 + /* Clear TXNIE and RXIE. Enable interrupts */ 401 + return regmap_update_bits(devrec->regmap_short, REG_INTCON, 402 + BIT_TXNIE | BIT_RXIE, 0); 625 403 } 626 404 627 405 static void mrf24j40_stop(struct ieee802154_hw *hw) 628 406 { 629 407 struct mrf24j40 *devrec = hw->priv; 630 - u8 val; 631 - int ret; 632 408 633 409 dev_dbg(printdev(devrec), "stop\n"); 634 410 635 - ret = read_short_reg(devrec, REG_INTCON, &val); 636 - if (ret) 637 - return; 638 - val |= 0x1|0x8; /* Set TXNIE and RXIE. Disable Interrupts */ 639 - write_short_reg(devrec, REG_INTCON, val); 411 + /* Set TXNIE and RXIE. 
Disable Interrupts */ 412 + regmap_update_bits(devrec->regmap_short, REG_INTCON, 413 + BIT_TXNIE | BIT_RXIE, BIT_TXNIE | BIT_RXIE); 640 414 } 641 415 642 416 static int mrf24j40_set_channel(struct ieee802154_hw *hw, u8 page, u8 channel) ··· 642 436 WARN_ON(channel > MRF24J40_CHAN_MAX); 643 437 644 438 /* Set Channel TODO */ 645 - val = (channel-11) << 4 | 0x03; 646 - write_long_reg(devrec, REG_RFCON0, val); 647 - 648 - /* RF Reset */ 649 - ret = read_short_reg(devrec, REG_RFCTL, &val); 439 + val = (channel - 11) << RFCON0_CH_SHIFT | RFOPT_RECOMMEND; 440 + ret = regmap_update_bits(devrec->regmap_long, REG_RFCON0, 441 + RFCON0_CH_MASK, val); 650 442 if (ret) 651 443 return ret; 652 - val |= 0x04; 653 - write_short_reg(devrec, REG_RFCTL, val); 654 - val &= ~0x04; 655 - write_short_reg(devrec, REG_RFCTL, val); 656 444 657 - udelay(SET_CHANNEL_DELAY_US); /* per datasheet */ 445 + /* RF Reset */ 446 + ret = regmap_update_bits(devrec->regmap_short, REG_RFCTL, BIT_RFRST, 447 + BIT_RFRST); 448 + if (ret) 449 + return ret; 658 450 659 - return 0; 451 + ret = regmap_update_bits(devrec->regmap_short, REG_RFCTL, BIT_RFRST, 0); 452 + if (!ret) 453 + udelay(SET_CHANNEL_DELAY_US); /* per datasheet */ 454 + 455 + return ret; 660 456 } 661 457 662 458 static int mrf24j40_filter(struct ieee802154_hw *hw, ··· 676 468 addrh = le16_to_cpu(filt->short_addr) >> 8 & 0xff; 677 469 addrl = le16_to_cpu(filt->short_addr) & 0xff; 678 470 679 - write_short_reg(devrec, REG_SADRH, addrh); 680 - write_short_reg(devrec, REG_SADRL, addrl); 471 + regmap_write(devrec->regmap_short, REG_SADRH, addrh); 472 + regmap_write(devrec->regmap_short, REG_SADRL, addrl); 681 473 dev_dbg(printdev(devrec), 682 474 "Set short addr to %04hx\n", filt->short_addr); 683 475 } ··· 688 480 689 481 memcpy(addr, &filt->ieee_addr, 8); 690 482 for (i = 0; i < 8; i++) 691 - write_short_reg(devrec, REG_EADR0 + i, addr[i]); 483 + regmap_write(devrec->regmap_short, REG_EADR0 + i, 484 + addr[i]); 692 485 693 486 #ifdef DEBUG 694 
487 pr_debug("Set long addr to: "); ··· 705 496 706 497 panidh = le16_to_cpu(filt->pan_id) >> 8 & 0xff; 707 498 panidl = le16_to_cpu(filt->pan_id) & 0xff; 708 - write_short_reg(devrec, REG_PANIDH, panidh); 709 - write_short_reg(devrec, REG_PANIDL, panidl); 499 + regmap_write(devrec->regmap_short, REG_PANIDH, panidh); 500 + regmap_write(devrec->regmap_short, REG_PANIDL, panidl); 710 501 711 502 dev_dbg(printdev(devrec), "Set PANID to %04hx\n", filt->pan_id); 712 503 } ··· 716 507 u8 val; 717 508 int ret; 718 509 719 - ret = read_short_reg(devrec, REG_RXMCR, &val); 510 + if (filt->pan_coord) 511 + val = BIT_PANCOORD; 512 + else 513 + val = 0; 514 + ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR, 515 + BIT_PANCOORD, val); 720 516 if (ret) 721 517 return ret; 722 - if (filt->pan_coord) 723 - val |= 0x8; 724 - else 725 - val &= ~0x8; 726 - write_short_reg(devrec, REG_RXMCR, val); 727 518 728 519 /* REG_SLOTTED is maintained as default (unslotted/CSMA-CA). 729 520 * REG_ORDER is maintained as default (no beacon/superframe). ··· 736 527 return 0; 737 528 } 738 529 739 - static int mrf24j40_handle_rx(struct mrf24j40 *devrec) 530 + static void mrf24j40_handle_rx_read_buf_unlock(struct mrf24j40 *devrec) 740 531 { 741 - u8 len = RX_FIFO_SIZE; 742 - u8 lqi = 0; 743 - u8 val; 744 - int ret = 0; 745 - int ret2; 532 + int ret; 533 + 534 + /* Turn back on reception of packets off the air. */ 535 + devrec->rx_msg.complete = NULL; 536 + devrec->rx_buf[0] = MRF24J40_WRITESHORT(REG_BBREG1); 537 + devrec->rx_buf[1] = 0x00; /* CLR RXDECINV */ 538 + ret = spi_async(devrec->spi, &devrec->rx_msg); 539 + if (ret) 540 + dev_err(printdev(devrec), "failed to unlock rx buffer\n"); 541 + } 542 + 543 + static void mrf24j40_handle_rx_read_buf_complete(void *context) 544 + { 545 + struct mrf24j40 *devrec = context; 546 + u8 len = devrec->rx_buf[2]; 547 + u8 rx_local_buf[RX_FIFO_SIZE]; 746 548 struct sk_buff *skb; 747 549 748 - /* Turn off reception of packets off the air. 
This prevents the 749 - * device from overwriting the buffer while we're reading it. */ 750 - ret = read_short_reg(devrec, REG_BBREG1, &val); 751 - if (ret) 752 - goto out; 753 - val |= 4; /* SET RXDECINV */ 754 - write_short_reg(devrec, REG_BBREG1, val); 550 + memcpy(rx_local_buf, devrec->rx_fifo_buf, len); 551 + mrf24j40_handle_rx_read_buf_unlock(devrec); 755 552 756 - skb = dev_alloc_skb(len); 553 + skb = dev_alloc_skb(IEEE802154_MTU); 757 554 if (!skb) { 758 - ret = -ENOMEM; 759 - goto out; 555 + dev_err(printdev(devrec), "failed to allocate skb\n"); 556 + return; 760 557 } 761 558 762 - ret = mrf24j40_read_rx_buf(devrec, skb_put(skb, len), &len, &lqi); 763 - if (ret < 0) { 764 - dev_err(printdev(devrec), "Failure reading RX FIFO\n"); 765 - kfree_skb(skb); 766 - ret = -EINVAL; 767 - goto out; 559 + memcpy(skb_put(skb, len), rx_local_buf, len); 560 + ieee802154_rx_irqsafe(devrec->hw, skb, 0); 561 + 562 + #ifdef DEBUG 563 + print_hex_dump(KERN_DEBUG, "mrf24j40 rx: ", DUMP_PREFIX_OFFSET, 16, 1, 564 + rx_local_buf, len, 0); 565 + pr_debug("mrf24j40 rx: lqi: %02hhx rssi: %02hhx\n", 566 + devrec->rx_lqi_buf[0], devrec->rx_lqi_buf[1]); 567 + #endif 568 + } 569 + 570 + static void mrf24j40_handle_rx_read_buf(void *context) 571 + { 572 + struct mrf24j40 *devrec = context; 573 + u16 cmd; 574 + int ret; 575 + 576 + /* if length is invalid read the full MTU */ 577 + if (!ieee802154_is_valid_psdu_len(devrec->rx_buf[2])) 578 + devrec->rx_buf[2] = IEEE802154_MTU; 579 + 580 + cmd = MRF24J40_READLONG(REG_RX_FIFO + 1); 581 + devrec->rx_addr_buf[0] = cmd >> 8 & 0xff; 582 + devrec->rx_addr_buf[1] = cmd & 0xff; 583 + devrec->rx_fifo_buf_trx.len = devrec->rx_buf[2]; 584 + ret = spi_async(devrec->spi, &devrec->rx_buf_msg); 585 + if (ret) { 586 + dev_err(printdev(devrec), "failed to read rx buffer\n"); 587 + mrf24j40_handle_rx_read_buf_unlock(devrec); 588 + } 589 + } 590 + 591 + static void mrf24j40_handle_rx_read_len(void *context) 592 + { 593 + struct mrf24j40 *devrec = context; 594 
+ u16 cmd; 595 + int ret; 596 + 597 + /* read the length of received frame */ 598 + devrec->rx_msg.complete = mrf24j40_handle_rx_read_buf; 599 + devrec->rx_trx.len = 3; 600 + cmd = MRF24J40_READLONG(REG_RX_FIFO); 601 + devrec->rx_buf[0] = cmd >> 8 & 0xff; 602 + devrec->rx_buf[1] = cmd & 0xff; 603 + 604 + ret = spi_async(devrec->spi, &devrec->rx_msg); 605 + if (ret) { 606 + dev_err(printdev(devrec), "failed to read rx buffer length\n"); 607 + mrf24j40_handle_rx_read_buf_unlock(devrec); 608 + } 609 + } 610 + 611 + static int mrf24j40_handle_rx(struct mrf24j40 *devrec) 612 + { 613 + /* Turn off reception of packets off the air. This prevents the 614 + * device from overwriting the buffer while we're reading it. 615 + */ 616 + devrec->rx_msg.complete = mrf24j40_handle_rx_read_len; 617 + devrec->rx_trx.len = 2; 618 + devrec->rx_buf[0] = MRF24J40_WRITESHORT(REG_BBREG1); 619 + devrec->rx_buf[1] = BIT_RXDECINV; /* SET RXDECINV */ 620 + 621 + return spi_async(devrec->spi, &devrec->rx_msg); 622 + } 623 + 624 + static int 625 + mrf24j40_csma_params(struct ieee802154_hw *hw, u8 min_be, u8 max_be, 626 + u8 retries) 627 + { 628 + struct mrf24j40 *devrec = hw->priv; 629 + u8 val; 630 + 631 + /* min_be */ 632 + val = min_be << TXMCR_MIN_BE_SHIFT; 633 + /* csma backoffs */ 634 + val |= retries << TXMCR_CSMA_RETRIES_SHIFT; 635 + 636 + return regmap_update_bits(devrec->regmap_short, REG_TXMCR, 637 + TXMCR_MIN_BE_MASK | TXMCR_CSMA_RETRIES_MASK, 638 + val); 639 + } 640 + 641 + static int mrf24j40_set_cca_mode(struct ieee802154_hw *hw, 642 + const struct wpan_phy_cca *cca) 643 + { 644 + struct mrf24j40 *devrec = hw->priv; 645 + u8 val; 646 + 647 + /* mapping 802.15.4 to driver spec */ 648 + switch (cca->mode) { 649 + case NL802154_CCA_ENERGY: 650 + val = 2; 651 + break; 652 + case NL802154_CCA_CARRIER: 653 + val = 1; 654 + break; 655 + case NL802154_CCA_ENERGY_CARRIER: 656 + switch (cca->opt) { 657 + case NL802154_CCA_OPT_ENERGY_CARRIER_AND: 658 + val = 3; 659 + break; 660 + default: 
661 + return -EINVAL; 662 + } 663 + break; 664 + default: 665 + return -EINVAL; 768 666 } 769 667 770 - /* Cut off the checksum */ 771 - skb_trim(skb, len-2); 668 + return regmap_update_bits(devrec->regmap_short, REG_BBREG2, 669 + BBREG2_CCA_MODE_MASK, 670 + val << BBREG2_CCA_MODE_SHIFT); 671 + } 772 672 773 - /* TODO: Other drivers call ieee20154_rx_irqsafe() here (eg: cc2040, 774 - * also from a workqueue). I think irqsafe is not necessary here. 775 - * Can someone confirm? */ 776 - ieee802154_rx_irqsafe(devrec->hw, skb, lqi); 673 + /* array for representing ed levels */ 674 + static const s32 mrf24j40_ed_levels[] = { 675 + -9000, -8900, -8800, -8700, -8600, -8500, -8400, -8300, -8200, -8100, 676 + -8000, -7900, -7800, -7700, -7600, -7500, -7400, -7300, -7200, -7100, 677 + -7000, -6900, -6800, -6700, -6600, -6500, -6400, -6300, -6200, -6100, 678 + -6000, -5900, -5800, -5700, -5600, -5500, -5400, -5300, -5200, -5100, 679 + -5000, -4900, -4800, -4700, -4600, -4500, -4400, -4300, -4200, -4100, 680 + -4000, -3900, -3800, -3700, -3600, -3500 681 + }; 777 682 778 - dev_dbg(printdev(devrec), "RX Handled\n"); 683 + /* map ed levels to register value */ 684 + static const s32 mrf24j40_ed_levels_map[][2] = { 685 + { -9000, 0 }, { -8900, 1 }, { -8800, 2 }, { -8700, 5 }, { -8600, 9 }, 686 + { -8500, 13 }, { -8400, 18 }, { -8300, 23 }, { -8200, 27 }, 687 + { -8100, 32 }, { -8000, 37 }, { -7900, 43 }, { -7800, 48 }, 688 + { -7700, 53 }, { -7600, 58 }, { -7500, 63 }, { -7400, 68 }, 689 + { -7300, 73 }, { -7200, 78 }, { -7100, 83 }, { -7000, 89 }, 690 + { -6900, 95 }, { -6800, 100 }, { -6700, 107 }, { -6600, 111 }, 691 + { -6500, 117 }, { -6400, 121 }, { -6300, 125 }, { -6200, 129 }, 692 + { -6100, 133 }, { -6000, 138 }, { -5900, 143 }, { -5800, 148 }, 693 + { -5700, 153 }, { -5600, 159 }, { -5500, 165 }, { -5400, 170 }, 694 + { -5300, 176 }, { -5200, 183 }, { -5100, 188 }, { -5000, 193 }, 695 + { -4900, 198 }, { -4800, 203 }, { -4700, 207 }, { -4600, 212 }, 696 + { -4500, 216 
}, { -4400, 221 }, { -4300, 225 }, { -4200, 228 }, 697 + { -4100, 233 }, { -4000, 239 }, { -3900, 245 }, { -3800, 250 }, 698 + { -3700, 253 }, { -3600, 254 }, { -3500, 255 }, 699 + }; 779 700 780 - out: 781 - /* Turn back on reception of packets off the air. */ 782 - ret2 = read_short_reg(devrec, REG_BBREG1, &val); 783 - if (ret2) 784 - return ret2; 785 - val &= ~0x4; /* Clear RXDECINV */ 786 - write_short_reg(devrec, REG_BBREG1, val); 701 + static int mrf24j40_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm) 702 + { 703 + struct mrf24j40 *devrec = hw->priv; 704 + int i; 705 + 706 + for (i = 0; i < ARRAY_SIZE(mrf24j40_ed_levels_map); i++) { 707 + if (mrf24j40_ed_levels_map[i][0] == mbm) 708 + return regmap_write(devrec->regmap_short, REG_CCAEDTH, 709 + mrf24j40_ed_levels_map[i][1]); 710 + } 711 + 712 + return -EINVAL; 713 + } 714 + 715 + static const s32 mrf24j40ma_powers[] = { 716 + 0, -50, -120, -190, -280, -370, -490, -630, -1000, -1050, -1120, -1190, 717 + -1280, -1370, -1490, -1630, -2000, -2050, -2120, -2190, -2280, -2370, 718 + -2490, -2630, -3000, -3050, -3120, -3190, -3280, -3370, -3490, -3630, 719 + }; 720 + 721 + static int mrf24j40_set_txpower(struct ieee802154_hw *hw, s32 mbm) 722 + { 723 + struct mrf24j40 *devrec = hw->priv; 724 + s32 small_scale; 725 + u8 val; 726 + 727 + if (0 >= mbm && mbm > -1000) { 728 + val = TXPWRL_0 << TXPWRL_SHIFT; 729 + small_scale = mbm; 730 + } else if (-1000 >= mbm && mbm > -2000) { 731 + val = TXPWRL_10 << TXPWRL_SHIFT; 732 + small_scale = mbm + 1000; 733 + } else if (-2000 >= mbm && mbm > -3000) { 734 + val = TXPWRL_20 << TXPWRL_SHIFT; 735 + small_scale = mbm + 2000; 736 + } else if (-3000 >= mbm && mbm > -4000) { 737 + val = TXPWRL_30 << TXPWRL_SHIFT; 738 + small_scale = mbm + 3000; 739 + } else { 740 + return -EINVAL; 741 + } 742 + 743 + switch (small_scale) { 744 + case 0: 745 + val |= (TXPWRS_0 << TXPWRS_SHIFT); 746 + break; 747 + case -50: 748 + val |= (TXPWRS_0_5 << TXPWRS_SHIFT); 749 + break; 750 + case -120: 
751 + val |= (TXPWRS_1_2 << TXPWRS_SHIFT); 752 + break; 753 + case -190: 754 + val |= (TXPWRS_1_9 << TXPWRS_SHIFT); 755 + break; 756 + case -280: 757 + val |= (TXPWRS_2_8 << TXPWRS_SHIFT); 758 + break; 759 + case -370: 760 + val |= (TXPWRS_3_7 << TXPWRS_SHIFT); 761 + break; 762 + case -490: 763 + val |= (TXPWRS_4_9 << TXPWRS_SHIFT); 764 + break; 765 + case -630: 766 + val |= (TXPWRS_6_3 << TXPWRS_SHIFT); 767 + break; 768 + default: 769 + return -EINVAL; 770 + } 771 + 772 + return regmap_update_bits(devrec->regmap_long, REG_RFCON3, 773 + TXPWRL_MASK | TXPWRS_MASK, val); 774 + } 775 + 776 + static int mrf24j40_set_promiscuous_mode(struct ieee802154_hw *hw, bool on) 777 + { 778 + struct mrf24j40 *devrec = hw->priv; 779 + int ret; 780 + 781 + if (on) { 782 + /* set PROMI, ERRPKT and NOACKRSP */ 783 + ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR, 784 + BIT_PROMI | BIT_ERRPKT | BIT_NOACKRSP, 785 + BIT_PROMI | BIT_ERRPKT | BIT_NOACKRSP); 786 + } else { 787 + /* clear PROMI, ERRPKT and NOACKRSP */ 788 + ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR, 789 + BIT_PROMI | BIT_ERRPKT | BIT_NOACKRSP, 790 + 0); 791 + } 787 792 788 793 return ret; 789 794 } 790 795 791 796 static const struct ieee802154_ops mrf24j40_ops = { 792 797 .owner = THIS_MODULE, 793 - .xmit_sync = mrf24j40_tx, 798 + .xmit_async = mrf24j40_tx, 794 799 .ed = mrf24j40_ed, 795 800 .start = mrf24j40_start, 796 801 .stop = mrf24j40_stop, 797 802 .set_channel = mrf24j40_set_channel, 798 803 .set_hw_addr_filt = mrf24j40_filter, 804 + .set_csma_params = mrf24j40_csma_params, 805 + .set_cca_mode = mrf24j40_set_cca_mode, 806 + .set_cca_ed_level = mrf24j40_set_cca_ed_level, 807 + .set_txpower = mrf24j40_set_txpower, 808 + .set_promiscuous_mode = mrf24j40_set_promiscuous_mode, 799 809 }; 810 + 811 + static void mrf24j40_intstat_complete(void *context) 812 + { 813 + struct mrf24j40 *devrec = context; 814 + u8 intstat = devrec->irq_buf[1]; 815 + 816 + enable_irq(devrec->spi->irq); 817 + 818 + /* 
Check for TX complete */ 819 + if (intstat & BIT_TXNIF) 820 + ieee802154_xmit_complete(devrec->hw, devrec->tx_skb, false); 821 + 822 + /* Check for Rx */ 823 + if (intstat & BIT_RXIF) 824 + mrf24j40_handle_rx(devrec); 825 + } 800 826 801 827 static irqreturn_t mrf24j40_isr(int irq, void *data) 802 828 { 803 829 struct mrf24j40 *devrec = data; 804 - u8 intstat; 805 830 int ret; 806 831 832 + disable_irq_nosync(irq); 833 + 834 + devrec->irq_buf[0] = MRF24J40_READSHORT(REG_INTSTAT); 807 835 /* Read the interrupt status */ 808 - ret = read_short_reg(devrec, REG_INTSTAT, &intstat); 809 - if (ret) 810 - goto out; 836 + ret = spi_async(devrec->spi, &devrec->irq_msg); 837 + if (ret) { 838 + enable_irq(irq); 839 + return IRQ_NONE; 840 + } 811 841 812 - /* Check for TX complete */ 813 - if (intstat & 0x1) 814 - complete(&devrec->tx_complete); 815 - 816 - /* Check for Rx */ 817 - if (intstat & 0x8) 818 - mrf24j40_handle_rx(devrec); 819 - 820 - out: 821 842 return IRQ_HANDLED; 822 843 } 823 844 824 845 static int mrf24j40_hw_init(struct mrf24j40 *devrec) 825 846 { 847 + u32 irq_type; 826 848 int ret; 827 - u8 val; 828 849 829 850 /* Initialize the device. 830 851 From datasheet section 3.2: Initialization. 
*/ 831 - ret = write_short_reg(devrec, REG_SOFTRST, 0x07); 852 + ret = regmap_write(devrec->regmap_short, REG_SOFTRST, 0x07); 832 853 if (ret) 833 854 goto err_ret; 834 855 835 - ret = write_short_reg(devrec, REG_PACON2, 0x98); 856 + ret = regmap_write(devrec->regmap_short, REG_PACON2, 0x98); 836 857 if (ret) 837 858 goto err_ret; 838 859 839 - ret = write_short_reg(devrec, REG_TXSTBL, 0x95); 860 + ret = regmap_write(devrec->regmap_short, REG_TXSTBL, 0x95); 840 861 if (ret) 841 862 goto err_ret; 842 863 843 - ret = write_long_reg(devrec, REG_RFCON0, 0x03); 864 + ret = regmap_write(devrec->regmap_long, REG_RFCON0, 0x03); 844 865 if (ret) 845 866 goto err_ret; 846 867 847 - ret = write_long_reg(devrec, REG_RFCON1, 0x01); 868 + ret = regmap_write(devrec->regmap_long, REG_RFCON1, 0x01); 848 869 if (ret) 849 870 goto err_ret; 850 871 851 - ret = write_long_reg(devrec, REG_RFCON2, 0x80); 872 + ret = regmap_write(devrec->regmap_long, REG_RFCON2, 0x80); 852 873 if (ret) 853 874 goto err_ret; 854 875 855 - ret = write_long_reg(devrec, REG_RFCON6, 0x90); 876 + ret = regmap_write(devrec->regmap_long, REG_RFCON6, 0x90); 856 877 if (ret) 857 878 goto err_ret; 858 879 859 - ret = write_long_reg(devrec, REG_RFCON7, 0x80); 880 + ret = regmap_write(devrec->regmap_long, REG_RFCON7, 0x80); 860 881 if (ret) 861 882 goto err_ret; 862 883 863 - ret = write_long_reg(devrec, REG_RFCON8, 0x10); 884 + ret = regmap_write(devrec->regmap_long, REG_RFCON8, 0x10); 864 885 if (ret) 865 886 goto err_ret; 866 887 867 - ret = write_long_reg(devrec, REG_SLPCON1, 0x21); 888 + ret = regmap_write(devrec->regmap_long, REG_SLPCON1, 0x21); 868 889 if (ret) 869 890 goto err_ret; 870 891 871 - ret = write_short_reg(devrec, REG_BBREG2, 0x80); 892 + ret = regmap_write(devrec->regmap_short, REG_BBREG2, 0x80); 872 893 if (ret) 873 894 goto err_ret; 874 895 875 - ret = write_short_reg(devrec, REG_CCAEDTH, 0x60); 896 + ret = regmap_write(devrec->regmap_short, REG_CCAEDTH, 0x60); 876 897 if (ret) 877 898 goto 
err_ret; 878 899 879 - ret = write_short_reg(devrec, REG_BBREG6, 0x40); 900 + ret = regmap_write(devrec->regmap_short, REG_BBREG6, 0x40); 880 901 if (ret) 881 902 goto err_ret; 882 903 883 - ret = write_short_reg(devrec, REG_RFCTL, 0x04); 904 + ret = regmap_write(devrec->regmap_short, REG_RFCTL, 0x04); 884 905 if (ret) 885 906 goto err_ret; 886 907 887 - ret = write_short_reg(devrec, REG_RFCTL, 0x0); 908 + ret = regmap_write(devrec->regmap_short, REG_RFCTL, 0x0); 888 909 if (ret) 889 910 goto err_ret; 890 911 891 912 udelay(192); 892 913 893 914 /* Set RX Mode. RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */ 894 - ret = read_short_reg(devrec, REG_RXMCR, &val); 895 - if (ret) 896 - goto err_ret; 897 - 898 - val &= ~0x3; /* Clear RX mode (normal) */ 899 - 900 - ret = write_short_reg(devrec, REG_RXMCR, val); 915 + ret = regmap_update_bits(devrec->regmap_short, REG_RXMCR, 0x03, 0x00); 901 916 if (ret) 902 917 goto err_ret; 903 918 ··· 1129 696 /* Enable external amplifier. 1130 697 * From MRF24J40MC datasheet section 1.3: Operation. 1131 698 */ 1132 - read_long_reg(devrec, REG_TESTMODE, &val); 1133 - val |= 0x7; /* Configure GPIO 0-2 to control amplifier */ 1134 - write_long_reg(devrec, REG_TESTMODE, val); 699 + regmap_update_bits(devrec->regmap_long, REG_TESTMODE, 0x07, 700 + 0x07); 1135 701 1136 - read_short_reg(devrec, REG_TRISGPIO, &val); 1137 - val |= 0x8; /* Set GPIO3 as output. */ 1138 - write_short_reg(devrec, REG_TRISGPIO, val); 702 + /* Set GPIO3 as output. */ 703 + regmap_update_bits(devrec->regmap_short, REG_TRISGPIO, 0x08, 704 + 0x08); 1139 705 1140 - read_short_reg(devrec, REG_GPIO, &val); 1141 - val |= 0x8; /* Set GPIO3 HIGH to enable U5 voltage regulator */ 1142 - write_short_reg(devrec, REG_GPIO, val); 706 + /* Set GPIO3 HIGH to enable U5 voltage regulator */ 707 + regmap_update_bits(devrec->regmap_short, REG_GPIO, 0x08, 0x08); 1143 708 1144 709 /* Reduce TX pwr to meet FCC requirements. 
1145 710 * From MRF24J40MC datasheet section 3.1.1 1146 711 */ 1147 - write_long_reg(devrec, REG_RFCON3, 0x28); 712 + regmap_write(devrec->regmap_long, REG_RFCON3, 0x28); 713 + } 714 + 715 + irq_type = irq_get_trigger_type(devrec->spi->irq); 716 + if (irq_type == IRQ_TYPE_EDGE_RISING || 717 + irq_type == IRQ_TYPE_EDGE_FALLING) 718 + dev_warn(&devrec->spi->dev, 719 + "Using edge triggered irq's are not recommended, because it can cause races and result in a non-functional driver!\n"); 720 + switch (irq_type) { 721 + case IRQ_TYPE_EDGE_RISING: 722 + case IRQ_TYPE_LEVEL_HIGH: 723 + /* set interrupt polarity to rising */ 724 + ret = regmap_update_bits(devrec->regmap_long, REG_SLPCON0, 725 + BIT_INTEDGE, BIT_INTEDGE); 726 + if (ret) 727 + goto err_ret; 728 + break; 729 + default: 730 + /* default is falling edge */ 731 + break; 1148 732 } 1149 733 1150 734 return 0; ··· 1170 720 return ret; 1171 721 } 1172 722 723 + static void 724 + mrf24j40_setup_tx_spi_messages(struct mrf24j40 *devrec) 725 + { 726 + spi_message_init(&devrec->tx_msg); 727 + devrec->tx_msg.context = devrec; 728 + devrec->tx_msg.complete = write_tx_buf_complete; 729 + devrec->tx_hdr_trx.len = 2; 730 + devrec->tx_hdr_trx.tx_buf = devrec->tx_hdr_buf; 731 + spi_message_add_tail(&devrec->tx_hdr_trx, &devrec->tx_msg); 732 + devrec->tx_len_trx.len = 2; 733 + devrec->tx_len_trx.tx_buf = devrec->tx_len_buf; 734 + spi_message_add_tail(&devrec->tx_len_trx, &devrec->tx_msg); 735 + spi_message_add_tail(&devrec->tx_buf_trx, &devrec->tx_msg); 736 + 737 + spi_message_init(&devrec->tx_post_msg); 738 + devrec->tx_post_msg.context = devrec; 739 + devrec->tx_post_trx.len = 2; 740 + devrec->tx_post_trx.tx_buf = devrec->tx_post_buf; 741 + spi_message_add_tail(&devrec->tx_post_trx, &devrec->tx_post_msg); 742 + } 743 + 744 + static void 745 + mrf24j40_setup_rx_spi_messages(struct mrf24j40 *devrec) 746 + { 747 + spi_message_init(&devrec->rx_msg); 748 + devrec->rx_msg.context = devrec; 749 + devrec->rx_trx.len = 2; 750 + 
devrec->rx_trx.tx_buf = devrec->rx_buf; 751 + devrec->rx_trx.rx_buf = devrec->rx_buf; 752 + spi_message_add_tail(&devrec->rx_trx, &devrec->rx_msg); 753 + 754 + spi_message_init(&devrec->rx_buf_msg); 755 + devrec->rx_buf_msg.context = devrec; 756 + devrec->rx_buf_msg.complete = mrf24j40_handle_rx_read_buf_complete; 757 + devrec->rx_addr_trx.len = 2; 758 + devrec->rx_addr_trx.tx_buf = devrec->rx_addr_buf; 759 + spi_message_add_tail(&devrec->rx_addr_trx, &devrec->rx_buf_msg); 760 + devrec->rx_fifo_buf_trx.rx_buf = devrec->rx_fifo_buf; 761 + spi_message_add_tail(&devrec->rx_fifo_buf_trx, &devrec->rx_buf_msg); 762 + devrec->rx_lqi_trx.len = 2; 763 + devrec->rx_lqi_trx.rx_buf = devrec->rx_lqi_buf; 764 + spi_message_add_tail(&devrec->rx_lqi_trx, &devrec->rx_buf_msg); 765 + } 766 + 767 + static void 768 + mrf24j40_setup_irq_spi_messages(struct mrf24j40 *devrec) 769 + { 770 + spi_message_init(&devrec->irq_msg); 771 + devrec->irq_msg.context = devrec; 772 + devrec->irq_msg.complete = mrf24j40_intstat_complete; 773 + devrec->irq_trx.len = 2; 774 + devrec->irq_trx.tx_buf = devrec->irq_buf; 775 + devrec->irq_trx.rx_buf = devrec->irq_buf; 776 + spi_message_add_tail(&devrec->irq_trx, &devrec->irq_msg); 777 + } 778 + 779 + static void mrf24j40_phy_setup(struct mrf24j40 *devrec) 780 + { 781 + ieee802154_random_extended_addr(&devrec->hw->phy->perm_extended_addr); 782 + devrec->hw->phy->current_channel = 11; 783 + 784 + /* mrf24j40 supports max_minbe 0 - 3 */ 785 + devrec->hw->phy->supported.max_minbe = 3; 786 + /* datasheet doesn't say anything about max_be, but we have min_be 787 + * So we assume the max_be default. 
788 + */ 789 + devrec->hw->phy->supported.min_maxbe = 5; 790 + devrec->hw->phy->supported.max_maxbe = 5; 791 + 792 + devrec->hw->phy->cca.mode = NL802154_CCA_CARRIER; 793 + devrec->hw->phy->supported.cca_modes = BIT(NL802154_CCA_ENERGY) | 794 + BIT(NL802154_CCA_CARRIER) | 795 + BIT(NL802154_CCA_ENERGY_CARRIER); 796 + devrec->hw->phy->supported.cca_opts = BIT(NL802154_CCA_OPT_ENERGY_CARRIER_AND); 797 + 798 + devrec->hw->phy->cca_ed_level = -6900; 799 + devrec->hw->phy->supported.cca_ed_levels = mrf24j40_ed_levels; 800 + devrec->hw->phy->supported.cca_ed_levels_size = ARRAY_SIZE(mrf24j40_ed_levels); 801 + 802 + switch (spi_get_device_id(devrec->spi)->driver_data) { 803 + case MRF24J40: 804 + case MRF24J40MA: 805 + devrec->hw->phy->supported.tx_powers = mrf24j40ma_powers; 806 + devrec->hw->phy->supported.tx_powers_size = ARRAY_SIZE(mrf24j40ma_powers); 807 + devrec->hw->phy->flags |= WPAN_PHY_FLAG_TXPOWER; 808 + break; 809 + default: 810 + break; 811 + } 812 + } 813 + 1173 814 static int mrf24j40_probe(struct spi_device *spi) 1174 815 { 1175 - int ret = -ENOMEM; 816 + int ret = -ENOMEM, irq_type; 817 + struct ieee802154_hw *hw; 1176 818 struct mrf24j40 *devrec; 1177 819 1178 820 dev_info(&spi->dev, "probe(). IRQ: %d\n", spi->irq); 1179 821 1180 - devrec = devm_kzalloc(&spi->dev, sizeof(struct mrf24j40), GFP_KERNEL); 1181 - if (!devrec) 1182 - goto err_ret; 1183 - devrec->buf = devm_kzalloc(&spi->dev, 3, GFP_KERNEL); 1184 - if (!devrec->buf) 1185 - goto err_ret; 1186 - 1187 - spi->mode = SPI_MODE_0; /* TODO: Is this appropriate for right here? 
*/ 1188 - if (spi->max_speed_hz > MAX_SPI_SPEED_HZ) 1189 - spi->max_speed_hz = MAX_SPI_SPEED_HZ; 1190 - 1191 - mutex_init(&devrec->buffer_mutex); 1192 - init_completion(&devrec->tx_complete); 1193 - devrec->spi = spi; 1194 - spi_set_drvdata(spi, devrec); 1195 - 1196 822 /* Register with the 802154 subsystem */ 1197 823 1198 - devrec->hw = ieee802154_alloc_hw(0, &mrf24j40_ops); 1199 - if (!devrec->hw) 824 + hw = ieee802154_alloc_hw(sizeof(*devrec), &mrf24j40_ops); 825 + if (!hw) 1200 826 goto err_ret; 1201 827 1202 - devrec->hw->priv = devrec; 1203 - devrec->hw->parent = &devrec->spi->dev; 828 + devrec = hw->priv; 829 + devrec->spi = spi; 830 + spi_set_drvdata(spi, devrec); 831 + devrec->hw = hw; 832 + devrec->hw->parent = &spi->dev; 1204 833 devrec->hw->phy->supported.channels[0] = CHANNEL_MASK; 1205 - devrec->hw->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AFILT; 834 + devrec->hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | 835 + IEEE802154_HW_CSMA_PARAMS | 836 + IEEE802154_HW_PROMISCUOUS; 837 + 838 + devrec->hw->phy->flags = WPAN_PHY_FLAG_CCA_MODE | 839 + WPAN_PHY_FLAG_CCA_ED_LEVEL; 840 + 841 + mrf24j40_setup_tx_spi_messages(devrec); 842 + mrf24j40_setup_rx_spi_messages(devrec); 843 + mrf24j40_setup_irq_spi_messages(devrec); 844 + 845 + devrec->regmap_short = devm_regmap_init_spi(spi, 846 + &mrf24j40_short_regmap); 847 + if (IS_ERR(devrec->regmap_short)) { 848 + ret = PTR_ERR(devrec->regmap_short); 849 + dev_err(&spi->dev, "Failed to allocate short register map: %d\n", 850 + ret); 851 + goto err_register_device; 852 + } 853 + 854 + devrec->regmap_long = devm_regmap_init(&spi->dev, 855 + &mrf24j40_long_regmap_bus, 856 + spi, &mrf24j40_long_regmap); 857 + if (IS_ERR(devrec->regmap_long)) { 858 + ret = PTR_ERR(devrec->regmap_long); 859 + dev_err(&spi->dev, "Failed to allocate long register map: %d\n", 860 + ret); 861 + goto err_register_device; 862 + } 863 + 864 + if (spi->max_speed_hz > MAX_SPI_SPEED_HZ) { 865 + dev_warn(&spi->dev, "spi clock 
above possible maximum: %d", 866 + MAX_SPI_SPEED_HZ); 867 + return -EINVAL; 868 + } 869 + 870 + ret = mrf24j40_hw_init(devrec); 871 + if (ret) 872 + goto err_register_device; 873 + 874 + mrf24j40_phy_setup(devrec); 875 + 876 + /* request IRQF_TRIGGER_LOW as fallback default */ 877 + irq_type = irq_get_trigger_type(spi->irq); 878 + if (!irq_type) 879 + irq_type = IRQF_TRIGGER_LOW; 880 + 881 + ret = devm_request_irq(&spi->dev, spi->irq, mrf24j40_isr, 882 + irq_type, dev_name(&spi->dev), devrec); 883 + if (ret) { 884 + dev_err(printdev(devrec), "Unable to get IRQ"); 885 + goto err_register_device; 886 + } 1206 887 1207 888 dev_dbg(printdev(devrec), "registered mrf24j40\n"); 1208 889 ret = ieee802154_register_hw(devrec->hw); 1209 890 if (ret) 1210 891 goto err_register_device; 1211 892 1212 - ret = mrf24j40_hw_init(devrec); 1213 - if (ret) 1214 - goto err_hw_init; 1215 - 1216 - ret = devm_request_threaded_irq(&spi->dev, 1217 - spi->irq, 1218 - NULL, 1219 - mrf24j40_isr, 1220 - IRQF_TRIGGER_LOW|IRQF_ONESHOT, 1221 - dev_name(&spi->dev), 1222 - devrec); 1223 - 1224 - if (ret) { 1225 - dev_err(printdev(devrec), "Unable to get IRQ"); 1226 - goto err_irq; 1227 - } 1228 - 1229 893 return 0; 1230 894 1231 - err_irq: 1232 - err_hw_init: 1233 - ieee802154_unregister_hw(devrec->hw); 1234 895 err_register_device: 1235 896 ieee802154_free_hw(devrec->hw); 1236 897 err_ret: ··· 1362 801 return 0; 1363 802 } 1364 803 804 + static const struct of_device_id mrf24j40_of_match[] = { 805 + { .compatible = "microchip,mrf24j40", .data = (void *)MRF24J40 }, 806 + { .compatible = "microchip,mrf24j40ma", .data = (void *)MRF24J40MA }, 807 + { .compatible = "microchip,mrf24j40mc", .data = (void *)MRF24J40MC }, 808 + { }, 809 + }; 810 + MODULE_DEVICE_TABLE(of, mrf24j40_of_match); 811 + 1365 812 static const struct spi_device_id mrf24j40_ids[] = { 1366 813 { "mrf24j40", MRF24J40 }, 1367 814 { "mrf24j40ma", MRF24J40MA }, ··· 1380 811 1381 812 static struct spi_driver mrf24j40_driver = { 1382 813 
.driver = { 814 + .of_match_table = of_match_ptr(mrf24j40_of_match), 1383 815 .name = "mrf24j40", 1384 816 .owner = THIS_MODULE, 1385 817 },
+21 -1
include/linux/ieee802154.h
··· 25 25 26 26 #include <linux/types.h> 27 27 #include <linux/random.h> 28 - #include <asm/byteorder.h> 29 28 30 29 #define IEEE802154_MTU 127 31 30 #define IEEE802154_ACK_PSDU_LEN 5 32 31 #define IEEE802154_MIN_PSDU_LEN 9 33 32 #define IEEE802154_FCS_LEN 2 33 + #define IEEE802154_MAX_AUTH_TAG_LEN 16 34 + 35 + /* General MAC frame format: 36 + * 2 bytes: Frame Control 37 + * 1 byte: Sequence Number 38 + * 20 bytes: Addressing fields 39 + * 14 bytes: Auxiliary Security Header 40 + */ 41 + #define IEEE802154_MAX_HEADER_LEN (2 + 1 + 20 + 14) 42 + #define IEEE802154_MIN_HEADER_LEN (IEEE802154_ACK_PSDU_LEN - \ 43 + IEEE802154_FCS_LEN) 34 44 35 45 #define IEEE802154_PAN_ID_BROADCAST 0xffff 36 46 #define IEEE802154_ADDR_SHORT_BROADCAST 0xffff ··· 217 207 218 208 /* frame control handling */ 219 209 #define IEEE802154_FCTL_FTYPE 0x0003 210 + #define IEEE802154_FCTL_ACKREQ 0x0020 220 211 #define IEEE802154_FCTL_INTRA_PAN 0x0040 221 212 222 213 #define IEEE802154_FTYPE_DATA 0x0001 ··· 230 219 { 231 220 return (fc & cpu_to_le16(IEEE802154_FCTL_FTYPE)) == 232 221 cpu_to_le16(IEEE802154_FTYPE_DATA); 222 + } 223 + 224 + /** 225 + * ieee802154_is_ackreq - check if acknowledgment request bit is set 226 + * @fc: frame control bytes in little-endian byteorder 227 + */ 228 + static inline bool ieee802154_is_ackreq(__le16 fc) 229 + { 230 + return fc & cpu_to_le16(IEEE802154_FCTL_ACKREQ); 233 231 } 234 232 235 233 /**
+10
include/net/6lowpan.h
··· 61 61 #define UIP_PROTO_UDP 17 /* ipv6 next header value for UDP */ 62 62 #define UIP_FRAGH_LEN 8 /* ipv6 fragment header size */ 63 63 64 + #define EUI64_ADDR_LEN 8 65 + 66 + #define LOWPAN_NHC_MAX_ID_LEN 1 67 + /* Max IPHC Header len without IPv6 hdr specific inline data. 68 + * Useful for getting the "extra" bytes we need at worst case compression. 69 + * 70 + * LOWPAN_IPHC + CID + LOWPAN_NHC_MAX_ID_LEN 71 + */ 72 + #define LOWPAN_IPHC_MAX_HEADER_LEN (2 + 1 + LOWPAN_NHC_MAX_ID_LEN) 73 + 64 74 /* 65 75 * ipv6 address based on mac 66 76 * second bit-flip (Universe/Local) is done according RFC2464
+5
include/net/bluetooth/bluetooth.h
··· 122 122 __printf(1, 2) 123 123 void bt_info(const char *fmt, ...); 124 124 __printf(1, 2) 125 + void bt_warn(const char *fmt, ...); 126 + __printf(1, 2) 125 127 void bt_err(const char *fmt, ...); 126 128 __printf(1, 2) 127 129 void bt_err_ratelimited(const char *fmt, ...); 128 130 129 131 #define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__) 132 + #define BT_WARN(fmt, ...) bt_warn(fmt "\n", ##__VA_ARGS__) 130 133 #define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__) 131 134 #define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__) 132 135 ··· 137 134 138 135 #define bt_dev_info(hdev, fmt, ...) \ 139 136 BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__) 137 + #define bt_dev_warn(hdev, fmt, ...) \ 138 + BT_WARN("%s: " fmt, (hdev)->name, ##__VA_ARGS__) 140 139 #define bt_dev_err(hdev, fmt, ...) \ 141 140 BT_ERR("%s: " fmt, (hdev)->name, ##__VA_ARGS__) 142 141 #define bt_dev_dbg(hdev, fmt, ...) \
+4
include/net/bluetooth/hci.h
··· 44 44 #define HCI_DEV_DOWN 4 45 45 #define HCI_DEV_SUSPEND 5 46 46 #define HCI_DEV_RESUME 6 47 + #define HCI_DEV_OPEN 7 48 + #define HCI_DEV_CLOSE 8 47 49 48 50 /* HCI notify events */ 49 51 #define HCI_NOTIFY_CONN_ADD 1 ··· 240 238 HCI_LE_SCAN_INTERRUPTED, 241 239 242 240 HCI_DUT_MODE, 241 + HCI_VENDOR_DIAG, 243 242 HCI_FORCE_BREDR_SMP, 244 243 HCI_FORCE_STATIC_ADDR, 245 244 ··· 263 260 #define HCI_ACLDATA_PKT 0x02 264 261 #define HCI_SCODATA_PKT 0x03 265 262 #define HCI_EVENT_PKT 0x04 263 + #define HCI_DIAG_PKT 0xf0 266 264 #define HCI_VENDOR_PKT 0xff 267 265 268 266 /* HCI packet types */
+5
include/net/bluetooth/hci_core.h
··· 398 398 int (*send)(struct hci_dev *hdev, struct sk_buff *skb); 399 399 void (*notify)(struct hci_dev *hdev, unsigned int evt); 400 400 void (*hw_error)(struct hci_dev *hdev, u8 code); 401 + int (*set_diag)(struct hci_dev *hdev, bool enable); 401 402 int (*set_bdaddr)(struct hci_dev *hdev, const bdaddr_t *bdaddr); 402 403 }; 403 404 ··· 1067 1066 void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb); 1068 1067 1069 1068 int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb); 1069 + int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb); 1070 1070 1071 1071 void hci_init_sysfs(struct hci_dev *hdev); 1072 1072 void hci_conn_init_sysfs(struct hci_conn *conn); ··· 1350 1348 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb); 1351 1349 1352 1350 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode); 1351 + 1352 + struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, 1353 + const void *param, u32 timeout); 1353 1354 1354 1355 /* ----- HCI Sockets ----- */ 1355 1356 void hci_send_to_sock(struct hci_dev *hdev, struct sk_buff *skb);
+10
include/net/bluetooth/hci_mon.h
··· 39 39 #define HCI_MON_ACL_RX_PKT 5 40 40 #define HCI_MON_SCO_TX_PKT 6 41 41 #define HCI_MON_SCO_RX_PKT 7 42 + #define HCI_MON_OPEN_INDEX 8 43 + #define HCI_MON_CLOSE_INDEX 9 44 + #define HCI_MON_INDEX_INFO 10 45 + #define HCI_MON_VENDOR_DIAG 11 42 46 43 47 struct hci_mon_new_index { 44 48 __u8 type; ··· 51 47 char name[8]; 52 48 } __packed; 53 49 #define HCI_MON_NEW_INDEX_SIZE 16 50 + 51 + struct hci_mon_index_info { 52 + bdaddr_t bdaddr; 53 + __le16 manufacturer; 54 + } __packed; 55 + #define HCI_MON_INDEX_INFO_SIZE 8 54 56 55 57 #endif /* __HCI_MON_H */
+164
include/net/cfg802154.h
··· 27 27 struct wpan_phy; 28 28 struct wpan_phy_cca; 29 29 30 + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL 31 + struct ieee802154_llsec_device_key; 32 + struct ieee802154_llsec_seclevel; 33 + struct ieee802154_llsec_params; 34 + struct ieee802154_llsec_device; 35 + struct ieee802154_llsec_table; 36 + struct ieee802154_llsec_key_id; 37 + struct ieee802154_llsec_key; 38 + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ 39 + 30 40 struct cfg802154_ops { 31 41 struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy, 32 42 const char *name, ··· 75 65 struct wpan_dev *wpan_dev, bool mode); 76 66 int (*set_ackreq_default)(struct wpan_phy *wpan_phy, 77 67 struct wpan_dev *wpan_dev, bool ackreq); 68 + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL 69 + void (*get_llsec_table)(struct wpan_phy *wpan_phy, 70 + struct wpan_dev *wpan_dev, 71 + struct ieee802154_llsec_table **table); 72 + void (*lock_llsec_table)(struct wpan_phy *wpan_phy, 73 + struct wpan_dev *wpan_dev); 74 + void (*unlock_llsec_table)(struct wpan_phy *wpan_phy, 75 + struct wpan_dev *wpan_dev); 76 + /* TODO remove locking/get table callbacks, this is part of the 77 + * nl802154 interface and should be accessible from ieee802154 layer. 
78 + */ 79 + int (*get_llsec_params)(struct wpan_phy *wpan_phy, 80 + struct wpan_dev *wpan_dev, 81 + struct ieee802154_llsec_params *params); 82 + int (*set_llsec_params)(struct wpan_phy *wpan_phy, 83 + struct wpan_dev *wpan_dev, 84 + const struct ieee802154_llsec_params *params, 85 + int changed); 86 + int (*add_llsec_key)(struct wpan_phy *wpan_phy, 87 + struct wpan_dev *wpan_dev, 88 + const struct ieee802154_llsec_key_id *id, 89 + const struct ieee802154_llsec_key *key); 90 + int (*del_llsec_key)(struct wpan_phy *wpan_phy, 91 + struct wpan_dev *wpan_dev, 92 + const struct ieee802154_llsec_key_id *id); 93 + int (*add_seclevel)(struct wpan_phy *wpan_phy, 94 + struct wpan_dev *wpan_dev, 95 + const struct ieee802154_llsec_seclevel *sl); 96 + int (*del_seclevel)(struct wpan_phy *wpan_phy, 97 + struct wpan_dev *wpan_dev, 98 + const struct ieee802154_llsec_seclevel *sl); 99 + int (*add_device)(struct wpan_phy *wpan_phy, 100 + struct wpan_dev *wpan_dev, 101 + const struct ieee802154_llsec_device *dev); 102 + int (*del_device)(struct wpan_phy *wpan_phy, 103 + struct wpan_dev *wpan_dev, __le64 extended_addr); 104 + int (*add_devkey)(struct wpan_phy *wpan_phy, 105 + struct wpan_dev *wpan_dev, 106 + __le64 extended_addr, 107 + const struct ieee802154_llsec_device_key *key); 108 + int (*del_devkey)(struct wpan_phy *wpan_phy, 109 + struct wpan_dev *wpan_dev, 110 + __le64 extended_addr, 111 + const struct ieee802154_llsec_device_key *key); 112 + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ 78 113 }; 79 114 80 115 static inline bool ··· 222 167 char priv[0] __aligned(NETDEV_ALIGN); 223 168 }; 224 169 170 + struct ieee802154_addr { 171 + u8 mode; 172 + __le16 pan_id; 173 + union { 174 + __le16 short_addr; 175 + __le64 extended_addr; 176 + }; 177 + }; 178 + 179 + struct ieee802154_llsec_key_id { 180 + u8 mode; 181 + u8 id; 182 + union { 183 + struct ieee802154_addr device_addr; 184 + __le32 short_source; 185 + __le64 extended_source; 186 + }; 187 + }; 188 + 189 + #define 
IEEE802154_LLSEC_KEY_SIZE 16 190 + 191 + struct ieee802154_llsec_key { 192 + u8 frame_types; 193 + u32 cmd_frame_ids; 194 + /* TODO replace with NL802154_KEY_SIZE */ 195 + u8 key[IEEE802154_LLSEC_KEY_SIZE]; 196 + }; 197 + 198 + struct ieee802154_llsec_key_entry { 199 + struct list_head list; 200 + 201 + struct ieee802154_llsec_key_id id; 202 + struct ieee802154_llsec_key *key; 203 + }; 204 + 205 + struct ieee802154_llsec_params { 206 + bool enabled; 207 + 208 + __be32 frame_counter; 209 + u8 out_level; 210 + struct ieee802154_llsec_key_id out_key; 211 + 212 + __le64 default_key_source; 213 + 214 + __le16 pan_id; 215 + __le64 hwaddr; 216 + __le64 coord_hwaddr; 217 + __le16 coord_shortaddr; 218 + }; 219 + 220 + struct ieee802154_llsec_table { 221 + struct list_head keys; 222 + struct list_head devices; 223 + struct list_head security_levels; 224 + }; 225 + 226 + struct ieee802154_llsec_seclevel { 227 + struct list_head list; 228 + 229 + u8 frame_type; 230 + u8 cmd_frame_id; 231 + bool device_override; 232 + u32 sec_levels; 233 + }; 234 + 235 + struct ieee802154_llsec_device { 236 + struct list_head list; 237 + 238 + __le16 pan_id; 239 + __le16 short_addr; 240 + __le64 hwaddr; 241 + u32 frame_counter; 242 + bool seclevel_exempt; 243 + 244 + u8 key_mode; 245 + struct list_head keys; 246 + }; 247 + 248 + struct ieee802154_llsec_device_key { 249 + struct list_head list; 250 + 251 + struct ieee802154_llsec_key_id key_id; 252 + u32 frame_counter; 253 + }; 254 + 255 + struct wpan_dev_header_ops { 256 + /* TODO create callback currently assumes ieee802154_mac_cb inside 257 + * skb->cb. This should be changed to give these information as 258 + * parameter. 
259 + */ 260 + int (*create)(struct sk_buff *skb, struct net_device *dev, 261 + const struct ieee802154_addr *daddr, 262 + const struct ieee802154_addr *saddr, 263 + unsigned int len); 264 + }; 265 + 225 266 struct wpan_dev { 226 267 struct wpan_phy *wpan_phy; 227 268 int iftype; ··· 325 174 /* the remainder of this struct should be private to cfg802154 */ 326 175 struct list_head list; 327 176 struct net_device *netdev; 177 + 178 + const struct wpan_dev_header_ops *header_ops; 328 179 329 180 /* lowpan interface, set when the wpan_dev belongs to one lowpan_dev */ 330 181 struct net_device *lowpan_dev; ··· 357 204 }; 358 205 359 206 #define to_phy(_dev) container_of(_dev, struct wpan_phy, dev) 207 + 208 + static inline int 209 + wpan_dev_hard_header(struct sk_buff *skb, struct net_device *dev, 210 + const struct ieee802154_addr *daddr, 211 + const struct ieee802154_addr *saddr, 212 + unsigned int len) 213 + { 214 + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; 215 + 216 + return wpan_dev->header_ops->create(skb, dev, daddr, saddr, len); 217 + } 360 218 361 219 struct wpan_phy * 362 220 wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size);
+1 -85
include/net/ieee802154_netdev.h
··· 50 50 }; 51 51 }; 52 52 53 - struct ieee802154_addr { 54 - u8 mode; 55 - __le16 pan_id; 56 - union { 57 - __le16 short_addr; 58 - __le64 extended_addr; 59 - }; 60 - }; 61 - 62 53 struct ieee802154_hdr_fc { 63 54 #if defined(__LITTLE_ENDIAN_BITFIELD) 64 55 u16 type:3, ··· 90 99 * hdr->fc will be ignored. this includes the INTRA_PAN bit and the frame 91 100 * version, if SECEN is set. 92 101 */ 93 - int ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr); 102 + int ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr); 94 103 95 104 /* pulls the entire 802.15.4 header off of the skb, including the security 96 105 * header, and performs pan id decompression ··· 234 243 return mac_cb(skb); 235 244 } 236 245 237 - #define IEEE802154_LLSEC_KEY_SIZE 16 238 - 239 - struct ieee802154_llsec_key_id { 240 - u8 mode; 241 - u8 id; 242 - union { 243 - struct ieee802154_addr device_addr; 244 - __le32 short_source; 245 - __le64 extended_source; 246 - }; 247 - }; 248 - 249 - struct ieee802154_llsec_key { 250 - u8 frame_types; 251 - u32 cmd_frame_ids; 252 - u8 key[IEEE802154_LLSEC_KEY_SIZE]; 253 - }; 254 - 255 - struct ieee802154_llsec_key_entry { 256 - struct list_head list; 257 - 258 - struct ieee802154_llsec_key_id id; 259 - struct ieee802154_llsec_key *key; 260 - }; 261 - 262 - struct ieee802154_llsec_device_key { 263 - struct list_head list; 264 - 265 - struct ieee802154_llsec_key_id key_id; 266 - u32 frame_counter; 267 - }; 268 - 269 246 enum { 270 247 IEEE802154_LLSEC_DEVKEY_IGNORE, 271 248 IEEE802154_LLSEC_DEVKEY_RESTRICT, 272 249 IEEE802154_LLSEC_DEVKEY_RECORD, 273 250 274 251 __IEEE802154_LLSEC_DEVKEY_MAX, 275 - }; 276 - 277 - struct ieee802154_llsec_device { 278 - struct list_head list; 279 - 280 - __le16 pan_id; 281 - __le16 short_addr; 282 - __le64 hwaddr; 283 - u32 frame_counter; 284 - bool seclevel_exempt; 285 - 286 - u8 key_mode; 287 - struct list_head keys; 288 - }; 289 - 290 - struct ieee802154_llsec_seclevel { 291 - struct 
list_head list; 292 - 293 - u8 frame_type; 294 - u8 cmd_frame_id; 295 - bool device_override; 296 - u32 sec_levels; 297 - }; 298 - 299 - struct ieee802154_llsec_params { 300 - bool enabled; 301 - 302 - __be32 frame_counter; 303 - u8 out_level; 304 - struct ieee802154_llsec_key_id out_key; 305 - 306 - __le64 default_key_source; 307 - 308 - __le16 pan_id; 309 - __le64 hwaddr; 310 - __le64 coord_hwaddr; 311 - __le16 coord_shortaddr; 312 - }; 313 - 314 - struct ieee802154_llsec_table { 315 - struct list_head keys; 316 - struct list_head devices; 317 - struct list_head security_levels; 318 252 }; 319 253 320 254 #define IEEE802154_MAC_SCAN_ED 0
+1 -9
include/net/mac802154.h
··· 23 23 24 24 #include <net/cfg802154.h> 25 25 26 - /* General MAC frame format: 27 - * 2 bytes: Frame Control 28 - * 1 byte: Sequence Number 29 - * 20 bytes: Addressing fields 30 - * 14 bytes: Auxiliary Security Header 31 - */ 32 - #define MAC802154_FRAME_HARD_HEADER_LEN (2 + 1 + 20 + 14) 33 - 34 26 /** 35 27 * enum ieee802154_hw_addr_filt_flags - hardware address filtering flags 36 28 * ··· 248 256 static inline __le16 ieee802154_get_fc_from_skb(const struct sk_buff *skb) 249 257 { 250 258 /* return some invalid fc on failure */ 251 - if (unlikely(skb->mac_len < 2)) { 259 + if (unlikely(skb->len < 2)) { 252 260 WARN_ON(1); 253 261 return cpu_to_le16(0); 254 262 }
+18
include/net/netlink.h
··· 1004 1004 } 1005 1005 1006 1006 /** 1007 + * nla_get_le32 - return payload of __le32 attribute 1008 + * @nla: __le32 netlink attribute 1009 + */ 1010 + static inline __le32 nla_get_le32(const struct nlattr *nla) 1011 + { 1012 + return *(__le32 *) nla_data(nla); 1013 + } 1014 + 1015 + /** 1007 1016 * nla_get_u16 - return payload of u16 attribute 1008 1017 * @nla: u16 netlink attribute 1009 1018 */ ··· 1072 1063 nla_memcpy(&tmp, nla, sizeof(tmp)); 1073 1064 1074 1065 return tmp; 1066 + } 1067 + 1068 + /** 1069 + * nla_get_le64 - return payload of __le64 attribute 1070 + * @nla: __le64 netlink attribute 1071 + */ 1072 + static inline __le64 nla_get_le64(const struct nlattr *nla) 1073 + { 1074 + return *(__le64 *) nla_data(nla); 1075 1075 } 1076 1076 1077 1077 /**
+191
include/net/nl802154.h
··· 56 56 57 57 /* add new commands above here */ 58 58 59 + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL 60 + NL802154_CMD_SET_SEC_PARAMS, 61 + NL802154_CMD_GET_SEC_KEY, /* can dump */ 62 + NL802154_CMD_NEW_SEC_KEY, 63 + NL802154_CMD_DEL_SEC_KEY, 64 + NL802154_CMD_GET_SEC_DEV, /* can dump */ 65 + NL802154_CMD_NEW_SEC_DEV, 66 + NL802154_CMD_DEL_SEC_DEV, 67 + NL802154_CMD_GET_SEC_DEVKEY, /* can dump */ 68 + NL802154_CMD_NEW_SEC_DEVKEY, 69 + NL802154_CMD_DEL_SEC_DEVKEY, 70 + NL802154_CMD_GET_SEC_LEVEL, /* can dump */ 71 + NL802154_CMD_NEW_SEC_LEVEL, 72 + NL802154_CMD_DEL_SEC_LEVEL, 73 + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ 74 + 59 75 /* used to define NL802154_CMD_MAX below */ 60 76 __NL802154_CMD_AFTER_LAST, 61 77 NL802154_CMD_MAX = __NL802154_CMD_AFTER_LAST - 1 ··· 125 109 NL802154_ATTR_ACKREQ_DEFAULT, 126 110 127 111 /* add attributes here, update the policy in nl802154.c */ 112 + 113 + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL 114 + NL802154_ATTR_SEC_ENABLED, 115 + NL802154_ATTR_SEC_OUT_LEVEL, 116 + NL802154_ATTR_SEC_OUT_KEY_ID, 117 + NL802154_ATTR_SEC_FRAME_COUNTER, 118 + 119 + NL802154_ATTR_SEC_LEVEL, 120 + NL802154_ATTR_SEC_DEVICE, 121 + NL802154_ATTR_SEC_DEVKEY, 122 + NL802154_ATTR_SEC_KEY, 123 + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ 128 124 129 125 __NL802154_ATTR_AFTER_LAST, 130 126 NL802154_ATTR_MAX = __NL802154_ATTR_AFTER_LAST - 1 ··· 274 246 __NL802154_SUPPORTED_BOOL_AFTER_LAST, 275 247 NL802154_SUPPORTED_BOOL_MAX = __NL802154_SUPPORTED_BOOL_AFTER_LAST - 1 276 248 }; 249 + 250 + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL 251 + 252 + enum nl802154_dev_addr_modes { 253 + NL802154_DEV_ADDR_NONE, 254 + __NL802154_DEV_ADDR_INVALID, 255 + NL802154_DEV_ADDR_SHORT, 256 + NL802154_DEV_ADDR_EXTENDED, 257 + 258 + /* keep last */ 259 + __NL802154_DEV_ADDR_AFTER_LAST, 260 + NL802154_DEV_ADDR_MAX = __NL802154_DEV_ADDR_AFTER_LAST - 1 261 + }; 262 + 263 + enum nl802154_dev_addr_attrs { 264 + NL802154_DEV_ADDR_ATTR_UNSPEC, 265 + 
266 + NL802154_DEV_ADDR_ATTR_PAN_ID, 267 + NL802154_DEV_ADDR_ATTR_MODE, 268 + NL802154_DEV_ADDR_ATTR_SHORT, 269 + NL802154_DEV_ADDR_ATTR_EXTENDED, 270 + 271 + /* keep last */ 272 + __NL802154_DEV_ADDR_ATTR_AFTER_LAST, 273 + NL802154_DEV_ADDR_ATTR_MAX = __NL802154_DEV_ADDR_ATTR_AFTER_LAST - 1 274 + }; 275 + 276 + enum nl802154_key_id_modes { 277 + NL802154_KEY_ID_MODE_IMPLICIT, 278 + NL802154_KEY_ID_MODE_INDEX, 279 + NL802154_KEY_ID_MODE_INDEX_SHORT, 280 + NL802154_KEY_ID_MODE_INDEX_EXTENDED, 281 + 282 + /* keep last */ 283 + __NL802154_KEY_ID_MODE_AFTER_LAST, 284 + NL802154_KEY_ID_MODE_MAX = __NL802154_KEY_ID_MODE_AFTER_LAST - 1 285 + }; 286 + 287 + enum nl802154_key_id_attrs { 288 + NL802154_KEY_ID_ATTR_UNSPEC, 289 + 290 + NL802154_KEY_ID_ATTR_MODE, 291 + NL802154_KEY_ID_ATTR_INDEX, 292 + NL802154_KEY_ID_ATTR_IMPLICIT, 293 + NL802154_KEY_ID_ATTR_SOURCE_SHORT, 294 + NL802154_KEY_ID_ATTR_SOURCE_EXTENDED, 295 + 296 + /* keep last */ 297 + __NL802154_KEY_ID_ATTR_AFTER_LAST, 298 + NL802154_KEY_ID_ATTR_MAX = __NL802154_KEY_ID_ATTR_AFTER_LAST - 1 299 + }; 300 + 301 + enum nl802154_seclevels { 302 + NL802154_SECLEVEL_NONE, 303 + NL802154_SECLEVEL_MIC32, 304 + NL802154_SECLEVEL_MIC64, 305 + NL802154_SECLEVEL_MIC128, 306 + NL802154_SECLEVEL_ENC, 307 + NL802154_SECLEVEL_ENC_MIC32, 308 + NL802154_SECLEVEL_ENC_MIC64, 309 + NL802154_SECLEVEL_ENC_MIC128, 310 + 311 + /* keep last */ 312 + __NL802154_SECLEVEL_AFTER_LAST, 313 + NL802154_SECLEVEL_MAX = __NL802154_SECLEVEL_AFTER_LAST - 1 314 + }; 315 + 316 + enum nl802154_frames { 317 + NL802154_FRAME_BEACON, 318 + NL802154_FRAME_DATA, 319 + NL802154_FRAME_ACK, 320 + NL802154_FRAME_CMD, 321 + 322 + /* keep last */ 323 + __NL802154_FRAME_AFTER_LAST, 324 + NL802154_FRAME_MAX = __NL802154_FRAME_AFTER_LAST - 1 325 + }; 326 + 327 + enum nl802154_cmd_frames { 328 + __NL802154_CMD_FRAME_INVALID, 329 + NL802154_CMD_FRAME_ASSOC_REQUEST, 330 + NL802154_CMD_FRAME_ASSOC_RESPONSE, 331 + NL802154_CMD_FRAME_DISASSOC_NOTIFY, 332 + 
NL802154_CMD_FRAME_DATA_REQUEST, 333 + NL802154_CMD_FRAME_PAN_ID_CONFLICT_NOTIFY, 334 + NL802154_CMD_FRAME_ORPHAN_NOTIFY, 335 + NL802154_CMD_FRAME_BEACON_REQUEST, 336 + NL802154_CMD_FRAME_COORD_REALIGNMENT, 337 + NL802154_CMD_FRAME_GTS_REQUEST, 338 + 339 + /* keep last */ 340 + __NL802154_CMD_FRAME_AFTER_LAST, 341 + NL802154_CMD_FRAME_MAX = __NL802154_CMD_FRAME_AFTER_LAST - 1 342 + }; 343 + 344 + enum nl802154_seclevel_attrs { 345 + NL802154_SECLEVEL_ATTR_UNSPEC, 346 + 347 + NL802154_SECLEVEL_ATTR_LEVELS, 348 + NL802154_SECLEVEL_ATTR_FRAME, 349 + NL802154_SECLEVEL_ATTR_CMD_FRAME, 350 + NL802154_SECLEVEL_ATTR_DEV_OVERRIDE, 351 + 352 + /* keep last */ 353 + __NL802154_SECLEVEL_ATTR_AFTER_LAST, 354 + NL802154_SECLEVEL_ATTR_MAX = __NL802154_SECLEVEL_ATTR_AFTER_LAST - 1 355 + }; 356 + 357 + /* TODO what is this? couldn't find in mib */ 358 + enum { 359 + NL802154_DEVKEY_IGNORE, 360 + NL802154_DEVKEY_RESTRICT, 361 + NL802154_DEVKEY_RECORD, 362 + 363 + /* keep last */ 364 + __NL802154_DEVKEY_AFTER_LAST, 365 + NL802154_DEVKEY_MAX = __NL802154_DEVKEY_AFTER_LAST - 1 366 + }; 367 + 368 + enum nl802154_dev { 369 + NL802154_DEV_ATTR_UNSPEC, 370 + 371 + NL802154_DEV_ATTR_FRAME_COUNTER, 372 + NL802154_DEV_ATTR_PAN_ID, 373 + NL802154_DEV_ATTR_SHORT_ADDR, 374 + NL802154_DEV_ATTR_EXTENDED_ADDR, 375 + NL802154_DEV_ATTR_SECLEVEL_EXEMPT, 376 + NL802154_DEV_ATTR_KEY_MODE, 377 + 378 + /* keep last */ 379 + __NL802154_DEV_ATTR_AFTER_LAST, 380 + NL802154_DEV_ATTR_MAX = __NL802154_DEV_ATTR_AFTER_LAST - 1 381 + }; 382 + 383 + enum nl802154_devkey { 384 + NL802154_DEVKEY_ATTR_UNSPEC, 385 + 386 + NL802154_DEVKEY_ATTR_FRAME_COUNTER, 387 + NL802154_DEVKEY_ATTR_EXTENDED_ADDR, 388 + NL802154_DEVKEY_ATTR_ID, 389 + 390 + /* keep last */ 391 + __NL802154_DEVKEY_ATTR_AFTER_LAST, 392 + NL802154_DEVKEY_ATTR_MAX = __NL802154_DEVKEY_ATTR_AFTER_LAST - 1 393 + }; 394 + 395 + enum nl802154_key { 396 + NL802154_KEY_ATTR_UNSPEC, 397 + 398 + NL802154_KEY_ATTR_ID, 399 + NL802154_KEY_ATTR_USAGE_FRAMES, 400 + 
NL802154_KEY_ATTR_USAGE_CMDS, 401 + NL802154_KEY_ATTR_BYTES, 402 + 403 + /* keep last */ 404 + __NL802154_KEY_ATTR_AFTER_LAST, 405 + NL802154_KEY_ATTR_MAX = __NL802154_KEY_ATTR_AFTER_LAST - 1 406 + }; 407 + 408 + #define NL802154_KEY_SIZE 16 409 + #define NL802154_CMD_FRAME_NR_IDS 256 410 + 411 + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ 277 412 278 413 #endif /* __NL802154_H */
+5
net/6lowpan/core.c
··· 17 17 18 18 void lowpan_netdev_setup(struct net_device *dev, enum lowpan_lltypes lltype) 19 19 { 20 + dev->addr_len = EUI64_ADDR_LEN; 21 + dev->type = ARPHRD_6LOWPAN; 22 + dev->mtu = IPV6_MIN_MTU; 23 + dev->priv_flags |= IFF_NO_QUEUE; 24 + 20 25 lowpan_priv(dev)->lltype = lltype; 21 26 } 22 27 EXPORT_SYMBOL(lowpan_netdev_setup);
-2
net/6lowpan/nhc.h
··· 8 8 #include <net/6lowpan.h> 9 9 #include <net/ipv6.h> 10 10 11 - #define LOWPAN_NHC_MAX_ID_LEN 1 12 - 13 11 /** 14 12 * LOWPAN_NHC - helper macro to generate nh id fields and lowpan_nhc struct 15 13 *
+24 -54
net/bluetooth/6lowpan.c
··· 35 35 static struct dentry *lowpan_control_debugfs; 36 36 37 37 #define IFACE_NAME_TEMPLATE "bt%d" 38 - #define EUI64_ADDR_LEN 8 39 38 40 39 struct skb_cb { 41 40 struct in6_addr addr; ··· 673 674 674 675 static void netdev_setup(struct net_device *dev) 675 676 { 676 - dev->addr_len = EUI64_ADDR_LEN; 677 - dev->type = ARPHRD_6LOWPAN; 678 - 679 677 dev->hard_header_len = 0; 680 678 dev->needed_tailroom = 0; 681 - dev->mtu = IPV6_MIN_MTU; 682 - dev->tx_queue_len = 0; 683 679 dev->flags = IFF_RUNNING | IFF_POINTOPOINT | 684 680 IFF_MULTICAST; 685 681 dev->watchdog_timeo = 0; ··· 769 775 770 776 chan->chan_type = L2CAP_CHAN_CONN_ORIENTED; 771 777 chan->mode = L2CAP_MODE_LE_FLOWCTL; 772 - chan->omtu = 65535; 773 - chan->imtu = chan->omtu; 774 - 775 - return chan; 776 - } 777 - 778 - static struct l2cap_chan *chan_open(struct l2cap_chan *pchan) 779 - { 780 - struct l2cap_chan *chan; 781 - 782 - chan = chan_create(); 783 - if (!chan) 784 - return NULL; 785 - 786 - chan->remote_mps = chan->omtu; 787 - chan->mps = chan->omtu; 788 - 789 - chan->state = BT_CONNECTED; 778 + chan->imtu = 1280; 790 779 791 780 return chan; 792 781 } ··· 896 919 { 897 920 struct l2cap_chan *chan; 898 921 899 - chan = chan_open(pchan); 922 + chan = chan_create(); 923 + if (!chan) 924 + return NULL; 925 + 900 926 chan->ops = pchan->ops; 901 927 902 928 BT_DBG("chan %p pchan %p", chan, pchan); ··· 1045 1065 return BDADDR_LE_RANDOM; 1046 1066 } 1047 1067 1048 - static struct l2cap_chan *chan_get(void) 1049 - { 1050 - struct l2cap_chan *pchan; 1051 - 1052 - pchan = chan_create(); 1053 - if (!pchan) 1054 - return NULL; 1055 - 1056 - pchan->ops = &bt_6lowpan_chan_ops; 1057 - 1058 - return pchan; 1059 - } 1060 - 1061 1068 static int bt_6lowpan_connect(bdaddr_t *addr, u8 dst_type) 1062 1069 { 1063 - struct l2cap_chan *pchan; 1070 + struct l2cap_chan *chan; 1064 1071 int err; 1065 1072 1066 - pchan = chan_get(); 1067 - if (!pchan) 1073 + chan = chan_create(); 1074 + if (!chan) 1068 1075 return -EINVAL; 
1069 1076 1070 - err = l2cap_chan_connect(pchan, cpu_to_le16(L2CAP_PSM_IPSP), 0, 1077 + chan->ops = &bt_6lowpan_chan_ops; 1078 + 1079 + err = l2cap_chan_connect(chan, cpu_to_le16(L2CAP_PSM_IPSP), 0, 1071 1080 addr, dst_type); 1072 1081 1073 - BT_DBG("chan %p err %d", pchan, err); 1082 + BT_DBG("chan %p err %d", chan, err); 1074 1083 if (err < 0) 1075 - l2cap_chan_put(pchan); 1084 + l2cap_chan_put(chan); 1076 1085 1077 1086 return err; 1078 1087 } ··· 1086 1117 static struct l2cap_chan *bt_6lowpan_listen(void) 1087 1118 { 1088 1119 bdaddr_t *addr = BDADDR_ANY; 1089 - struct l2cap_chan *pchan; 1120 + struct l2cap_chan *chan; 1090 1121 int err; 1091 1122 1092 1123 if (!enable_6lowpan) 1093 1124 return NULL; 1094 1125 1095 - pchan = chan_get(); 1096 - if (!pchan) 1126 + chan = chan_create(); 1127 + if (!chan) 1097 1128 return NULL; 1098 1129 1099 - pchan->state = BT_LISTEN; 1100 - pchan->src_type = BDADDR_LE_PUBLIC; 1130 + chan->ops = &bt_6lowpan_chan_ops; 1131 + chan->state = BT_LISTEN; 1132 + chan->src_type = BDADDR_LE_PUBLIC; 1101 1133 1102 - atomic_set(&pchan->nesting, L2CAP_NESTING_PARENT); 1134 + atomic_set(&chan->nesting, L2CAP_NESTING_PARENT); 1103 1135 1104 - BT_DBG("chan %p src type %d", pchan, pchan->src_type); 1136 + BT_DBG("chan %p src type %d", chan, chan->src_type); 1105 1137 1106 - err = l2cap_add_psm(pchan, addr, cpu_to_le16(L2CAP_PSM_IPSP)); 1138 + err = l2cap_add_psm(chan, addr, cpu_to_le16(L2CAP_PSM_IPSP)); 1107 1139 if (err) { 1108 - l2cap_chan_put(pchan); 1140 + l2cap_chan_put(chan); 1109 1141 BT_ERR("psm cannot be added err %d", err); 1110 1142 return NULL; 1111 1143 } 1112 1144 1113 - return pchan; 1145 + return chan; 1114 1146 } 1115 1147 1116 1148 static int get_l2cap_conn(char *buf, bdaddr_t *addr, u8 *addr_type,
+128 -14
net/bluetooth/hci_core.c
··· 134 134 .llseek = default_llseek, 135 135 }; 136 136 137 + static ssize_t vendor_diag_read(struct file *file, char __user *user_buf, 138 + size_t count, loff_t *ppos) 139 + { 140 + struct hci_dev *hdev = file->private_data; 141 + char buf[3]; 142 + 143 + buf[0] = hci_dev_test_flag(hdev, HCI_VENDOR_DIAG) ? 'Y': 'N'; 144 + buf[1] = '\n'; 145 + buf[2] = '\0'; 146 + return simple_read_from_buffer(user_buf, count, ppos, buf, 2); 147 + } 148 + 149 + static ssize_t vendor_diag_write(struct file *file, const char __user *user_buf, 150 + size_t count, loff_t *ppos) 151 + { 152 + struct hci_dev *hdev = file->private_data; 153 + char buf[32]; 154 + size_t buf_size = min(count, (sizeof(buf)-1)); 155 + bool enable; 156 + int err; 157 + 158 + if (copy_from_user(buf, user_buf, buf_size)) 159 + return -EFAULT; 160 + 161 + buf[buf_size] = '\0'; 162 + if (strtobool(buf, &enable)) 163 + return -EINVAL; 164 + 165 + hci_req_lock(hdev); 166 + err = hdev->set_diag(hdev, enable); 167 + hci_req_unlock(hdev); 168 + 169 + if (err < 0) 170 + return err; 171 + 172 + if (enable) 173 + hci_dev_set_flag(hdev, HCI_VENDOR_DIAG); 174 + else 175 + hci_dev_clear_flag(hdev, HCI_VENDOR_DIAG); 176 + 177 + return count; 178 + } 179 + 180 + static const struct file_operations vendor_diag_fops = { 181 + .open = simple_open, 182 + .read = vendor_diag_read, 183 + .write = vendor_diag_write, 184 + .llseek = default_llseek, 185 + }; 186 + 187 + static void hci_debugfs_create_basic(struct hci_dev *hdev) 188 + { 189 + debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev, 190 + &dut_mode_fops); 191 + 192 + if (hdev->set_diag) 193 + debugfs_create_file("vendor_diag", 0644, hdev->debugfs, hdev, 194 + &vendor_diag_fops); 195 + } 196 + 137 197 /* ---- HCI requests ---- */ 138 198 139 199 static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode, ··· 910 850 if (err < 0) 911 851 return err; 912 852 913 - /* The Device Under Test (DUT) mode is special and available for 914 - * all controller 
types. So just create it early on. 915 - */ 916 - if (hci_dev_test_flag(hdev, HCI_SETUP)) { 917 - debugfs_create_file("dut_mode", 0644, hdev->debugfs, hdev, 918 - &dut_mode_fops); 919 - } 853 + if (hci_dev_test_flag(hdev, HCI_SETUP)) 854 + hci_debugfs_create_basic(hdev); 920 855 921 856 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT); 922 857 if (err < 0) ··· 987 932 err = __hci_req_sync(hdev, hci_init0_req, 0, HCI_INIT_TIMEOUT); 988 933 if (err < 0) 989 934 return err; 935 + 936 + if (hci_dev_test_flag(hdev, HCI_SETUP)) 937 + hci_debugfs_create_basic(hdev); 990 938 991 939 return 0; 992 940 } ··· 1443 1385 goto done; 1444 1386 } 1445 1387 1388 + set_bit(HCI_RUNNING, &hdev->flags); 1389 + hci_notify(hdev, HCI_DEV_OPEN); 1390 + 1446 1391 atomic_set(&hdev->cmd_cnt, 1); 1447 1392 set_bit(HCI_INIT, &hdev->flags); 1448 1393 ··· 1526 1465 kfree_skb(hdev->sent_cmd); 1527 1466 hdev->sent_cmd = NULL; 1528 1467 } 1468 + 1469 + clear_bit(HCI_RUNNING, &hdev->flags); 1470 + hci_notify(hdev, HCI_DEV_CLOSE); 1529 1471 1530 1472 hdev->close(hdev); 1531 1473 hdev->flags &= BIT(HCI_RAW); ··· 1615 1551 1616 1552 int hci_dev_do_close(struct hci_dev *hdev) 1617 1553 { 1554 + bool auto_off; 1555 + 1618 1556 BT_DBG("%s %p", hdev->name, hdev); 1619 1557 1620 1558 if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) && ··· 1672 1606 1673 1607 hci_discovery_set_state(hdev, DISCOVERY_STOPPED); 1674 1608 1675 - if (!hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF)) { 1676 - if (hdev->dev_type == HCI_BREDR) 1677 - mgmt_powered(hdev, 0); 1678 - } 1609 + auto_off = hci_dev_test_and_clear_flag(hdev, HCI_AUTO_OFF); 1610 + 1611 + if (!auto_off && hdev->dev_type == HCI_BREDR) 1612 + mgmt_powered(hdev, 0); 1679 1613 1680 1614 hci_inquiry_cache_flush(hdev); 1681 1615 hci_pend_le_actions_clear(hdev); ··· 1692 1626 /* Reset device */ 1693 1627 skb_queue_purge(&hdev->cmd_q); 1694 1628 atomic_set(&hdev->cmd_cnt, 1); 1695 - if (!hci_dev_test_flag(hdev, HCI_AUTO_OFF) && 1696 - !hci_dev_test_flag(hdev, 
HCI_UNCONFIGURED) && 1697 - test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) { 1629 + if (test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks) && 1630 + !auto_off && !hci_dev_test_flag(hdev, HCI_UNCONFIGURED)) { 1698 1631 set_bit(HCI_INIT, &hdev->flags); 1699 1632 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT); 1700 1633 clear_bit(HCI_INIT, &hdev->flags); ··· 1713 1648 kfree_skb(hdev->sent_cmd); 1714 1649 hdev->sent_cmd = NULL; 1715 1650 } 1651 + 1652 + clear_bit(HCI_RUNNING, &hdev->flags); 1653 + hci_notify(hdev, HCI_DEV_CLOSE); 1716 1654 1717 1655 /* After this point our queues are empty 1718 1656 * and no tasks are scheduled. */ ··· 3539 3471 return -ENXIO; 3540 3472 } 3541 3473 3474 + if (bt_cb(skb)->pkt_type != HCI_EVENT_PKT && 3475 + bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT && 3476 + bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) { 3477 + kfree_skb(skb); 3478 + return -EINVAL; 3479 + } 3480 + 3542 3481 /* Incoming skb */ 3543 3482 bt_cb(skb)->incoming = 1; 3544 3483 ··· 3558 3483 return 0; 3559 3484 } 3560 3485 EXPORT_SYMBOL(hci_recv_frame); 3486 + 3487 + /* Receive diagnostic message from HCI drivers */ 3488 + int hci_recv_diag(struct hci_dev *hdev, struct sk_buff *skb) 3489 + { 3490 + /* Time stamp */ 3491 + __net_timestamp(skb); 3492 + 3493 + /* Mark as diagnostic packet and send to monitor */ 3494 + bt_cb(skb)->pkt_type = HCI_DIAG_PKT; 3495 + hci_send_to_monitor(hdev, skb); 3496 + 3497 + kfree_skb(skb); 3498 + return 0; 3499 + } 3500 + EXPORT_SYMBOL(hci_recv_diag); 3561 3501 3562 3502 /* ---- Interface to upper protocols ---- */ 3563 3503 ··· 3619 3529 3620 3530 /* Get rid of skb owner, prior to sending to the driver. 
*/ 3621 3531 skb_orphan(skb); 3532 + 3533 + if (!test_bit(HCI_RUNNING, &hdev->flags)) { 3534 + kfree_skb(skb); 3535 + return; 3536 + } 3622 3537 3623 3538 err = hdev->send(hdev, skb); 3624 3539 if (err < 0) { ··· 3674 3579 3675 3580 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE; 3676 3581 } 3582 + 3583 + /* Send HCI command and wait for command commplete event */ 3584 + struct sk_buff *hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen, 3585 + const void *param, u32 timeout) 3586 + { 3587 + struct sk_buff *skb; 3588 + 3589 + if (!test_bit(HCI_UP, &hdev->flags)) 3590 + return ERR_PTR(-ENETDOWN); 3591 + 3592 + bt_dev_dbg(hdev, "opcode 0x%4.4x plen %d", opcode, plen); 3593 + 3594 + hci_req_lock(hdev); 3595 + skb = __hci_cmd_sync(hdev, opcode, plen, param, timeout); 3596 + hci_req_unlock(hdev); 3597 + 3598 + return skb; 3599 + } 3600 + EXPORT_SYMBOL(hci_cmd_sync); 3677 3601 3678 3602 /* Send ACL data */ 3679 3603 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
+62 -8
net/bluetooth/hci_sock.c
··· 279 279 else 280 280 opcode = cpu_to_le16(HCI_MON_SCO_TX_PKT); 281 281 break; 282 + case HCI_DIAG_PKT: 283 + opcode = cpu_to_le16(HCI_MON_VENDOR_DIAG); 284 + break; 282 285 default: 283 286 return; 284 287 } ··· 306 303 { 307 304 struct hci_mon_hdr *hdr; 308 305 struct hci_mon_new_index *ni; 306 + struct hci_mon_index_info *ii; 309 307 struct sk_buff *skb; 310 308 __le16 opcode; 311 309 ··· 316 312 if (!skb) 317 313 return NULL; 318 314 319 - ni = (void *) skb_put(skb, HCI_MON_NEW_INDEX_SIZE); 315 + ni = (void *)skb_put(skb, HCI_MON_NEW_INDEX_SIZE); 320 316 ni->type = hdev->dev_type; 321 317 ni->bus = hdev->bus; 322 318 bacpy(&ni->bdaddr, &hdev->bdaddr); ··· 331 327 return NULL; 332 328 333 329 opcode = cpu_to_le16(HCI_MON_DEL_INDEX); 330 + break; 331 + 332 + case HCI_DEV_UP: 333 + skb = bt_skb_alloc(HCI_MON_INDEX_INFO_SIZE, GFP_ATOMIC); 334 + if (!skb) 335 + return NULL; 336 + 337 + ii = (void *)skb_put(skb, HCI_MON_INDEX_INFO_SIZE); 338 + bacpy(&ii->bdaddr, &hdev->bdaddr); 339 + ii->manufacturer = cpu_to_le16(hdev->manufacturer); 340 + 341 + opcode = cpu_to_le16(HCI_MON_INDEX_INFO); 342 + break; 343 + 344 + case HCI_DEV_OPEN: 345 + skb = bt_skb_alloc(0, GFP_ATOMIC); 346 + if (!skb) 347 + return NULL; 348 + 349 + opcode = cpu_to_le16(HCI_MON_OPEN_INDEX); 350 + break; 351 + 352 + case HCI_DEV_CLOSE: 353 + skb = bt_skb_alloc(0, GFP_ATOMIC); 354 + if (!skb) 355 + return NULL; 356 + 357 + opcode = cpu_to_le16(HCI_MON_CLOSE_INDEX); 334 358 break; 335 359 336 360 default: ··· 385 353 struct sk_buff *skb; 386 354 387 355 skb = create_monitor_event(hdev, HCI_DEV_REG); 356 + if (!skb) 357 + continue; 358 + 359 + if (sock_queue_rcv_skb(sk, skb)) 360 + kfree_skb(skb); 361 + 362 + if (!test_bit(HCI_RUNNING, &hdev->flags)) 363 + continue; 364 + 365 + skb = create_monitor_event(hdev, HCI_DEV_OPEN); 366 + if (!skb) 367 + continue; 368 + 369 + if (sock_queue_rcv_skb(sk, skb)) 370 + kfree_skb(skb); 371 + 372 + if (!test_bit(HCI_UP, &hdev->flags)) 373 + continue; 374 + 375 + 
skb = create_monitor_event(hdev, HCI_DEV_UP); 388 376 if (!skb) 389 377 continue; 390 378 ··· 444 392 445 393 void hci_sock_dev_event(struct hci_dev *hdev, int event) 446 394 { 447 - struct hci_ev_si_device ev; 448 - 449 395 BT_DBG("hdev %s event %d", hdev->name, event); 450 396 451 - /* Send event to monitor */ 452 397 if (atomic_read(&monitor_promisc)) { 453 398 struct sk_buff *skb; 454 399 400 + /* Send event to monitor */ 455 401 skb = create_monitor_event(hdev, event); 456 402 if (skb) { 457 403 hci_send_to_channel(HCI_CHANNEL_MONITOR, skb, ··· 458 408 } 459 409 } 460 410 461 - /* Send event to sockets */ 462 - ev.event = event; 463 - ev.dev_id = hdev->id; 464 - hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev); 411 + if (event <= HCI_DEV_DOWN) { 412 + struct hci_ev_si_device ev; 413 + 414 + /* Send event to sockets */ 415 + ev.event = event; 416 + ev.dev_id = hdev->id; 417 + hci_si_event(NULL, HCI_EV_SI_DEVICE, sizeof(ev), &ev); 418 + } 465 419 466 420 if (event == HCI_DEV_UNREG) { 467 421 struct sock *sk;
+16
net/bluetooth/lib.c
··· 151 151 } 152 152 EXPORT_SYMBOL(bt_info); 153 153 154 + void bt_warn(const char *format, ...) 155 + { 156 + struct va_format vaf; 157 + va_list args; 158 + 159 + va_start(args, format); 160 + 161 + vaf.fmt = format; 162 + vaf.va = &args; 163 + 164 + pr_warn("%pV", &vaf); 165 + 166 + va_end(args); 167 + } 168 + EXPORT_SYMBOL(bt_warn); 169 + 154 170 void bt_err(const char *format, ...) 155 171 { 156 172 struct va_format vaf;
+11 -7
net/ieee802154/6lowpan/core.c
··· 101 101 102 102 static void lowpan_setup(struct net_device *ldev) 103 103 { 104 - ldev->addr_len = IEEE802154_ADDR_LEN; 105 104 memset(ldev->broadcast, 0xff, IEEE802154_ADDR_LEN); 106 - ldev->type = ARPHRD_6LOWPAN; 107 - /* Frame Control + Sequence Number + Address fields + Security Header */ 108 - ldev->hard_header_len = 2 + 1 + 20 + 14; 109 - ldev->needed_tailroom = 2; /* FCS */ 110 - ldev->mtu = IPV6_MIN_MTU; 111 - ldev->priv_flags |= IFF_NO_QUEUE; 105 + /* We need an ipv6hdr as minimum len when calling xmit */ 106 + ldev->hard_header_len = sizeof(struct ipv6hdr); 112 107 ldev->flags = IFF_BROADCAST | IFF_MULTICAST; 113 108 114 109 ldev->netdev_ops = &lowpan_netdev_ops; ··· 151 156 lowpan_dev_info(ldev)->wdev = wdev; 152 157 /* Set the lowpan hardware address to the wpan hardware address. */ 153 158 memcpy(ldev->dev_addr, wdev->dev_addr, IEEE802154_ADDR_LEN); 159 + /* We need headroom for possible wpan_dev_hard_header call and tailroom 160 + * for encryption/fcs handling. The lowpan interface will replace 161 + * the IPv6 header with 6LoWPAN header. At worst case the 6LoWPAN 162 + * header has LOWPAN_IPHC_MAX_HEADER_LEN more bytes than the IPv6 163 + * header. 164 + */ 165 + ldev->needed_headroom = LOWPAN_IPHC_MAX_HEADER_LEN + 166 + wdev->needed_headroom; 167 + ldev->needed_tailroom = wdev->needed_tailroom; 154 168 155 169 lowpan_netdev_setup(ldev, LOWPAN_LLTYPE_IEEE802154); 156 170
+2
net/ieee802154/6lowpan/rx.c
··· 29 29 static int lowpan_give_skb_to_device(struct sk_buff *skb) 30 30 { 31 31 skb->protocol = htons(ETH_P_IPV6); 32 + skb->dev->stats.rx_packets++; 33 + skb->dev->stats.rx_bytes += skb->len; 32 34 33 35 return netif_rx(skb); 34 36 }
+35 -14
net/ieee802154/6lowpan/tx.c
··· 10 10 11 11 #include <net/6lowpan.h> 12 12 #include <net/ieee802154_netdev.h> 13 + #include <net/mac802154.h> 13 14 14 15 #include "6lowpan_i.h" 15 16 ··· 37 36 sizeof(struct lowpan_addr_info)); 38 37 } 39 38 39 + /* This callback will be called from AF_PACKET and IPv6 stack, the AF_PACKET 40 + * sockets gives an 8 byte array for addresses only! 41 + * 42 + * TODO I think AF_PACKET DGRAM (sending/receiving) RAW (sending) makes no 43 + * sense here. We should disable it, the right use-case would be AF_INET6 44 + * RAW/DGRAM sockets. 45 + */ 40 46 int lowpan_header_create(struct sk_buff *skb, struct net_device *ldev, 41 47 unsigned short type, const void *_daddr, 42 48 const void *_saddr, unsigned int len) ··· 79 71 80 72 static struct sk_buff* 81 73 lowpan_alloc_frag(struct sk_buff *skb, int size, 82 - const struct ieee802154_hdr *master_hdr) 74 + const struct ieee802154_hdr *master_hdr, bool frag1) 83 75 { 84 76 struct net_device *wdev = lowpan_dev_info(skb->dev)->wdev; 85 77 struct sk_buff *frag; 86 78 int rc; 87 79 88 - frag = alloc_skb(wdev->hard_header_len + wdev->needed_tailroom + size, 80 + frag = alloc_skb(wdev->needed_headroom + wdev->needed_tailroom + size, 89 81 GFP_ATOMIC); 90 82 91 83 if (likely(frag)) { 92 84 frag->dev = wdev; 93 85 frag->priority = skb->priority; 94 - skb_reserve(frag, wdev->hard_header_len); 86 + skb_reserve(frag, wdev->needed_headroom); 95 87 skb_reset_network_header(frag); 96 88 *mac_cb(frag) = *mac_cb(skb); 97 89 98 - rc = dev_hard_header(frag, wdev, 0, &master_hdr->dest, 99 - &master_hdr->source, size); 100 - if (rc < 0) { 101 - kfree_skb(frag); 102 - return ERR_PTR(rc); 90 + if (frag1) { 91 + memcpy(skb_put(frag, skb->mac_len), 92 + skb_mac_header(skb), skb->mac_len); 93 + } else { 94 + rc = wpan_dev_hard_header(frag, wdev, 95 + &master_hdr->dest, 96 + &master_hdr->source, size); 97 + if (rc < 0) { 98 + kfree_skb(frag); 99 + return ERR_PTR(rc); 100 + } 103 101 } 104 102 } else { 105 103 frag = ERR_PTR(-ENOMEM); ··· 117 103 
static int 118 104 lowpan_xmit_fragment(struct sk_buff *skb, const struct ieee802154_hdr *wpan_hdr, 119 105 u8 *frag_hdr, int frag_hdrlen, 120 - int offset, int len) 106 + int offset, int len, bool frag1) 121 107 { 122 108 struct sk_buff *frag; 123 109 124 110 raw_dump_inline(__func__, " fragment header", frag_hdr, frag_hdrlen); 125 111 126 - frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr); 112 + frag = lowpan_alloc_frag(skb, frag_hdrlen + len, wpan_hdr, frag1); 127 113 if (IS_ERR(frag)) 128 114 return PTR_ERR(frag); 129 115 ··· 162 148 163 149 rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr, 164 150 LOWPAN_FRAG1_HEAD_SIZE, 0, 165 - frag_len + skb_network_header_len(skb)); 151 + frag_len + skb_network_header_len(skb), 152 + true); 166 153 if (rc) { 167 154 pr_debug("%s unable to send FRAG1 packet (tag: %d)", 168 155 __func__, ntohs(frag_tag)); ··· 184 169 185 170 rc = lowpan_xmit_fragment(skb, wpan_hdr, frag_hdr, 186 171 LOWPAN_FRAGN_HEAD_SIZE, skb_offset, 187 - frag_len); 172 + frag_len, false); 188 173 if (rc) { 189 174 pr_debug("%s unable to send a FRAGN packet. 
(tag: %d, offset: %d)\n", 190 175 __func__, ntohs(frag_tag), skb_offset); ··· 192 177 } 193 178 } while (skb_unprocessed > frag_cap); 194 179 180 + ldev->stats.tx_packets++; 181 + ldev->stats.tx_bytes += dgram_size; 195 182 consume_skb(skb); 196 183 return NET_XMIT_SUCCESS; 197 184 ··· 245 228 cb->ackreq = wpan_dev->ackreq; 246 229 } 247 230 248 - return dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, ETH_P_IPV6, 249 - (void *)&da, (void *)&sa, 0); 231 + return wpan_dev_hard_header(skb, lowpan_dev_info(ldev)->wdev, &da, &sa, 232 + 0); 250 233 } 251 234 252 235 netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *ldev) ··· 256 239 u16 dgram_size, dgram_offset; 257 240 258 241 pr_debug("package xmit\n"); 242 + 243 + WARN_ON_ONCE(skb->len > IPV6_MIN_MTU); 259 244 260 245 /* We must take a copy of the skb before we modify/replace the ipv6 261 246 * header as the header could be used elsewhere ··· 281 262 282 263 if (skb_tail_pointer(skb) - skb_network_header(skb) <= max_single) { 283 264 skb->dev = lowpan_dev_info(ldev)->wdev; 265 + ldev->stats.tx_packets++; 266 + ldev->stats.tx_bytes += dgram_size; 284 267 return dev_queue_xmit(skb); 285 268 } else { 286 269 netdev_tx_t rc;
+5
net/ieee802154/Kconfig
··· 12 12 13 13 if IEEE802154 14 14 15 + config IEEE802154_NL802154_EXPERIMENTAL 16 + bool "IEEE 802.15.4 experimental netlink support" 17 + ---help--- 18 + Adds experimental netlink support for nl802154. 19 + 15 20 config IEEE802154_SOCKET 16 21 tristate "IEEE 802.15.4 socket interface" 17 22 default y
+12
net/ieee802154/core.c
··· 95 95 return result; 96 96 } 97 97 98 + struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx) 99 + { 100 + struct cfg802154_registered_device *rdev; 101 + 102 + ASSERT_RTNL(); 103 + 104 + rdev = cfg802154_rdev_by_wpan_phy_idx(wpan_phy_idx); 105 + if (!rdev) 106 + return NULL; 107 + return &rdev->wpan_phy; 108 + } 109 + 98 110 struct wpan_phy * 99 111 wpan_phy_new(const struct cfg802154_ops *ops, size_t priv_size) 100 112 {
+1
net/ieee802154/core.h
··· 42 42 void cfg802154_dev_free(struct cfg802154_registered_device *rdev); 43 43 struct cfg802154_registered_device * 44 44 cfg802154_rdev_by_wpan_phy_idx(int wpan_phy_idx); 45 + struct wpan_phy *wpan_phy_idx_to_wpan_phy(int wpan_phy_idx); 45 46 46 47 #endif /* __IEEE802154_CORE_H */
+10 -10
net/ieee802154/header_ops.c
··· 83 83 } 84 84 85 85 int 86 - ieee802154_hdr_push(struct sk_buff *skb, const struct ieee802154_hdr *hdr) 86 + ieee802154_hdr_push(struct sk_buff *skb, struct ieee802154_hdr *hdr) 87 87 { 88 - u8 buf[MAC802154_FRAME_HARD_HEADER_LEN]; 88 + u8 buf[IEEE802154_MAX_HEADER_LEN]; 89 89 int pos = 2; 90 90 int rc; 91 - struct ieee802154_hdr_fc fc = hdr->fc; 91 + struct ieee802154_hdr_fc *fc = &hdr->fc; 92 92 93 93 buf[pos++] = hdr->seq; 94 94 95 - fc.dest_addr_mode = hdr->dest.mode; 95 + fc->dest_addr_mode = hdr->dest.mode; 96 96 97 97 rc = ieee802154_hdr_push_addr(buf + pos, &hdr->dest, false); 98 98 if (rc < 0) 99 99 return -EINVAL; 100 100 pos += rc; 101 101 102 - fc.source_addr_mode = hdr->source.mode; 102 + fc->source_addr_mode = hdr->source.mode; 103 103 104 104 if (hdr->source.pan_id == hdr->dest.pan_id && 105 105 hdr->dest.mode != IEEE802154_ADDR_NONE) 106 - fc.intra_pan = true; 106 + fc->intra_pan = true; 107 107 108 - rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc.intra_pan); 108 + rc = ieee802154_hdr_push_addr(buf + pos, &hdr->source, fc->intra_pan); 109 109 if (rc < 0) 110 110 return -EINVAL; 111 111 pos += rc; 112 112 113 - if (fc.security_enabled) { 114 - fc.version = 1; 113 + if (fc->security_enabled) { 114 + fc->version = 1; 115 115 116 116 rc = ieee802154_hdr_push_sechdr(buf + pos, &hdr->sec); 117 117 if (rc < 0) ··· 120 120 pos += rc; 121 121 } 122 122 123 - memcpy(buf, &fc, 2); 123 + memcpy(buf, fc, 2); 124 124 125 125 memcpy(skb_push(skb, pos), buf, pos); 126 126
+1130 -3
net/ieee802154/nl802154.c
··· 232 232 [NL802154_ATTR_SUPPORTED_COMMANDS] = { .type = NLA_NESTED }, 233 233 234 234 [NL802154_ATTR_ACKREQ_DEFAULT] = { .type = NLA_U8 }, 235 + 236 + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL 237 + [NL802154_ATTR_SEC_ENABLED] = { .type = NLA_U8, }, 238 + [NL802154_ATTR_SEC_OUT_LEVEL] = { .type = NLA_U32, }, 239 + [NL802154_ATTR_SEC_OUT_KEY_ID] = { .type = NLA_NESTED, }, 240 + [NL802154_ATTR_SEC_FRAME_COUNTER] = { .type = NLA_U32 }, 241 + 242 + [NL802154_ATTR_SEC_LEVEL] = { .type = NLA_NESTED }, 243 + [NL802154_ATTR_SEC_DEVICE] = { .type = NLA_NESTED }, 244 + [NL802154_ATTR_SEC_DEVKEY] = { .type = NLA_NESTED }, 245 + [NL802154_ATTR_SEC_KEY] = { .type = NLA_NESTED }, 246 + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ 235 247 }; 248 + 249 + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL 250 + static int 251 + nl802154_prepare_wpan_dev_dump(struct sk_buff *skb, 252 + struct netlink_callback *cb, 253 + struct cfg802154_registered_device **rdev, 254 + struct wpan_dev **wpan_dev) 255 + { 256 + int err; 257 + 258 + rtnl_lock(); 259 + 260 + if (!cb->args[0]) { 261 + err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl802154_fam.hdrsize, 262 + nl802154_fam.attrbuf, nl802154_fam.maxattr, 263 + nl802154_policy); 264 + if (err) 265 + goto out_unlock; 266 + 267 + *wpan_dev = __cfg802154_wpan_dev_from_attrs(sock_net(skb->sk), 268 + nl802154_fam.attrbuf); 269 + if (IS_ERR(*wpan_dev)) { 270 + err = PTR_ERR(*wpan_dev); 271 + goto out_unlock; 272 + } 273 + *rdev = wpan_phy_to_rdev((*wpan_dev)->wpan_phy); 274 + /* 0 is the first index - add 1 to parse only once */ 275 + cb->args[0] = (*rdev)->wpan_phy_idx + 1; 276 + cb->args[1] = (*wpan_dev)->identifier; 277 + } else { 278 + /* subtract the 1 again here */ 279 + struct wpan_phy *wpan_phy = wpan_phy_idx_to_wpan_phy(cb->args[0] - 1); 280 + struct wpan_dev *tmp; 281 + 282 + if (!wpan_phy) { 283 + err = -ENODEV; 284 + goto out_unlock; 285 + } 286 + *rdev = wpan_phy_to_rdev(wpan_phy); 287 + *wpan_dev = NULL; 288 + 289 + 
list_for_each_entry(tmp, &(*rdev)->wpan_dev_list, list) { 290 + if (tmp->identifier == cb->args[1]) { 291 + *wpan_dev = tmp; 292 + break; 293 + } 294 + } 295 + 296 + if (!*wpan_dev) { 297 + err = -ENODEV; 298 + goto out_unlock; 299 + } 300 + } 301 + 302 + return 0; 303 + out_unlock: 304 + rtnl_unlock(); 305 + return err; 306 + } 307 + 308 + static void 309 + nl802154_finish_wpan_dev_dump(struct cfg802154_registered_device *rdev) 310 + { 311 + rtnl_unlock(); 312 + } 313 + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ 236 314 237 315 /* message building helper */ 238 316 static inline void *nl802154hdr_put(struct sk_buff *skb, u32 portid, u32 seq, ··· 690 612 ((u64)wpan_phy_to_rdev(wpan_dev->wpan_phy)->wpan_phy_idx << 32); 691 613 } 692 614 615 + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL 616 + #include <net/ieee802154_netdev.h> 617 + 618 + static int 619 + ieee802154_llsec_send_key_id(struct sk_buff *msg, 620 + const struct ieee802154_llsec_key_id *desc) 621 + { 622 + struct nlattr *nl_dev_addr; 623 + 624 + if (nla_put_u32(msg, NL802154_KEY_ID_ATTR_MODE, desc->mode)) 625 + return -ENOBUFS; 626 + 627 + switch (desc->mode) { 628 + case NL802154_KEY_ID_MODE_IMPLICIT: 629 + nl_dev_addr = nla_nest_start(msg, NL802154_KEY_ID_ATTR_IMPLICIT); 630 + if (!nl_dev_addr) 631 + return -ENOBUFS; 632 + 633 + if (nla_put_le16(msg, NL802154_DEV_ADDR_ATTR_PAN_ID, 634 + desc->device_addr.pan_id) || 635 + nla_put_u32(msg, NL802154_DEV_ADDR_ATTR_MODE, 636 + desc->device_addr.mode)) 637 + return -ENOBUFS; 638 + 639 + switch (desc->device_addr.mode) { 640 + case NL802154_DEV_ADDR_SHORT: 641 + if (nla_put_le16(msg, NL802154_DEV_ADDR_ATTR_SHORT, 642 + desc->device_addr.short_addr)) 643 + return -ENOBUFS; 644 + break; 645 + case NL802154_DEV_ADDR_EXTENDED: 646 + if (nla_put_le64(msg, NL802154_DEV_ADDR_ATTR_EXTENDED, 647 + desc->device_addr.extended_addr)) 648 + return -ENOBUFS; 649 + break; 650 + default: 651 + /* userspace should handle unknown */ 652 + break; 653 + } 654 + 655 + 
nla_nest_end(msg, nl_dev_addr); 656 + break; 657 + case NL802154_KEY_ID_MODE_INDEX: 658 + break; 659 + case NL802154_KEY_ID_MODE_INDEX_SHORT: 660 + /* TODO renmae short_source? */ 661 + if (nla_put_le32(msg, NL802154_KEY_ID_ATTR_SOURCE_SHORT, 662 + desc->short_source)) 663 + return -ENOBUFS; 664 + break; 665 + case NL802154_KEY_ID_MODE_INDEX_EXTENDED: 666 + if (nla_put_le64(msg, NL802154_KEY_ID_ATTR_SOURCE_EXTENDED, 667 + desc->extended_source)) 668 + return -ENOBUFS; 669 + break; 670 + default: 671 + /* userspace should handle unknown */ 672 + break; 673 + } 674 + 675 + /* TODO key_id to key_idx ? Check naming */ 676 + if (desc->mode != NL802154_KEY_ID_MODE_IMPLICIT) { 677 + if (nla_put_u8(msg, NL802154_KEY_ID_ATTR_INDEX, desc->id)) 678 + return -ENOBUFS; 679 + } 680 + 681 + return 0; 682 + } 683 + 684 + static int nl802154_get_llsec_params(struct sk_buff *msg, 685 + struct cfg802154_registered_device *rdev, 686 + struct wpan_dev *wpan_dev) 687 + { 688 + struct nlattr *nl_key_id; 689 + struct ieee802154_llsec_params params; 690 + int ret; 691 + 692 + ret = rdev_get_llsec_params(rdev, wpan_dev, &params); 693 + if (ret < 0) 694 + return ret; 695 + 696 + if (nla_put_u8(msg, NL802154_ATTR_SEC_ENABLED, params.enabled) || 697 + nla_put_u32(msg, NL802154_ATTR_SEC_OUT_LEVEL, params.out_level) || 698 + nla_put_be32(msg, NL802154_ATTR_SEC_FRAME_COUNTER, 699 + params.frame_counter)) 700 + return -ENOBUFS; 701 + 702 + nl_key_id = nla_nest_start(msg, NL802154_ATTR_SEC_OUT_KEY_ID); 703 + if (!nl_key_id) 704 + return -ENOBUFS; 705 + 706 + ret = ieee802154_llsec_send_key_id(msg, &params.out_key); 707 + if (ret < 0) 708 + return ret; 709 + 710 + nla_nest_end(msg, nl_key_id); 711 + 712 + return 0; 713 + } 714 + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ 715 + 693 716 static int 694 717 nl802154_send_iface(struct sk_buff *msg, u32 portid, u32 seq, int flags, 695 718 struct cfg802154_registered_device *rdev, ··· 841 662 /* ackreq default behaviour */ 842 663 if 
(nla_put_u8(msg, NL802154_ATTR_ACKREQ_DEFAULT, wpan_dev->ackreq)) 843 664 goto nla_put_failure; 665 + 666 + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL 667 + if (nl802154_get_llsec_params(msg, rdev, wpan_dev) < 0) 668 + goto nla_put_failure; 669 + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ 844 670 845 671 genlmsg_end(msg, hdr); 846 672 return 0; ··· 937 753 return -EINVAL; 938 754 } 939 755 940 - /* TODO add nla_get_le64 to netlink */ 941 756 if (info->attrs[NL802154_ATTR_EXTENDED_ADDR]) 942 - extended_addr = (__force __le64)nla_get_u64( 943 - info->attrs[NL802154_ATTR_EXTENDED_ADDR]); 757 + extended_addr = nla_get_le64(info->attrs[NL802154_ATTR_EXTENDED_ADDR]); 944 758 945 759 if (!rdev->ops->add_virtual_intf) 946 760 return -EOPNOTSUPP; ··· 1257 1075 return rdev_set_ackreq_default(rdev, wpan_dev, ackreq); 1258 1076 } 1259 1077 1078 + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL 1079 + static const struct nla_policy nl802154_dev_addr_policy[NL802154_DEV_ADDR_ATTR_MAX + 1] = { 1080 + [NL802154_DEV_ADDR_ATTR_PAN_ID] = { .type = NLA_U16 }, 1081 + [NL802154_DEV_ADDR_ATTR_MODE] = { .type = NLA_U32 }, 1082 + [NL802154_DEV_ADDR_ATTR_SHORT] = { .type = NLA_U16 }, 1083 + [NL802154_DEV_ADDR_ATTR_EXTENDED] = { .type = NLA_U64 }, 1084 + }; 1085 + 1086 + static int 1087 + ieee802154_llsec_parse_dev_addr(struct nlattr *nla, 1088 + struct ieee802154_addr *addr) 1089 + { 1090 + struct nlattr *attrs[NL802154_DEV_ADDR_ATTR_MAX + 1]; 1091 + 1092 + if (!nla || nla_parse_nested(attrs, NL802154_DEV_ADDR_ATTR_MAX, nla, 1093 + nl802154_dev_addr_policy)) 1094 + return -EINVAL; 1095 + 1096 + if (!attrs[NL802154_DEV_ADDR_ATTR_PAN_ID] && 1097 + !attrs[NL802154_DEV_ADDR_ATTR_MODE] && 1098 + !(attrs[NL802154_DEV_ADDR_ATTR_SHORT] || 1099 + attrs[NL802154_DEV_ADDR_ATTR_EXTENDED])) 1100 + return -EINVAL; 1101 + 1102 + addr->pan_id = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_PAN_ID]); 1103 + addr->mode = nla_get_u32(attrs[NL802154_DEV_ADDR_ATTR_MODE]); 1104 + switch (addr->mode) { 
1105 + case NL802154_DEV_ADDR_SHORT: 1106 + addr->short_addr = nla_get_le16(attrs[NL802154_DEV_ADDR_ATTR_SHORT]); 1107 + break; 1108 + case NL802154_DEV_ADDR_EXTENDED: 1109 + addr->extended_addr = nla_get_le64(attrs[NL802154_DEV_ADDR_ATTR_EXTENDED]); 1110 + break; 1111 + default: 1112 + return -EINVAL; 1113 + } 1114 + 1115 + return 0; 1116 + } 1117 + 1118 + static const struct nla_policy nl802154_key_id_policy[NL802154_KEY_ID_ATTR_MAX + 1] = { 1119 + [NL802154_KEY_ID_ATTR_MODE] = { .type = NLA_U32 }, 1120 + [NL802154_KEY_ID_ATTR_INDEX] = { .type = NLA_U8 }, 1121 + [NL802154_KEY_ID_ATTR_IMPLICIT] = { .type = NLA_NESTED }, 1122 + [NL802154_KEY_ID_ATTR_SOURCE_SHORT] = { .type = NLA_U32 }, 1123 + [NL802154_KEY_ID_ATTR_SOURCE_EXTENDED] = { .type = NLA_U64 }, 1124 + }; 1125 + 1126 + static int 1127 + ieee802154_llsec_parse_key_id(struct nlattr *nla, 1128 + struct ieee802154_llsec_key_id *desc) 1129 + { 1130 + struct nlattr *attrs[NL802154_KEY_ID_ATTR_MAX + 1]; 1131 + 1132 + if (!nla || nla_parse_nested(attrs, NL802154_KEY_ID_ATTR_MAX, nla, 1133 + nl802154_key_id_policy)) 1134 + return -EINVAL; 1135 + 1136 + if (!attrs[NL802154_KEY_ID_ATTR_MODE]) 1137 + return -EINVAL; 1138 + 1139 + desc->mode = nla_get_u32(attrs[NL802154_KEY_ID_ATTR_MODE]); 1140 + switch (desc->mode) { 1141 + case NL802154_KEY_ID_MODE_IMPLICIT: 1142 + if (!attrs[NL802154_KEY_ID_ATTR_IMPLICIT]) 1143 + return -EINVAL; 1144 + 1145 + if (ieee802154_llsec_parse_dev_addr(attrs[NL802154_KEY_ID_ATTR_IMPLICIT], 1146 + &desc->device_addr) < 0) 1147 + return -EINVAL; 1148 + break; 1149 + case NL802154_KEY_ID_MODE_INDEX: 1150 + break; 1151 + case NL802154_KEY_ID_MODE_INDEX_SHORT: 1152 + if (!attrs[NL802154_KEY_ID_ATTR_SOURCE_SHORT]) 1153 + return -EINVAL; 1154 + 1155 + desc->short_source = nla_get_le32(attrs[NL802154_KEY_ID_ATTR_SOURCE_SHORT]); 1156 + break; 1157 + case NL802154_KEY_ID_MODE_INDEX_EXTENDED: 1158 + if (!attrs[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED]) 1159 + return -EINVAL; 1160 + 1161 + 
desc->extended_source = nla_get_le64(attrs[NL802154_KEY_ID_ATTR_SOURCE_EXTENDED]); 1162 + break; 1163 + default: 1164 + return -EINVAL; 1165 + } 1166 + 1167 + if (desc->mode != NL802154_KEY_ID_MODE_IMPLICIT) { 1168 + if (!attrs[NL802154_KEY_ID_ATTR_INDEX]) 1169 + return -EINVAL; 1170 + 1171 + /* TODO change id to idx */ 1172 + desc->id = nla_get_u8(attrs[NL802154_KEY_ID_ATTR_INDEX]); 1173 + } 1174 + 1175 + return 0; 1176 + } 1177 + 1178 + static int nl802154_set_llsec_params(struct sk_buff *skb, 1179 + struct genl_info *info) 1180 + { 1181 + struct cfg802154_registered_device *rdev = info->user_ptr[0]; 1182 + struct net_device *dev = info->user_ptr[1]; 1183 + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; 1184 + struct ieee802154_llsec_params params; 1185 + u32 changed = 0; 1186 + int ret; 1187 + 1188 + if (info->attrs[NL802154_ATTR_SEC_ENABLED]) { 1189 + u8 enabled; 1190 + 1191 + enabled = nla_get_u8(info->attrs[NL802154_ATTR_SEC_ENABLED]); 1192 + if (enabled != 0 && enabled != 1) 1193 + return -EINVAL; 1194 + 1195 + params.enabled = nla_get_u8(info->attrs[NL802154_ATTR_SEC_ENABLED]); 1196 + changed |= IEEE802154_LLSEC_PARAM_ENABLED; 1197 + } 1198 + 1199 + if (info->attrs[NL802154_ATTR_SEC_OUT_KEY_ID]) { 1200 + ret = ieee802154_llsec_parse_key_id(info->attrs[NL802154_ATTR_SEC_OUT_KEY_ID], 1201 + &params.out_key); 1202 + if (ret < 0) 1203 + return ret; 1204 + 1205 + changed |= IEEE802154_LLSEC_PARAM_OUT_KEY; 1206 + } 1207 + 1208 + if (info->attrs[NL802154_ATTR_SEC_OUT_LEVEL]) { 1209 + params.out_level = nla_get_u32(info->attrs[NL802154_ATTR_SEC_OUT_LEVEL]); 1210 + if (params.out_level > NL802154_SECLEVEL_MAX) 1211 + return -EINVAL; 1212 + 1213 + changed |= IEEE802154_LLSEC_PARAM_OUT_LEVEL; 1214 + } 1215 + 1216 + if (info->attrs[NL802154_ATTR_SEC_FRAME_COUNTER]) { 1217 + params.frame_counter = nla_get_be32(info->attrs[NL802154_ATTR_SEC_FRAME_COUNTER]); 1218 + changed |= IEEE802154_LLSEC_PARAM_FRAME_COUNTER; 1219 + } 1220 + 1221 + return rdev_set_llsec_params(rdev, 
wpan_dev, &params, changed); 1222 + } 1223 + 1224 + static int nl802154_send_key(struct sk_buff *msg, u32 cmd, u32 portid, 1225 + u32 seq, int flags, 1226 + struct cfg802154_registered_device *rdev, 1227 + struct net_device *dev, 1228 + const struct ieee802154_llsec_key_entry *key) 1229 + { 1230 + void *hdr; 1231 + u32 commands[NL802154_CMD_FRAME_NR_IDS / 32]; 1232 + struct nlattr *nl_key, *nl_key_id; 1233 + 1234 + hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); 1235 + if (!hdr) 1236 + return -1; 1237 + 1238 + if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) 1239 + goto nla_put_failure; 1240 + 1241 + nl_key = nla_nest_start(msg, NL802154_ATTR_SEC_KEY); 1242 + if (!nl_key) 1243 + goto nla_put_failure; 1244 + 1245 + nl_key_id = nla_nest_start(msg, NL802154_KEY_ATTR_ID); 1246 + if (!nl_key_id) 1247 + goto nla_put_failure; 1248 + 1249 + if (ieee802154_llsec_send_key_id(msg, &key->id) < 0) 1250 + goto nla_put_failure; 1251 + 1252 + nla_nest_end(msg, nl_key_id); 1253 + 1254 + if (nla_put_u8(msg, NL802154_KEY_ATTR_USAGE_FRAMES, 1255 + key->key->frame_types)) 1256 + goto nla_put_failure; 1257 + 1258 + if (key->key->frame_types & BIT(NL802154_FRAME_CMD)) { 1259 + /* TODO for each nested */ 1260 + memset(commands, 0, sizeof(commands)); 1261 + commands[7] = key->key->cmd_frame_ids; 1262 + if (nla_put(msg, NL802154_KEY_ATTR_USAGE_CMDS, 1263 + sizeof(commands), commands)) 1264 + goto nla_put_failure; 1265 + } 1266 + 1267 + if (nla_put(msg, NL802154_KEY_ATTR_BYTES, NL802154_KEY_SIZE, 1268 + key->key->key)) 1269 + goto nla_put_failure; 1270 + 1271 + nla_nest_end(msg, nl_key); 1272 + genlmsg_end(msg, hdr); 1273 + 1274 + return 0; 1275 + 1276 + nla_put_failure: 1277 + genlmsg_cancel(msg, hdr); 1278 + return -EMSGSIZE; 1279 + } 1280 + 1281 + static int 1282 + nl802154_dump_llsec_key(struct sk_buff *skb, struct netlink_callback *cb) 1283 + { 1284 + struct cfg802154_registered_device *rdev = NULL; 1285 + struct ieee802154_llsec_key_entry *key; 1286 + struct 
ieee802154_llsec_table *table; 1287 + struct wpan_dev *wpan_dev; 1288 + int err; 1289 + 1290 + err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev); 1291 + if (err) 1292 + return err; 1293 + 1294 + if (!wpan_dev->netdev) { 1295 + err = -EINVAL; 1296 + goto out_err; 1297 + } 1298 + 1299 + rdev_lock_llsec_table(rdev, wpan_dev); 1300 + rdev_get_llsec_table(rdev, wpan_dev, &table); 1301 + 1302 + /* TODO make it like station dump */ 1303 + if (cb->args[2]) 1304 + goto out; 1305 + 1306 + list_for_each_entry(key, &table->keys, list) { 1307 + if (nl802154_send_key(skb, NL802154_CMD_NEW_SEC_KEY, 1308 + NETLINK_CB(cb->skb).portid, 1309 + cb->nlh->nlmsg_seq, NLM_F_MULTI, 1310 + rdev, wpan_dev->netdev, key) < 0) { 1311 + /* TODO */ 1312 + err = -EIO; 1313 + rdev_unlock_llsec_table(rdev, wpan_dev); 1314 + goto out_err; 1315 + } 1316 + } 1317 + 1318 + cb->args[2] = 1; 1319 + 1320 + out: 1321 + rdev_unlock_llsec_table(rdev, wpan_dev); 1322 + err = skb->len; 1323 + out_err: 1324 + nl802154_finish_wpan_dev_dump(rdev); 1325 + 1326 + return err; 1327 + } 1328 + 1329 + static const struct nla_policy nl802154_key_policy[NL802154_KEY_ATTR_MAX + 1] = { 1330 + [NL802154_KEY_ATTR_ID] = { NLA_NESTED }, 1331 + /* TODO handle it as for_each_nested and NLA_FLAG? */ 1332 + [NL802154_KEY_ATTR_USAGE_FRAMES] = { NLA_U8 }, 1333 + /* TODO handle it as for_each_nested, not static array? 
*/ 1334 + [NL802154_KEY_ATTR_USAGE_CMDS] = { .len = NL802154_CMD_FRAME_NR_IDS / 8 }, 1335 + [NL802154_KEY_ATTR_BYTES] = { .len = NL802154_KEY_SIZE }, 1336 + }; 1337 + 1338 + static int nl802154_add_llsec_key(struct sk_buff *skb, struct genl_info *info) 1339 + { 1340 + struct cfg802154_registered_device *rdev = info->user_ptr[0]; 1341 + struct net_device *dev = info->user_ptr[1]; 1342 + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; 1343 + struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1]; 1344 + struct ieee802154_llsec_key key = { }; 1345 + struct ieee802154_llsec_key_id id = { }; 1346 + u32 commands[NL802154_CMD_FRAME_NR_IDS / 32] = { }; 1347 + 1348 + if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX, 1349 + info->attrs[NL802154_ATTR_SEC_KEY], 1350 + nl802154_key_policy)) 1351 + return -EINVAL; 1352 + 1353 + if (!attrs[NL802154_KEY_ATTR_USAGE_FRAMES] || 1354 + !attrs[NL802154_KEY_ATTR_BYTES]) 1355 + return -EINVAL; 1356 + 1357 + if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0) 1358 + return -ENOBUFS; 1359 + 1360 + key.frame_types = nla_get_u8(attrs[NL802154_KEY_ATTR_USAGE_FRAMES]); 1361 + if (key.frame_types > BIT(NL802154_FRAME_MAX) || 1362 + ((key.frame_types & BIT(NL802154_FRAME_CMD)) && 1363 + !attrs[NL802154_KEY_ATTR_USAGE_CMDS])) 1364 + return -EINVAL; 1365 + 1366 + if (attrs[NL802154_KEY_ATTR_USAGE_CMDS]) { 1367 + /* TODO for each nested */ 1368 + nla_memcpy(commands, attrs[NL802154_KEY_ATTR_USAGE_CMDS], 1369 + NL802154_CMD_FRAME_NR_IDS / 8); 1370 + 1371 + /* TODO understand the -EINVAL logic here? 
last condition */ 1372 + if (commands[0] || commands[1] || commands[2] || commands[3] || 1373 + commands[4] || commands[5] || commands[6] || 1374 + commands[7] > BIT(NL802154_CMD_FRAME_MAX)) 1375 + return -EINVAL; 1376 + 1377 + key.cmd_frame_ids = commands[7]; 1378 + } else { 1379 + key.cmd_frame_ids = 0; 1380 + } 1381 + 1382 + nla_memcpy(key.key, attrs[NL802154_KEY_ATTR_BYTES], NL802154_KEY_SIZE); 1383 + 1384 + if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0) 1385 + return -ENOBUFS; 1386 + 1387 + return rdev_add_llsec_key(rdev, wpan_dev, &id, &key); 1388 + } 1389 + 1390 + static int nl802154_del_llsec_key(struct sk_buff *skb, struct genl_info *info) 1391 + { 1392 + struct cfg802154_registered_device *rdev = info->user_ptr[0]; 1393 + struct net_device *dev = info->user_ptr[1]; 1394 + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; 1395 + struct nlattr *attrs[NL802154_KEY_ATTR_MAX + 1]; 1396 + struct ieee802154_llsec_key_id id; 1397 + 1398 + if (nla_parse_nested(attrs, NL802154_KEY_ATTR_MAX, 1399 + info->attrs[NL802154_ATTR_SEC_KEY], 1400 + nl802154_key_policy)) 1401 + return -EINVAL; 1402 + 1403 + if (ieee802154_llsec_parse_key_id(attrs[NL802154_KEY_ATTR_ID], &id) < 0) 1404 + return -ENOBUFS; 1405 + 1406 + return rdev_del_llsec_key(rdev, wpan_dev, &id); 1407 + } 1408 + 1409 + static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid, 1410 + u32 seq, int flags, 1411 + struct cfg802154_registered_device *rdev, 1412 + struct net_device *dev, 1413 + const struct ieee802154_llsec_device *dev_desc) 1414 + { 1415 + void *hdr; 1416 + struct nlattr *nl_device; 1417 + 1418 + hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); 1419 + if (!hdr) 1420 + return -1; 1421 + 1422 + if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) 1423 + goto nla_put_failure; 1424 + 1425 + nl_device = nla_nest_start(msg, NL802154_ATTR_SEC_DEVICE); 1426 + if (!nl_device) 1427 + goto nla_put_failure; 1428 + 1429 + if (nla_put_u32(msg, 
NL802154_DEV_ATTR_FRAME_COUNTER, 1430 + dev_desc->frame_counter) || 1431 + nla_put_le16(msg, NL802154_DEV_ATTR_PAN_ID, dev_desc->pan_id) || 1432 + nla_put_le16(msg, NL802154_DEV_ATTR_SHORT_ADDR, 1433 + dev_desc->short_addr) || 1434 + nla_put_le64(msg, NL802154_DEV_ATTR_EXTENDED_ADDR, 1435 + dev_desc->hwaddr) || 1436 + nla_put_u8(msg, NL802154_DEV_ATTR_SECLEVEL_EXEMPT, 1437 + dev_desc->seclevel_exempt) || 1438 + nla_put_u32(msg, NL802154_DEV_ATTR_KEY_MODE, dev_desc->key_mode)) 1439 + goto nla_put_failure; 1440 + 1441 + nla_nest_end(msg, nl_device); 1442 + genlmsg_end(msg, hdr); 1443 + 1444 + return 0; 1445 + 1446 + nla_put_failure: 1447 + genlmsg_cancel(msg, hdr); 1448 + return -EMSGSIZE; 1449 + } 1450 + 1451 + static int 1452 + nl802154_dump_llsec_dev(struct sk_buff *skb, struct netlink_callback *cb) 1453 + { 1454 + struct cfg802154_registered_device *rdev = NULL; 1455 + struct ieee802154_llsec_device *dev; 1456 + struct ieee802154_llsec_table *table; 1457 + struct wpan_dev *wpan_dev; 1458 + int err; 1459 + 1460 + err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev); 1461 + if (err) 1462 + return err; 1463 + 1464 + if (!wpan_dev->netdev) { 1465 + err = -EINVAL; 1466 + goto out_err; 1467 + } 1468 + 1469 + rdev_lock_llsec_table(rdev, wpan_dev); 1470 + rdev_get_llsec_table(rdev, wpan_dev, &table); 1471 + 1472 + /* TODO make it like station dump */ 1473 + if (cb->args[2]) 1474 + goto out; 1475 + 1476 + list_for_each_entry(dev, &table->devices, list) { 1477 + if (nl802154_send_device(skb, NL802154_CMD_NEW_SEC_LEVEL, 1478 + NETLINK_CB(cb->skb).portid, 1479 + cb->nlh->nlmsg_seq, NLM_F_MULTI, 1480 + rdev, wpan_dev->netdev, dev) < 0) { 1481 + /* TODO */ 1482 + err = -EIO; 1483 + rdev_unlock_llsec_table(rdev, wpan_dev); 1484 + goto out_err; 1485 + } 1486 + } 1487 + 1488 + cb->args[2] = 1; 1489 + 1490 + out: 1491 + rdev_unlock_llsec_table(rdev, wpan_dev); 1492 + err = skb->len; 1493 + out_err: 1494 + nl802154_finish_wpan_dev_dump(rdev); 1495 + 1496 + return err; 
1497 + } 1498 + 1499 + static const struct nla_policy nl802154_dev_policy[NL802154_DEV_ATTR_MAX + 1] = { 1500 + [NL802154_DEV_ATTR_FRAME_COUNTER] = { NLA_U32 }, 1501 + [NL802154_DEV_ATTR_PAN_ID] = { .type = NLA_U16 }, 1502 + [NL802154_DEV_ATTR_SHORT_ADDR] = { .type = NLA_U16 }, 1503 + [NL802154_DEV_ATTR_EXTENDED_ADDR] = { .type = NLA_U64 }, 1504 + [NL802154_DEV_ATTR_SECLEVEL_EXEMPT] = { NLA_U8 }, 1505 + [NL802154_DEV_ATTR_KEY_MODE] = { NLA_U32 }, 1506 + }; 1507 + 1508 + static int 1509 + ieee802154_llsec_parse_device(struct nlattr *nla, 1510 + struct ieee802154_llsec_device *dev) 1511 + { 1512 + struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1]; 1513 + 1514 + if (!nla || nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX, nla, 1515 + nl802154_dev_policy)) 1516 + return -EINVAL; 1517 + 1518 + memset(dev, 0, sizeof(*dev)); 1519 + 1520 + if (!attrs[NL802154_DEV_ATTR_FRAME_COUNTER] || 1521 + !attrs[NL802154_DEV_ATTR_PAN_ID] || 1522 + !attrs[NL802154_DEV_ATTR_SHORT_ADDR] || 1523 + !attrs[NL802154_DEV_ATTR_EXTENDED_ADDR] || 1524 + !attrs[NL802154_DEV_ATTR_SECLEVEL_EXEMPT] || 1525 + !attrs[NL802154_DEV_ATTR_KEY_MODE]) 1526 + return -EINVAL; 1527 + 1528 + /* TODO be32 */ 1529 + dev->frame_counter = nla_get_u32(attrs[NL802154_DEV_ATTR_FRAME_COUNTER]); 1530 + dev->pan_id = nla_get_le16(attrs[NL802154_DEV_ATTR_PAN_ID]); 1531 + dev->short_addr = nla_get_le16(attrs[NL802154_DEV_ATTR_SHORT_ADDR]); 1532 + /* TODO rename hwaddr to extended_addr */ 1533 + dev->hwaddr = nla_get_le64(attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]); 1534 + dev->seclevel_exempt = nla_get_u8(attrs[NL802154_DEV_ATTR_SECLEVEL_EXEMPT]); 1535 + dev->key_mode = nla_get_u32(attrs[NL802154_DEV_ATTR_KEY_MODE]); 1536 + 1537 + if (dev->key_mode > NL802154_DEVKEY_MAX || 1538 + (dev->seclevel_exempt != 0 && dev->seclevel_exempt != 1)) 1539 + return -EINVAL; 1540 + 1541 + return 0; 1542 + } 1543 + 1544 + static int nl802154_add_llsec_dev(struct sk_buff *skb, struct genl_info *info) 1545 + { 1546 + struct 
cfg802154_registered_device *rdev = info->user_ptr[0]; 1547 + struct net_device *dev = info->user_ptr[1]; 1548 + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; 1549 + struct ieee802154_llsec_device dev_desc; 1550 + 1551 + if (ieee802154_llsec_parse_device(info->attrs[NL802154_ATTR_SEC_DEVICE], 1552 + &dev_desc) < 0) 1553 + return -EINVAL; 1554 + 1555 + return rdev_add_device(rdev, wpan_dev, &dev_desc); 1556 + } 1557 + 1558 + static int nl802154_del_llsec_dev(struct sk_buff *skb, struct genl_info *info) 1559 + { 1560 + struct cfg802154_registered_device *rdev = info->user_ptr[0]; 1561 + struct net_device *dev = info->user_ptr[1]; 1562 + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; 1563 + struct nlattr *attrs[NL802154_DEV_ATTR_MAX + 1]; 1564 + __le64 extended_addr; 1565 + 1566 + if (nla_parse_nested(attrs, NL802154_DEV_ATTR_MAX, 1567 + info->attrs[NL802154_ATTR_SEC_DEVICE], 1568 + nl802154_dev_policy)) 1569 + return -EINVAL; 1570 + 1571 + if (!attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]) 1572 + return -EINVAL; 1573 + 1574 + extended_addr = nla_get_le64(attrs[NL802154_DEV_ATTR_EXTENDED_ADDR]); 1575 + return rdev_del_device(rdev, wpan_dev, extended_addr); 1576 + } 1577 + 1578 + static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid, 1579 + u32 seq, int flags, 1580 + struct cfg802154_registered_device *rdev, 1581 + struct net_device *dev, __le64 extended_addr, 1582 + const struct ieee802154_llsec_device_key *devkey) 1583 + { 1584 + void *hdr; 1585 + struct nlattr *nl_devkey, *nl_key_id; 1586 + 1587 + hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); 1588 + if (!hdr) 1589 + return -1; 1590 + 1591 + if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) 1592 + goto nla_put_failure; 1593 + 1594 + nl_devkey = nla_nest_start(msg, NL802154_ATTR_SEC_DEVKEY); 1595 + if (!nl_devkey) 1596 + goto nla_put_failure; 1597 + 1598 + if (nla_put_le64(msg, NL802154_DEVKEY_ATTR_EXTENDED_ADDR, 1599 + extended_addr) || 1600 + nla_put_u32(msg, 
NL802154_DEVKEY_ATTR_FRAME_COUNTER, 1601 + devkey->frame_counter)) 1602 + goto nla_put_failure; 1603 + 1604 + nl_key_id = nla_nest_start(msg, NL802154_DEVKEY_ATTR_ID); 1605 + if (!nl_key_id) 1606 + goto nla_put_failure; 1607 + 1608 + if (ieee802154_llsec_send_key_id(msg, &devkey->key_id) < 0) 1609 + goto nla_put_failure; 1610 + 1611 + nla_nest_end(msg, nl_key_id); 1612 + nla_nest_end(msg, nl_devkey); 1613 + genlmsg_end(msg, hdr); 1614 + 1615 + return 0; 1616 + 1617 + nla_put_failure: 1618 + genlmsg_cancel(msg, hdr); 1619 + return -EMSGSIZE; 1620 + } 1621 + 1622 + static int 1623 + nl802154_dump_llsec_devkey(struct sk_buff *skb, struct netlink_callback *cb) 1624 + { 1625 + struct cfg802154_registered_device *rdev = NULL; 1626 + struct ieee802154_llsec_device_key *kpos; 1627 + struct ieee802154_llsec_device *dpos; 1628 + struct ieee802154_llsec_table *table; 1629 + struct wpan_dev *wpan_dev; 1630 + int err; 1631 + 1632 + err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev); 1633 + if (err) 1634 + return err; 1635 + 1636 + if (!wpan_dev->netdev) { 1637 + err = -EINVAL; 1638 + goto out_err; 1639 + } 1640 + 1641 + rdev_lock_llsec_table(rdev, wpan_dev); 1642 + rdev_get_llsec_table(rdev, wpan_dev, &table); 1643 + 1644 + /* TODO make it like station dump */ 1645 + if (cb->args[2]) 1646 + goto out; 1647 + 1648 + /* TODO look if remove devkey and do some nested attribute */ 1649 + list_for_each_entry(dpos, &table->devices, list) { 1650 + list_for_each_entry(kpos, &dpos->keys, list) { 1651 + if (nl802154_send_devkey(skb, 1652 + NL802154_CMD_NEW_SEC_LEVEL, 1653 + NETLINK_CB(cb->skb).portid, 1654 + cb->nlh->nlmsg_seq, 1655 + NLM_F_MULTI, rdev, 1656 + wpan_dev->netdev, 1657 + dpos->hwaddr, 1658 + kpos) < 0) { 1659 + /* TODO */ 1660 + err = -EIO; 1661 + rdev_unlock_llsec_table(rdev, wpan_dev); 1662 + goto out_err; 1663 + } 1664 + } 1665 + } 1666 + 1667 + cb->args[2] = 1; 1668 + 1669 + out: 1670 + rdev_unlock_llsec_table(rdev, wpan_dev); 1671 + err = skb->len; 1672 + 
out_err: 1673 + nl802154_finish_wpan_dev_dump(rdev); 1674 + 1675 + return err; 1676 + } 1677 + 1678 + static const struct nla_policy nl802154_devkey_policy[NL802154_DEVKEY_ATTR_MAX + 1] = { 1679 + [NL802154_DEVKEY_ATTR_FRAME_COUNTER] = { NLA_U32 }, 1680 + [NL802154_DEVKEY_ATTR_EXTENDED_ADDR] = { NLA_U64 }, 1681 + [NL802154_DEVKEY_ATTR_ID] = { NLA_NESTED }, 1682 + }; 1683 + 1684 + static int nl802154_add_llsec_devkey(struct sk_buff *skb, struct genl_info *info) 1685 + { 1686 + struct cfg802154_registered_device *rdev = info->user_ptr[0]; 1687 + struct net_device *dev = info->user_ptr[1]; 1688 + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; 1689 + struct nlattr *attrs[NL802154_DEVKEY_ATTR_MAX + 1]; 1690 + struct ieee802154_llsec_device_key key; 1691 + __le64 extended_addr; 1692 + 1693 + if (!info->attrs[NL802154_ATTR_SEC_DEVKEY] || 1694 + nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX, 1695 + info->attrs[NL802154_ATTR_SEC_DEVKEY], 1696 + nl802154_devkey_policy) < 0) 1697 + return -EINVAL; 1698 + 1699 + if (!attrs[NL802154_DEVKEY_ATTR_FRAME_COUNTER] || 1700 + !attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]) 1701 + return -EINVAL; 1702 + 1703 + /* TODO change key.id ? 
*/ 1704 + if (ieee802154_llsec_parse_key_id(attrs[NL802154_DEVKEY_ATTR_ID], 1705 + &key.key_id) < 0) 1706 + return -ENOBUFS; 1707 + 1708 + /* TODO be32 */ 1709 + key.frame_counter = nla_get_u32(attrs[NL802154_DEVKEY_ATTR_FRAME_COUNTER]); 1710 + /* TODO change naming hwaddr -> extended_addr 1711 + * check unique identifier short+pan OR extended_addr 1712 + */ 1713 + extended_addr = nla_get_le64(attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]); 1714 + return rdev_add_devkey(rdev, wpan_dev, extended_addr, &key); 1715 + } 1716 + 1717 + static int nl802154_del_llsec_devkey(struct sk_buff *skb, struct genl_info *info) 1718 + { 1719 + struct cfg802154_registered_device *rdev = info->user_ptr[0]; 1720 + struct net_device *dev = info->user_ptr[1]; 1721 + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; 1722 + struct nlattr *attrs[NL802154_DEVKEY_ATTR_MAX + 1]; 1723 + struct ieee802154_llsec_device_key key; 1724 + __le64 extended_addr; 1725 + 1726 + if (nla_parse_nested(attrs, NL802154_DEVKEY_ATTR_MAX, 1727 + info->attrs[NL802154_ATTR_SEC_DEVKEY], 1728 + nl802154_devkey_policy)) 1729 + return -EINVAL; 1730 + 1731 + if (!attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]) 1732 + return -EINVAL; 1733 + 1734 + /* TODO change key.id ? 
*/ 1735 + if (ieee802154_llsec_parse_key_id(attrs[NL802154_DEVKEY_ATTR_ID], 1736 + &key.key_id) < 0) 1737 + return -ENOBUFS; 1738 + 1739 + /* TODO change naming hwaddr -> extended_addr 1740 + * check unique identifier short+pan OR extended_addr 1741 + */ 1742 + extended_addr = nla_get_le64(attrs[NL802154_DEVKEY_ATTR_EXTENDED_ADDR]); 1743 + return rdev_del_devkey(rdev, wpan_dev, extended_addr, &key); 1744 + } 1745 + 1746 + static int nl802154_send_seclevel(struct sk_buff *msg, u32 cmd, u32 portid, 1747 + u32 seq, int flags, 1748 + struct cfg802154_registered_device *rdev, 1749 + struct net_device *dev, 1750 + const struct ieee802154_llsec_seclevel *sl) 1751 + { 1752 + void *hdr; 1753 + struct nlattr *nl_seclevel; 1754 + 1755 + hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); 1756 + if (!hdr) 1757 + return -1; 1758 + 1759 + if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) 1760 + goto nla_put_failure; 1761 + 1762 + nl_seclevel = nla_nest_start(msg, NL802154_ATTR_SEC_LEVEL); 1763 + if (!nl_seclevel) 1764 + goto nla_put_failure; 1765 + 1766 + if (nla_put_u32(msg, NL802154_SECLEVEL_ATTR_FRAME, sl->frame_type) || 1767 + nla_put_u32(msg, NL802154_SECLEVEL_ATTR_LEVELS, sl->sec_levels) || 1768 + nla_put_u8(msg, NL802154_SECLEVEL_ATTR_DEV_OVERRIDE, 1769 + sl->device_override)) 1770 + goto nla_put_failure; 1771 + 1772 + if (sl->frame_type == NL802154_FRAME_CMD) { 1773 + if (nla_put_u32(msg, NL802154_SECLEVEL_ATTR_CMD_FRAME, 1774 + sl->cmd_frame_id)) 1775 + goto nla_put_failure; 1776 + } 1777 + 1778 + nla_nest_end(msg, nl_seclevel); 1779 + genlmsg_end(msg, hdr); 1780 + 1781 + return 0; 1782 + 1783 + nla_put_failure: 1784 + genlmsg_cancel(msg, hdr); 1785 + return -EMSGSIZE; 1786 + } 1787 + 1788 + static int 1789 + nl802154_dump_llsec_seclevel(struct sk_buff *skb, struct netlink_callback *cb) 1790 + { 1791 + struct cfg802154_registered_device *rdev = NULL; 1792 + struct ieee802154_llsec_seclevel *sl; 1793 + struct ieee802154_llsec_table *table; 1794 + struct 
wpan_dev *wpan_dev; 1795 + int err; 1796 + 1797 + err = nl802154_prepare_wpan_dev_dump(skb, cb, &rdev, &wpan_dev); 1798 + if (err) 1799 + return err; 1800 + 1801 + if (!wpan_dev->netdev) { 1802 + err = -EINVAL; 1803 + goto out_err; 1804 + } 1805 + 1806 + rdev_lock_llsec_table(rdev, wpan_dev); 1807 + rdev_get_llsec_table(rdev, wpan_dev, &table); 1808 + 1809 + /* TODO make it like station dump */ 1810 + if (cb->args[2]) 1811 + goto out; 1812 + 1813 + list_for_each_entry(sl, &table->security_levels, list) { 1814 + if (nl802154_send_seclevel(skb, NL802154_CMD_NEW_SEC_LEVEL, 1815 + NETLINK_CB(cb->skb).portid, 1816 + cb->nlh->nlmsg_seq, NLM_F_MULTI, 1817 + rdev, wpan_dev->netdev, sl) < 0) { 1818 + /* TODO */ 1819 + err = -EIO; 1820 + rdev_unlock_llsec_table(rdev, wpan_dev); 1821 + goto out_err; 1822 + } 1823 + } 1824 + 1825 + cb->args[2] = 1; 1826 + 1827 + out: 1828 + rdev_unlock_llsec_table(rdev, wpan_dev); 1829 + err = skb->len; 1830 + out_err: 1831 + nl802154_finish_wpan_dev_dump(rdev); 1832 + 1833 + return err; 1834 + } 1835 + 1836 + static const struct nla_policy nl802154_seclevel_policy[NL802154_SECLEVEL_ATTR_MAX + 1] = { 1837 + [NL802154_SECLEVEL_ATTR_LEVELS] = { .type = NLA_U8 }, 1838 + [NL802154_SECLEVEL_ATTR_FRAME] = { .type = NLA_U32 }, 1839 + [NL802154_SECLEVEL_ATTR_CMD_FRAME] = { .type = NLA_U32 }, 1840 + [NL802154_SECLEVEL_ATTR_DEV_OVERRIDE] = { .type = NLA_U8 }, 1841 + }; 1842 + 1843 + static int 1844 + llsec_parse_seclevel(struct nlattr *nla, struct ieee802154_llsec_seclevel *sl) 1845 + { 1846 + struct nlattr *attrs[NL802154_SECLEVEL_ATTR_MAX + 1]; 1847 + 1848 + if (!nla || nla_parse_nested(attrs, NL802154_SECLEVEL_ATTR_MAX, nla, 1849 + nl802154_seclevel_policy)) 1850 + return -EINVAL; 1851 + 1852 + memset(sl, 0, sizeof(*sl)); 1853 + 1854 + if (!attrs[NL802154_SECLEVEL_ATTR_LEVELS] || 1855 + !attrs[NL802154_SECLEVEL_ATTR_FRAME] || 1856 + !attrs[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE]) 1857 + return -EINVAL; 1858 + 1859 + sl->sec_levels = 
nla_get_u8(attrs[NL802154_SECLEVEL_ATTR_LEVELS]); 1860 + sl->frame_type = nla_get_u32(attrs[NL802154_SECLEVEL_ATTR_FRAME]); 1861 + sl->device_override = nla_get_u8(attrs[NL802154_SECLEVEL_ATTR_DEV_OVERRIDE]); 1862 + if (sl->frame_type > NL802154_FRAME_MAX || 1863 + (sl->device_override != 0 && sl->device_override != 1)) 1864 + return -EINVAL; 1865 + 1866 + if (sl->frame_type == NL802154_FRAME_CMD) { 1867 + if (!attrs[NL802154_SECLEVEL_ATTR_CMD_FRAME]) 1868 + return -EINVAL; 1869 + 1870 + sl->cmd_frame_id = nla_get_u32(attrs[NL802154_SECLEVEL_ATTR_CMD_FRAME]); 1871 + if (sl->cmd_frame_id > NL802154_CMD_FRAME_MAX) 1872 + return -EINVAL; 1873 + } 1874 + 1875 + return 0; 1876 + } 1877 + 1878 + static int nl802154_add_llsec_seclevel(struct sk_buff *skb, 1879 + struct genl_info *info) 1880 + { 1881 + struct cfg802154_registered_device *rdev = info->user_ptr[0]; 1882 + struct net_device *dev = info->user_ptr[1]; 1883 + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; 1884 + struct ieee802154_llsec_seclevel sl; 1885 + 1886 + if (llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL], 1887 + &sl) < 0) 1888 + return -EINVAL; 1889 + 1890 + return rdev_add_seclevel(rdev, wpan_dev, &sl); 1891 + } 1892 + 1893 + static int nl802154_del_llsec_seclevel(struct sk_buff *skb, 1894 + struct genl_info *info) 1895 + { 1896 + struct cfg802154_registered_device *rdev = info->user_ptr[0]; 1897 + struct net_device *dev = info->user_ptr[1]; 1898 + struct wpan_dev *wpan_dev = dev->ieee802154_ptr; 1899 + struct ieee802154_llsec_seclevel sl; 1900 + 1901 + if (!info->attrs[NL802154_ATTR_SEC_LEVEL] || 1902 + llsec_parse_seclevel(info->attrs[NL802154_ATTR_SEC_LEVEL], 1903 + &sl) < 0) 1904 + return -EINVAL; 1905 + 1906 + return rdev_del_seclevel(rdev, wpan_dev, &sl); 1907 + } 1908 + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ 1909 + 1260 1910 #define NL802154_FLAG_NEED_WPAN_PHY 0x01 1261 1911 #define NL802154_FLAG_NEED_NETDEV 0x02 1262 1912 #define NL802154_FLAG_NEED_RTNL 0x04 ··· 2303 
1289 .internal_flags = NL802154_FLAG_NEED_NETDEV | 2304 1290 NL802154_FLAG_NEED_RTNL, 2305 1291 }, 1292 + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL 1293 + { 1294 + .cmd = NL802154_CMD_SET_SEC_PARAMS, 1295 + .doit = nl802154_set_llsec_params, 1296 + .policy = nl802154_policy, 1297 + .flags = GENL_ADMIN_PERM, 1298 + .internal_flags = NL802154_FLAG_NEED_NETDEV | 1299 + NL802154_FLAG_NEED_RTNL, 1300 + }, 1301 + { 1302 + .cmd = NL802154_CMD_GET_SEC_KEY, 1303 + /* TODO .doit by matching key id? */ 1304 + .dumpit = nl802154_dump_llsec_key, 1305 + .policy = nl802154_policy, 1306 + .flags = GENL_ADMIN_PERM, 1307 + .internal_flags = NL802154_FLAG_NEED_NETDEV | 1308 + NL802154_FLAG_NEED_RTNL, 1309 + }, 1310 + { 1311 + .cmd = NL802154_CMD_NEW_SEC_KEY, 1312 + .doit = nl802154_add_llsec_key, 1313 + .policy = nl802154_policy, 1314 + .flags = GENL_ADMIN_PERM, 1315 + .internal_flags = NL802154_FLAG_NEED_NETDEV | 1316 + NL802154_FLAG_NEED_RTNL, 1317 + }, 1318 + { 1319 + .cmd = NL802154_CMD_DEL_SEC_KEY, 1320 + .doit = nl802154_del_llsec_key, 1321 + .policy = nl802154_policy, 1322 + .flags = GENL_ADMIN_PERM, 1323 + .internal_flags = NL802154_FLAG_NEED_NETDEV | 1324 + NL802154_FLAG_NEED_RTNL, 1325 + }, 1326 + /* TODO unique identifier must short+pan OR extended_addr */ 1327 + { 1328 + .cmd = NL802154_CMD_GET_SEC_DEV, 1329 + /* TODO .doit by matching extended_addr? 
*/ 1330 + .dumpit = nl802154_dump_llsec_dev, 1331 + .policy = nl802154_policy, 1332 + .flags = GENL_ADMIN_PERM, 1333 + .internal_flags = NL802154_FLAG_NEED_NETDEV | 1334 + NL802154_FLAG_NEED_RTNL, 1335 + }, 1336 + { 1337 + .cmd = NL802154_CMD_NEW_SEC_DEV, 1338 + .doit = nl802154_add_llsec_dev, 1339 + .policy = nl802154_policy, 1340 + .flags = GENL_ADMIN_PERM, 1341 + .internal_flags = NL802154_FLAG_NEED_NETDEV | 1342 + NL802154_FLAG_NEED_RTNL, 1343 + }, 1344 + { 1345 + .cmd = NL802154_CMD_DEL_SEC_DEV, 1346 + .doit = nl802154_del_llsec_dev, 1347 + .policy = nl802154_policy, 1348 + .flags = GENL_ADMIN_PERM, 1349 + .internal_flags = NL802154_FLAG_NEED_NETDEV | 1350 + NL802154_FLAG_NEED_RTNL, 1351 + }, 1352 + /* TODO remove complete devkey, put it as nested? */ 1353 + { 1354 + .cmd = NL802154_CMD_GET_SEC_DEVKEY, 1355 + /* TODO doit by matching ??? */ 1356 + .dumpit = nl802154_dump_llsec_devkey, 1357 + .policy = nl802154_policy, 1358 + .flags = GENL_ADMIN_PERM, 1359 + .internal_flags = NL802154_FLAG_NEED_NETDEV | 1360 + NL802154_FLAG_NEED_RTNL, 1361 + }, 1362 + { 1363 + .cmd = NL802154_CMD_NEW_SEC_DEVKEY, 1364 + .doit = nl802154_add_llsec_devkey, 1365 + .policy = nl802154_policy, 1366 + .flags = GENL_ADMIN_PERM, 1367 + .internal_flags = NL802154_FLAG_NEED_NETDEV | 1368 + NL802154_FLAG_NEED_RTNL, 1369 + }, 1370 + { 1371 + .cmd = NL802154_CMD_DEL_SEC_DEVKEY, 1372 + .doit = nl802154_del_llsec_devkey, 1373 + .policy = nl802154_policy, 1374 + .flags = GENL_ADMIN_PERM, 1375 + .internal_flags = NL802154_FLAG_NEED_NETDEV | 1376 + NL802154_FLAG_NEED_RTNL, 1377 + }, 1378 + { 1379 + .cmd = NL802154_CMD_GET_SEC_LEVEL, 1380 + /* TODO .doit by matching frame_type? 
*/ 1381 + .dumpit = nl802154_dump_llsec_seclevel, 1382 + .policy = nl802154_policy, 1383 + .flags = GENL_ADMIN_PERM, 1384 + .internal_flags = NL802154_FLAG_NEED_NETDEV | 1385 + NL802154_FLAG_NEED_RTNL, 1386 + }, 1387 + { 1388 + .cmd = NL802154_CMD_NEW_SEC_LEVEL, 1389 + .doit = nl802154_add_llsec_seclevel, 1390 + .policy = nl802154_policy, 1391 + .flags = GENL_ADMIN_PERM, 1392 + .internal_flags = NL802154_FLAG_NEED_NETDEV | 1393 + NL802154_FLAG_NEED_RTNL, 1394 + }, 1395 + { 1396 + .cmd = NL802154_CMD_DEL_SEC_LEVEL, 1397 + /* TODO match frame_type only? */ 1398 + .doit = nl802154_del_llsec_seclevel, 1399 + .policy = nl802154_policy, 1400 + .flags = GENL_ADMIN_PERM, 1401 + .internal_flags = NL802154_FLAG_NEED_NETDEV | 1402 + NL802154_FLAG_NEED_RTNL, 1403 + }, 1404 + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ 2306 1405 }; 2307 1406 2308 1407 /* initialisation/exit functions */
+109
net/ieee802154/rdev-ops.h
··· 208 208 return ret; 209 209 } 210 210 211 + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL 212 + /* TODO this is already a nl802154, so move into ieee802154 */ 213 + static inline void 214 + rdev_get_llsec_table(struct cfg802154_registered_device *rdev, 215 + struct wpan_dev *wpan_dev, 216 + struct ieee802154_llsec_table **table) 217 + { 218 + rdev->ops->get_llsec_table(&rdev->wpan_phy, wpan_dev, table); 219 + } 220 + 221 + static inline void 222 + rdev_lock_llsec_table(struct cfg802154_registered_device *rdev, 223 + struct wpan_dev *wpan_dev) 224 + { 225 + rdev->ops->lock_llsec_table(&rdev->wpan_phy, wpan_dev); 226 + } 227 + 228 + static inline void 229 + rdev_unlock_llsec_table(struct cfg802154_registered_device *rdev, 230 + struct wpan_dev *wpan_dev) 231 + { 232 + rdev->ops->unlock_llsec_table(&rdev->wpan_phy, wpan_dev); 233 + } 234 + 235 + static inline int 236 + rdev_get_llsec_params(struct cfg802154_registered_device *rdev, 237 + struct wpan_dev *wpan_dev, 238 + struct ieee802154_llsec_params *params) 239 + { 240 + return rdev->ops->get_llsec_params(&rdev->wpan_phy, wpan_dev, params); 241 + } 242 + 243 + static inline int 244 + rdev_set_llsec_params(struct cfg802154_registered_device *rdev, 245 + struct wpan_dev *wpan_dev, 246 + const struct ieee802154_llsec_params *params, 247 + u32 changed) 248 + { 249 + return rdev->ops->set_llsec_params(&rdev->wpan_phy, wpan_dev, params, 250 + changed); 251 + } 252 + 253 + static inline int 254 + rdev_add_llsec_key(struct cfg802154_registered_device *rdev, 255 + struct wpan_dev *wpan_dev, 256 + const struct ieee802154_llsec_key_id *id, 257 + const struct ieee802154_llsec_key *key) 258 + { 259 + return rdev->ops->add_llsec_key(&rdev->wpan_phy, wpan_dev, id, key); 260 + } 261 + 262 + static inline int 263 + rdev_del_llsec_key(struct cfg802154_registered_device *rdev, 264 + struct wpan_dev *wpan_dev, 265 + const struct ieee802154_llsec_key_id *id) 266 + { 267 + return rdev->ops->del_llsec_key(&rdev->wpan_phy, wpan_dev, 
id); 268 + } 269 + 270 + static inline int 271 + rdev_add_seclevel(struct cfg802154_registered_device *rdev, 272 + struct wpan_dev *wpan_dev, 273 + const struct ieee802154_llsec_seclevel *sl) 274 + { 275 + return rdev->ops->add_seclevel(&rdev->wpan_phy, wpan_dev, sl); 276 + } 277 + 278 + static inline int 279 + rdev_del_seclevel(struct cfg802154_registered_device *rdev, 280 + struct wpan_dev *wpan_dev, 281 + const struct ieee802154_llsec_seclevel *sl) 282 + { 283 + return rdev->ops->del_seclevel(&rdev->wpan_phy, wpan_dev, sl); 284 + } 285 + 286 + static inline int 287 + rdev_add_device(struct cfg802154_registered_device *rdev, 288 + struct wpan_dev *wpan_dev, 289 + const struct ieee802154_llsec_device *dev_desc) 290 + { 291 + return rdev->ops->add_device(&rdev->wpan_phy, wpan_dev, dev_desc); 292 + } 293 + 294 + static inline int 295 + rdev_del_device(struct cfg802154_registered_device *rdev, 296 + struct wpan_dev *wpan_dev, __le64 extended_addr) 297 + { 298 + return rdev->ops->del_device(&rdev->wpan_phy, wpan_dev, extended_addr); 299 + } 300 + 301 + static inline int 302 + rdev_add_devkey(struct cfg802154_registered_device *rdev, 303 + struct wpan_dev *wpan_dev, __le64 extended_addr, 304 + const struct ieee802154_llsec_device_key *devkey) 305 + { 306 + return rdev->ops->add_devkey(&rdev->wpan_phy, wpan_dev, extended_addr, 307 + devkey); 308 + } 309 + 310 + static inline int 311 + rdev_del_devkey(struct cfg802154_registered_device *rdev, 312 + struct wpan_dev *wpan_dev, __le64 extended_addr, 313 + const struct ieee802154_llsec_device_key *devkey) 314 + { 315 + return rdev->ops->del_devkey(&rdev->wpan_phy, wpan_dev, extended_addr, 316 + devkey); 317 + } 318 + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ 319 + 211 320 #endif /* __CFG802154_RDEV_OPS */
+4 -4
net/ieee802154/socket.c
··· 273 273 goto out; 274 274 } 275 275 276 - mtu = dev->mtu; 276 + mtu = IEEE802154_MTU; 277 277 pr_debug("name = %s, mtu = %u\n", dev->name, mtu); 278 278 279 279 if (size > mtu) { ··· 637 637 err = -ENXIO; 638 638 goto out; 639 639 } 640 - mtu = dev->mtu; 640 + mtu = IEEE802154_MTU; 641 641 pr_debug("name = %s, mtu = %u\n", dev->name, mtu); 642 642 643 643 if (size > mtu) { ··· 676 676 cb->seclevel = ro->seclevel; 677 677 cb->seclevel_override = ro->seclevel_override; 678 678 679 - err = dev_hard_header(skb, dev, ETH_P_IEEE802154, &dst_addr, 680 - ro->bound ? &ro->src_addr : NULL, size); 679 + err = wpan_dev_hard_header(skb, dev, &dst_addr, 680 + ro->bound ? &ro->src_addr : NULL, size); 681 681 if (err < 0) 682 682 goto out_skb; 683 683
+205
net/mac802154/cfg.c
··· 266 266 return 0; 267 267 } 268 268 269 + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL 270 + static void 271 + ieee802154_get_llsec_table(struct wpan_phy *wpan_phy, 272 + struct wpan_dev *wpan_dev, 273 + struct ieee802154_llsec_table **table) 274 + { 275 + struct net_device *dev = wpan_dev->netdev; 276 + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 277 + 278 + *table = &sdata->sec.table; 279 + } 280 + 281 + static void 282 + ieee802154_lock_llsec_table(struct wpan_phy *wpan_phy, 283 + struct wpan_dev *wpan_dev) 284 + { 285 + struct net_device *dev = wpan_dev->netdev; 286 + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 287 + 288 + mutex_lock(&sdata->sec_mtx); 289 + } 290 + 291 + static void 292 + ieee802154_unlock_llsec_table(struct wpan_phy *wpan_phy, 293 + struct wpan_dev *wpan_dev) 294 + { 295 + struct net_device *dev = wpan_dev->netdev; 296 + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 297 + 298 + mutex_unlock(&sdata->sec_mtx); 299 + } 300 + 301 + static int 302 + ieee802154_set_llsec_params(struct wpan_phy *wpan_phy, 303 + struct wpan_dev *wpan_dev, 304 + const struct ieee802154_llsec_params *params, 305 + int changed) 306 + { 307 + struct net_device *dev = wpan_dev->netdev; 308 + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 309 + int res; 310 + 311 + mutex_lock(&sdata->sec_mtx); 312 + res = mac802154_llsec_set_params(&sdata->sec, params, changed); 313 + mutex_unlock(&sdata->sec_mtx); 314 + 315 + return res; 316 + } 317 + 318 + static int 319 + ieee802154_get_llsec_params(struct wpan_phy *wpan_phy, 320 + struct wpan_dev *wpan_dev, 321 + struct ieee802154_llsec_params *params) 322 + { 323 + struct net_device *dev = wpan_dev->netdev; 324 + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 325 + int res; 326 + 327 + mutex_lock(&sdata->sec_mtx); 328 + res = mac802154_llsec_get_params(&sdata->sec, params); 329 + mutex_unlock(&sdata->sec_mtx); 
330 + 331 + return res; 332 + } 333 + 334 + static int 335 + ieee802154_add_llsec_key(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, 336 + const struct ieee802154_llsec_key_id *id, 337 + const struct ieee802154_llsec_key *key) 338 + { 339 + struct net_device *dev = wpan_dev->netdev; 340 + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 341 + int res; 342 + 343 + mutex_lock(&sdata->sec_mtx); 344 + res = mac802154_llsec_key_add(&sdata->sec, id, key); 345 + mutex_unlock(&sdata->sec_mtx); 346 + 347 + return res; 348 + } 349 + 350 + static int 351 + ieee802154_del_llsec_key(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, 352 + const struct ieee802154_llsec_key_id *id) 353 + { 354 + struct net_device *dev = wpan_dev->netdev; 355 + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 356 + int res; 357 + 358 + mutex_lock(&sdata->sec_mtx); 359 + res = mac802154_llsec_key_del(&sdata->sec, id); 360 + mutex_unlock(&sdata->sec_mtx); 361 + 362 + return res; 363 + } 364 + 365 + static int 366 + ieee802154_add_seclevel(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, 367 + const struct ieee802154_llsec_seclevel *sl) 368 + { 369 + struct net_device *dev = wpan_dev->netdev; 370 + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 371 + int res; 372 + 373 + mutex_lock(&sdata->sec_mtx); 374 + res = mac802154_llsec_seclevel_add(&sdata->sec, sl); 375 + mutex_unlock(&sdata->sec_mtx); 376 + 377 + return res; 378 + } 379 + 380 + static int 381 + ieee802154_del_seclevel(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, 382 + const struct ieee802154_llsec_seclevel *sl) 383 + { 384 + struct net_device *dev = wpan_dev->netdev; 385 + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 386 + int res; 387 + 388 + mutex_lock(&sdata->sec_mtx); 389 + res = mac802154_llsec_seclevel_del(&sdata->sec, sl); 390 + mutex_unlock(&sdata->sec_mtx); 391 + 392 + return res; 393 + } 394 + 395 + static int 396 + 
ieee802154_add_device(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, 397 + const struct ieee802154_llsec_device *dev_desc) 398 + { 399 + struct net_device *dev = wpan_dev->netdev; 400 + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 401 + int res; 402 + 403 + mutex_lock(&sdata->sec_mtx); 404 + res = mac802154_llsec_dev_add(&sdata->sec, dev_desc); 405 + mutex_unlock(&sdata->sec_mtx); 406 + 407 + return res; 408 + } 409 + 410 + static int 411 + ieee802154_del_device(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, 412 + __le64 extended_addr) 413 + { 414 + struct net_device *dev = wpan_dev->netdev; 415 + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 416 + int res; 417 + 418 + mutex_lock(&sdata->sec_mtx); 419 + res = mac802154_llsec_dev_del(&sdata->sec, extended_addr); 420 + mutex_unlock(&sdata->sec_mtx); 421 + 422 + return res; 423 + } 424 + 425 + static int 426 + ieee802154_add_devkey(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, 427 + __le64 extended_addr, 428 + const struct ieee802154_llsec_device_key *key) 429 + { 430 + struct net_device *dev = wpan_dev->netdev; 431 + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 432 + int res; 433 + 434 + mutex_lock(&sdata->sec_mtx); 435 + res = mac802154_llsec_devkey_add(&sdata->sec, extended_addr, key); 436 + mutex_unlock(&sdata->sec_mtx); 437 + 438 + return res; 439 + } 440 + 441 + static int 442 + ieee802154_del_devkey(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev, 443 + __le64 extended_addr, 444 + const struct ieee802154_llsec_device_key *key) 445 + { 446 + struct net_device *dev = wpan_dev->netdev; 447 + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 448 + int res; 449 + 450 + mutex_lock(&sdata->sec_mtx); 451 + res = mac802154_llsec_devkey_del(&sdata->sec, extended_addr, key); 452 + mutex_unlock(&sdata->sec_mtx); 453 + 454 + return res; 455 + } 456 + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ 
457 + 269 458 const struct cfg802154_ops mac802154_config_ops = { 270 459 .add_virtual_intf_deprecated = ieee802154_add_iface_deprecated, 271 460 .del_virtual_intf_deprecated = ieee802154_del_iface_deprecated, ··· 473 284 .set_max_frame_retries = ieee802154_set_max_frame_retries, 474 285 .set_lbt_mode = ieee802154_set_lbt_mode, 475 286 .set_ackreq_default = ieee802154_set_ackreq_default, 287 + #ifdef CONFIG_IEEE802154_NL802154_EXPERIMENTAL 288 + .get_llsec_table = ieee802154_get_llsec_table, 289 + .lock_llsec_table = ieee802154_lock_llsec_table, 290 + .unlock_llsec_table = ieee802154_unlock_llsec_table, 291 + /* TODO above */ 292 + .set_llsec_params = ieee802154_set_llsec_params, 293 + .get_llsec_params = ieee802154_get_llsec_params, 294 + .add_llsec_key = ieee802154_add_llsec_key, 295 + .del_llsec_key = ieee802154_del_llsec_key, 296 + .add_seclevel = ieee802154_add_seclevel, 297 + .del_seclevel = ieee802154_del_seclevel, 298 + .add_device = ieee802154_add_device, 299 + .del_device = ieee802154_del_device, 300 + .add_devkey = ieee802154_add_devkey, 301 + .del_devkey = ieee802154_del_devkey, 302 + #endif /* CONFIG_IEEE802154_NL802154_EXPERIMENTAL */ 476 303 };
+102 -16
net/mac802154/iface.c
··· 367 367 return 0; 368 368 } 369 369 370 - static int mac802154_header_create(struct sk_buff *skb, 371 - struct net_device *dev, 372 - unsigned short type, 373 - const void *daddr, 374 - const void *saddr, 375 - unsigned len) 370 + static int ieee802154_header_create(struct sk_buff *skb, 371 + struct net_device *dev, 372 + const struct ieee802154_addr *daddr, 373 + const struct ieee802154_addr *saddr, 374 + unsigned len) 376 375 { 377 376 struct ieee802154_hdr hdr; 378 377 struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); ··· 422 423 return hlen; 423 424 } 424 425 426 + static const struct wpan_dev_header_ops ieee802154_header_ops = { 427 + .create = ieee802154_header_create, 428 + }; 429 + 430 + /* This header create functionality assumes a 8 byte array for 431 + * source and destination pointer at maximum. To adapt this for 432 + * the 802.15.4 dataframe header we use extended address handling 433 + * here only and intra pan connection. fc fields are mostly fallback 434 + * handling. For provide dev_hard_header for dgram sockets. 435 + */ 436 + static int mac802154_header_create(struct sk_buff *skb, 437 + struct net_device *dev, 438 + unsigned short type, 439 + const void *daddr, 440 + const void *saddr, 441 + unsigned len) 442 + { 443 + struct ieee802154_hdr hdr; 444 + struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 445 + struct wpan_dev *wpan_dev = &sdata->wpan_dev; 446 + struct ieee802154_mac_cb cb = { }; 447 + int hlen; 448 + 449 + if (!daddr) 450 + return -EINVAL; 451 + 452 + memset(&hdr.fc, 0, sizeof(hdr.fc)); 453 + hdr.fc.type = IEEE802154_FC_TYPE_DATA; 454 + hdr.fc.ack_request = wpan_dev->ackreq; 455 + hdr.seq = atomic_inc_return(&dev->ieee802154_ptr->dsn) & 0xFF; 456 + 457 + /* TODO currently a workaround to give zero cb block to set 458 + * security parameters defaults according MIB. 
459 + */ 460 + if (mac802154_set_header_security(sdata, &hdr, &cb) < 0) 461 + return -EINVAL; 462 + 463 + hdr.dest.pan_id = wpan_dev->pan_id; 464 + hdr.dest.mode = IEEE802154_ADDR_LONG; 465 + ieee802154_be64_to_le64(&hdr.dest.extended_addr, daddr); 466 + 467 + hdr.source.pan_id = hdr.dest.pan_id; 468 + hdr.source.mode = IEEE802154_ADDR_LONG; 469 + 470 + if (!saddr) 471 + hdr.source.extended_addr = wpan_dev->extended_addr; 472 + else 473 + ieee802154_be64_to_le64(&hdr.source.extended_addr, saddr); 474 + 475 + hlen = ieee802154_hdr_push(skb, &hdr); 476 + if (hlen < 0) 477 + return -EINVAL; 478 + 479 + skb_reset_mac_header(skb); 480 + skb->mac_len = hlen; 481 + 482 + if (len > ieee802154_max_payload(&hdr)) 483 + return -EMSGSIZE; 484 + 485 + return hlen; 486 + } 487 + 425 488 static int 426 489 mac802154_header_parse(const struct sk_buff *skb, unsigned char *haddr) 427 490 { 428 491 struct ieee802154_hdr hdr; 429 - struct ieee802154_addr *addr = (struct ieee802154_addr *)haddr; 430 492 431 493 if (ieee802154_hdr_peek_addrs(skb, &hdr) < 0) { 432 494 pr_debug("malformed packet\n"); 433 495 return 0; 434 496 } 435 497 436 - *addr = hdr.source; 437 - return sizeof(*addr); 498 + if (hdr.source.mode == IEEE802154_ADDR_LONG) { 499 + ieee802154_le64_to_be64(haddr, &hdr.source.extended_addr); 500 + return IEEE802154_EXTENDED_ADDR_LEN; 501 + } 502 + 503 + return 0; 438 504 } 439 505 440 - static struct header_ops mac802154_header_ops = { 441 - .create = mac802154_header_create, 442 - .parse = mac802154_header_parse, 506 + static const struct header_ops mac802154_header_ops = { 507 + .create = mac802154_header_create, 508 + .parse = mac802154_header_parse, 443 509 }; 444 510 445 511 static const struct net_device_ops mac802154_wpan_ops = { ··· 535 471 dev->addr_len = IEEE802154_EXTENDED_ADDR_LEN; 536 472 memset(dev->broadcast, 0xff, IEEE802154_EXTENDED_ADDR_LEN); 537 473 538 - dev->hard_header_len = MAC802154_FRAME_HARD_HEADER_LEN; 539 - dev->needed_tailroom = 2 + 16; /* FCS + 
MIC */ 540 - dev->mtu = IEEE802154_MTU; 474 + /* Let hard_header_len set to IEEE802154_MIN_HEADER_LEN. AF_PACKET 475 + * will not send frames without any payload, but ack frames 476 + * has no payload, so substract one that we can send a 3 bytes 477 + * frame. The xmit callback assumes at least a hard header where two 478 + * bytes fc and sequence field are set. 479 + */ 480 + dev->hard_header_len = IEEE802154_MIN_HEADER_LEN - 1; 481 + /* The auth_tag header is for security and places in private payload 482 + * room of mac frame which stucks between payload and FCS field. 483 + */ 484 + dev->needed_tailroom = IEEE802154_MAX_AUTH_TAG_LEN + 485 + IEEE802154_FCS_LEN; 486 + /* The mtu size is the payload without mac header in this case. 487 + * We have a dynamic length header with a minimum header length 488 + * which is hard_header_len. In this case we let mtu to the size 489 + * of maximum payload which is IEEE802154_MTU - IEEE802154_FCS_LEN - 490 + * hard_header_len. The FCS which is set by hardware or ndo_start_xmit 491 + * and the minimum mac header which can be evaluated inside driver 492 + * layer. The rest of mac header will be part of payload if greater 493 + * than hard_header_len. 494 + */ 495 + dev->mtu = IEEE802154_MTU - IEEE802154_FCS_LEN - 496 + dev->hard_header_len; 541 497 dev->tx_queue_len = 300; 542 498 dev->flags = IFF_NOARP | IFF_BROADCAST; 543 499 } ··· 597 513 sdata->dev->netdev_ops = &mac802154_wpan_ops; 598 514 sdata->dev->ml_priv = &mac802154_mlme_wpan; 599 515 wpan_dev->promiscuous_mode = false; 516 + wpan_dev->header_ops = &ieee802154_header_ops; 600 517 601 518 mutex_init(&sdata->sec_mtx); 602 519 ··· 635 550 if (!ndev) 636 551 return ERR_PTR(-ENOMEM); 637 552 638 - ndev->needed_headroom = local->hw.extra_tx_headroom; 553 + ndev->needed_headroom = local->hw.extra_tx_headroom + 554 + IEEE802154_MAX_HEADER_LEN; 639 555 640 556 ret = dev_alloc_name(ndev, ndev->name); 641 557 if (ret < 0)
+1
net/mac802154/llsec.c
··· 401 401 402 402 hash_del_rcu(&pos->bucket_s); 403 403 hash_del_rcu(&pos->bucket_hw); 404 + list_del_rcu(&pos->dev.list); 404 405 call_rcu(&pos->rcu, llsec_dev_free_rcu); 405 406 406 407 return 0;
+4
net/mac802154/rx.c
··· 87 87 88 88 skb->dev = sdata->dev; 89 89 90 + /* TODO this should be moved after netif_receive_skb call, otherwise 91 + * wireshark will show a mac header with security fields and the 92 + * payload is already decrypted. 93 + */ 90 94 rc = mac802154_llsec_decrypt(&sdata->sec, skb); 91 95 if (rc) { 92 96 pr_debug("decryption failed: %i\n", rc);
+4 -3
net/mac802154/tx.c
··· 77 77 put_unaligned_le16(crc, skb_put(skb, 2)); 78 78 } 79 79 80 - if (skb_cow_head(skb, local->hw.extra_tx_headroom)) 81 - goto err_tx; 82 - 83 80 /* Stop the netif queue on each sub_if_data object. */ 84 81 ieee802154_stop_queue(&local->hw); 85 82 ··· 118 121 struct ieee802154_sub_if_data *sdata = IEEE802154_DEV_TO_SUB_IF(dev); 119 122 int rc; 120 123 124 + /* TODO we should move it to wpan_dev_hard_header and dev_hard_header 125 + * functions. The reason is wireshark will show a mac header which is 126 + * with security fields but the payload is not encrypted. 127 + */ 121 128 rc = mac802154_llsec_encrypt(&sdata->sec, skb); 122 129 if (rc) { 123 130 netdev_warn(dev, "encryption failed: %i\n", rc);