Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/dvrabel/uwb

* 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/dvrabel/uwb: (31 commits)
uwb: remove beacon cache entry after calling uwb_notify()
uwb: remove unused include/linux/uwb/debug.h
uwb: use print_hex_dump()
uwb: use dev_dbg() for debug messages
uwb: fix memory leak in uwb_rc_notif()
wusb: fix oops when terminating a non-existant reservation
uwb: fix oops when terminating an already terminated reservation
uwb: improved MAS allocator and reservation conflict handling
wusb: add debug files for ASL, PZL and DI to the whci-hcd driver
uwb: fix oops in debug PAL's reservation callback
uwb: clean up whci_wait_for() timeout error message
wusb: whci-hcd shouldn't do ASL/PZL updates while channel is inactive
uwb: remove unused beacon group join/leave events
wlp: start/stop radio on network interface up/down
uwb: add basic radio manager
uwb: add pal parameter to new reservation callback
uwb: fix races between events and neh timers
uwb: don't unbind the radio controller driver when resetting
uwb: per-radio controller event thread and beacon cache
uwb: add commands to add/remove IEs to the debug interface
...

+3252 -2837
+8 -6
Documentation/ABI/testing/sysfs-class-uwb_rc
···
 Description:
                 Write:
 
-                <channel> [<bpst offset>]
+                <channel>
 
-                to start beaconing on a specific channel, or stop
-                beaconing if <channel> is -1.  Valid channels depends
-                on the radio controller's supported band groups.
+                to force a specific channel to be used when beaconing,
+                or, if <channel> is -1, to prohibit beaconing.  If
+                <channel> is 0, then the default channel selection
+                algorithm will be used.  Valid channels depends on the
+                radio controller's supported band groups.
 
-                <bpst offset> may be used to try and join a specific
-                beacon group if more than one was found during a scan.
+                Reading returns the currently active channel, or -1 if
+                the radio controller is not beaconing.
 
 What:           /sys/class/uwb_rc/uwbN/scan
 Date:           July 2008
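A minimal usage sketch for the updated beacon interface (the uwb0 device name is only an example; which channels are valid still depends on the radio controller's band groups):

    echo 13 > /sys/class/uwb_rc/uwb0/beacon   # force channel 13
    echo 0  > /sys/class/uwb_rc/uwb0/beacon   # let the default channel selection algorithm choose
    echo -1 > /sys/class/uwb_rc/uwb0/beacon   # prohibit beaconing
    cat /sys/class/uwb_rc/uwb0/beacon         # read back; -1 means not beaconing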
-9
Documentation/usb/wusb-cbaf
···
 start)
         for dev in ${2:-$hdevs}
         do
-                uwb_rc=$(readlink -f $dev/uwb_rc)
-                if cat $uwb_rc/beacon | grep -q -- "-1"
-                then
-                        echo 13 0 > $uwb_rc/beacon
-                        echo I: started beaconing on ch 13 on $(basename $uwb_rc) >&2
-                fi
                 echo $host_CHID > $dev/wusb_chid
                 echo I: started host $(basename $dev) >&2
         done
···
         do
                 echo 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 > $dev/wusb_chid
                 echo I: stopped host $(basename $dev) >&2
-                uwb_rc=$(readlink -f $dev/uwb_rc)
-                echo -1 | cat > $uwb_rc/beacon
-                echo I: stopped beaconing on $(basename $uwb_rc) >&2
         done
         ;;
 set-chid)
+55 -104
drivers/usb/host/hwa-hc.c
···
  * DWA).
  */
 #include <linux/kernel.h>
-#include <linux/version.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/workqueue.h>
···
 #include "../wusbcore/wa-hc.h"
 #include "../wusbcore/wusbhc.h"
 
-#define D_LOCAL 0
-#include <linux/uwb/debug.h>
-
 struct hwahc {
         struct wusbhc wusbhc; /* has to be 1st */
         struct wahc wa;
-        u8 buffer[16];  /* for misc usb transactions */
 };
 
-/**
+/*
  * FIXME should be wusbhc
  *
  * NOTE: we need to cache the Cluster ID because later...there is no
···
         struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
         struct device *dev = &hwahc->wa.usb_iface->dev;
 
-        d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
         mutex_lock(&wusbhc->mutex);
         wa_nep_disarm(&hwahc->wa);
         result = __wa_set_feature(&hwahc->wa, WA_RESET);
···
                 dev_err(dev, "error commanding HC to reset: %d\n", result);
                 goto error_unlock;
         }
-        d_printf(3, dev, "reset: waiting for device to change state\n");
         result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0);
         if (result < 0) {
                 dev_err(dev, "error waiting for HC to reset: %d\n", result);
···
         }
 error_unlock:
         mutex_unlock(&wusbhc->mutex);
-        d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
         return result;
 }
 
···
         int result;
         struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
         struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-        struct device *dev = &hwahc->wa.usb_iface->dev;
 
-        /* Set up a Host Info WUSB Information Element */
-        d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
         result = -ENOSPC;
         mutex_lock(&wusbhc->mutex);
-        /* Start the numbering from the top so that the bottom
-         * range of the unauth addr space is used for devices,
-         * the top for HCs; use 0xfe - RC# */
         addr = wusb_cluster_id_get();
         if (addr == 0)
                 goto error_cluster_id_get;
···
         if (result < 0)
                 goto error_set_cluster_id;
 
-        result = wa_nep_arm(&hwahc->wa, GFP_KERNEL);
-        if (result < 0) {
-                dev_err(dev, "cannot listen to notifications: %d\n", result);
-                goto error_stop;
-        }
         usb_hcd->uses_new_polling = 1;
         usb_hcd->poll_rh = 1;
         usb_hcd->state = HC_STATE_RUNNING;
         result = 0;
 out:
         mutex_unlock(&wusbhc->mutex);
-        d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
         return result;
 
-error_stop:
-        __wa_stop(&hwahc->wa);
 error_set_cluster_id:
         wusb_cluster_id_put(wusbhc->cluster_id);
 error_cluster_id_get:
         goto out;
 
-}
-
-/*
- * FIXME: break this function up
- */
-static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc)
-{
-        int result;
-        struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-        struct device *dev = &hwahc->wa.usb_iface->dev;
-
-        /* Set up a Host Info WUSB Information Element */
-        d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
-        result = -ENOSPC;
-
-        result = __wa_set_feature(&hwahc->wa, WA_ENABLE);
-        if (result < 0) {
-                dev_err(dev, "error commanding HC to start: %d\n", result);
-                goto error_stop;
-        }
-        result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE);
-        if (result < 0) {
-                dev_err(dev, "error waiting for HC to start: %d\n", result);
-                goto error_stop;
-        }
-        result = 0;
-out:
-        d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
-        return result;
-
-error_stop:
-        result = __wa_clear_feature(&hwahc->wa, WA_ENABLE);
-        goto out;
 }
 
 static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg)
···
         return -ENOSYS;
 }
 
-static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc)
-{
-        int result;
-        struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-        struct device *dev = &hwahc->wa.usb_iface->dev;
-
-        d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
-        /* Nothing for now */
-        d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
-        return;
-}
-
 /*
  * No need to abort pipes, as when this is called, all the children
  * has been disconnected and that has done it [through
···
  */
 static void hwahc_op_stop(struct usb_hcd *usb_hcd)
 {
-        int result;
         struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd);
-        struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
-        struct wahc *wa = &hwahc->wa;
-        struct device *dev = &wa->usb_iface->dev;
 
-        d_fnstart(4, dev, "(hwahc %p)\n", hwahc);
         mutex_lock(&wusbhc->mutex);
-        wusbhc_stop(wusbhc);
-        wa_nep_disarm(&hwahc->wa);
-        result = __wa_stop(&hwahc->wa);
         wusb_cluster_id_put(wusbhc->cluster_id);
         mutex_unlock(&wusbhc->mutex);
-        d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result);
-        return;
 }
 
 static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd)
···
         struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
 
         rpipe_ep_disable(&hwahc->wa, ep);
+}
+
+static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc)
+{
+        int result;
+        struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+        struct device *dev = &hwahc->wa.usb_iface->dev;
+
+        result = __wa_set_feature(&hwahc->wa, WA_ENABLE);
+        if (result < 0) {
+                dev_err(dev, "error commanding HC to start: %d\n", result);
+                goto error_stop;
+        }
+        result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE);
+        if (result < 0) {
+                dev_err(dev, "error waiting for HC to start: %d\n", result);
+                goto error_stop;
+        }
+        result = wa_nep_arm(&hwahc->wa, GFP_KERNEL);
+        if (result < 0) {
+                dev_err(dev, "cannot listen to notifications: %d\n", result);
+                goto error_stop;
+        }
+        return result;
+
+error_stop:
+        __wa_clear_feature(&hwahc->wa, WA_ENABLE);
+        return result;
+}
+
+static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay)
+{
+        struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc);
+        struct wahc *wa = &hwahc->wa;
+        u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber;
+        int ret;
+
+        ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0),
+                              WUSB_REQ_CHAN_STOP,
+                              USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+                              delay * 1000,
+                              iface_no,
+                              NULL, 0, 1000 /* FIXME: arbitrary */);
+        if (ret == 0)
+                msleep(delay);
+
+        wa_nep_disarm(&hwahc->wa);
+        __wa_stop(&hwahc->wa);
 }
 
 /*
···
         itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength);
         while (itr_size >= sizeof(*hdr)) {
                 hdr = (struct usb_descriptor_header *) itr;
-                d_printf(3, dev, "Extra device descriptor: "
-                         "type %02x/%u bytes @ %zu (%zu left)\n",
-                         hdr->bDescriptorType, hdr->bLength,
-                         (itr - usb_dev->rawdescriptors[actconfig_idx]),
-                         itr_size);
+                dev_dbg(dev, "Extra device descriptor: "
+                        "type %02x/%u bytes @ %zu (%zu left)\n",
+                        hdr->bDescriptorType, hdr->bLength,
+                        (itr - usb_dev->rawdescriptors[actconfig_idx]),
+                        itr_size);
                 if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER)
                         goto found;
                 itr += hdr->bLength;
···
 {
         struct wusbhc *wusbhc = &hwahc->wusbhc;
 
-        d_fnstart(1, NULL, "(hwahc %p)\n", hwahc);
         mutex_lock(&wusbhc->mutex);
         __wa_destroy(&hwahc->wa);
         wusbhc_destroy(&hwahc->wusbhc);
···
         usb_put_intf(hwahc->wa.usb_iface);
         usb_put_dev(hwahc->wa.usb_dev);
         mutex_unlock(&wusbhc->mutex);
-        d_fnend(1, NULL, "(hwahc %p) = void\n", hwahc);
 }
 
 static void hwahc_init(struct hwahc *hwahc)
···
         struct hwahc *hwahc;
         struct device *dev = &usb_iface->dev;
 
-        d_fnstart(4, dev, "(%p, %p)\n", usb_iface, id);
         result = -ENOMEM;
         usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa");
         if (usb_hcd == NULL) {
···
                 dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result);
                 goto error_wusbhc_b_create;
         }
-        d_fnend(4, dev, "(%p, %p) = 0\n", usb_iface, id);
         return 0;
 
 error_wusbhc_b_create:
···
 error_hwahc_create:
         usb_put_hcd(usb_hcd);
 error_alloc:
-        d_fnend(4, dev, "(%p, %p) = %d\n", usb_iface, id, result);
         return result;
 }
 
···
         wusbhc = usb_hcd_to_wusbhc(usb_hcd);
         hwahc = container_of(wusbhc, struct hwahc, wusbhc);
 
-        d_fnstart(1, NULL, "(hwahc %p [usb_iface %p])\n", hwahc, usb_iface);
         wusbhc_b_destroy(&hwahc->wusbhc);
         usb_remove_hcd(usb_hcd);
         hwahc_destroy(hwahc);
         usb_put_hcd(usb_hcd);
-        d_fnend(1, NULL, "(hwahc %p [usb_iface %p]) = void\n", hwahc,
-                usb_iface);
 }
 
-/** USB device ID's that we handle */
 static struct usb_device_id hwahc_id_table[] = {
         /* FIXME: use class labels for this */
         { USB_INTERFACE_INFO(0xe0, 0x02, 0x01), },
···
 
 static int __init hwahc_driver_init(void)
 {
-        int result;
-        result = usb_register(&hwahc_driver);
-        if (result < 0) {
-                printk(KERN_ERR "WA-CDS: Cannot register USB driver: %d\n",
-                       result);
-                goto error_usb_register;
-        }
-        return 0;
-
-error_usb_register:
-        return result;
-
+        return usb_register(&hwahc_driver);
 }
 module_init(hwahc_driver_init);
+1
drivers/usb/host/whci/Kbuild
···
 
 whci-hcd-y := \
         asl.o   \
+        debug.o \
         hcd.o   \
         hw.o    \
         init.o  \
+18 -28
drivers/usb/host/whci/asl.c
···
 #include <linux/dma-mapping.h>
 #include <linux/uwb/umc.h>
 #include <linux/usb.h>
-#define D_LOCAL 0
-#include <linux/uwb/debug.h>
 
 #include "../../wusbcore/wusbhc.h"
 
 #include "whcd.h"
-
-#if D_LOCAL >= 4
-static void dump_asl(struct whc *whc, const char *tag)
-{
-        struct device *dev = &whc->umc->dev;
-        struct whc_qset *qset;
-
-        d_printf(4, dev, "ASL %s\n", tag);
-
-        list_for_each_entry(qset, &whc->async_list, list_node) {
-                dump_qset(qset, dev);
-        }
-}
-#else
-static inline void dump_asl(struct whc *whc, const char *tag)
-{
-}
-#endif
-
 
 static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset,
                                struct whc_qset **next, struct whc_qset **prev)
···
                       1000, "stop ASL");
 }
 
+/**
+ * asl_update - request an ASL update and wait for the hardware to be synced
+ * @whc: the WHCI HC
+ * @wusbcmd: WUSBCMD value to start the update.
+ *
+ * If the WUSB HC is inactive (i.e., the ASL is stopped) then the
+ * update must be skipped as the hardware may not respond to update
+ * requests.
+ */
 void asl_update(struct whc *whc, uint32_t wusbcmd)
 {
-        whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
-        wait_event(whc->async_list_wq,
-                   (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0);
+        struct wusbhc *wusbhc = &whc->wusbhc;
+
+        mutex_lock(&wusbhc->mutex);
+        if (wusbhc->active) {
+                whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
+                wait_event(whc->async_list_wq,
+                           (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0);
+        }
+        mutex_unlock(&wusbhc->mutex);
 }
 
 /**
···
 
         spin_lock_irq(&whc->lock);
 
-        dump_asl(whc, "before processing");
-
         /*
          * Transerve the software list backwards so new qsets can be
          * safely inserted into the ASL without making it non-circular.
···
 
                 update |= process_qset(whc, qset);
         }
-
-        dump_asl(whc, "after processing");
 
         spin_unlock_irq(&whc->lock);
 
+189
drivers/usb/host/whci/debug.c
···
+/*
+ * Wireless Host Controller (WHC) debug.
+ *
+ * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version
+ * 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "../../wusbcore/wusbhc.h"
+
+#include "whcd.h"
+
+struct whc_dbg {
+        struct dentry *di_f;
+        struct dentry *asl_f;
+        struct dentry *pzl_f;
+};
+
+void qset_print(struct seq_file *s, struct whc_qset *qset)
+{
+        struct whc_std *std;
+        struct urb *urb = NULL;
+        int i;
+
+        seq_printf(s, "qset %08x\n", (u32)qset->qset_dma);
+        seq_printf(s, "  -> %08x\n", (u32)qset->qh.link);
+        seq_printf(s, "  info: %08x %08x %08x\n",
+                qset->qh.info1, qset->qh.info2, qset->qh.info3);
+        seq_printf(s, "  sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count);
+        seq_printf(s, "  TD: sts: %08x opts: %08x\n",
+                qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
+
+        for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
+                seq_printf(s, "  %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
+                        i == qset->td_start ? 'S' : ' ',
+                        i == qset->td_end ? 'E' : ' ',
+                        i, qset->qtd[i].status, qset->qtd[i].options,
+                        (u32)qset->qtd[i].page_list_ptr);
+        }
+        seq_printf(s, "  ntds: %d\n", qset->ntds);
+        list_for_each_entry(std, &qset->stds, list_node) {
+                if (urb != std->urb) {
+                        urb = std->urb;
+                        seq_printf(s, "  urb %p transferred: %d bytes\n", urb,
+                                urb->actual_length);
+                }
+                if (std->qtd)
+                        seq_printf(s, "    sTD[%td]: %zu bytes @ %08x\n",
+                                std->qtd - &qset->qtd[0],
+                                std->len, std->num_pointers ?
+                                (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
+                else
+                        seq_printf(s, "    sTD[-]: %zd bytes @ %08x\n",
+                                std->len, std->num_pointers ?
+                                (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
+        }
+}
+
+static int di_print(struct seq_file *s, void *p)
+{
+        struct whc *whc = s->private;
+        char buf[72];
+        int d;
+
+        for (d = 0; d < whc->n_devices; d++) {
+                struct di_buf_entry *di = &whc->di_buf[d];
+
+                bitmap_scnprintf(buf, sizeof(buf),
+                                (unsigned long *)di->availability_info, UWB_NUM_MAS);
+
+                seq_printf(s, "DI[%d]\n", d);
+                seq_printf(s, "  availability: %s\n", buf);
+                seq_printf(s, "  %c%c key idx: %d dev addr: %d\n",
+                        (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ',
+                        (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ',
+                        (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8,
+                        (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK));
+        }
+        return 0;
+}
+
+static int asl_print(struct seq_file *s, void *p)
+{
+        struct whc *whc = s->private;
+        struct whc_qset *qset;
+
+        list_for_each_entry(qset, &whc->async_list, list_node) {
+                qset_print(s, qset);
+        }
+
+        return 0;
+}
+
+static int pzl_print(struct seq_file *s, void *p)
+{
+        struct whc *whc = s->private;
+        struct whc_qset *qset;
+        int period;
+
+        for (period = 0; period < 5; period++) {
+                seq_printf(s, "Period %d\n", period);
+                list_for_each_entry(qset, &whc->periodic_list[period], list_node) {
+                        qset_print(s, qset);
+                }
+        }
+        return 0;
+}
+
+static int di_open(struct inode *inode, struct file *file)
+{
+        return single_open(file, di_print, inode->i_private);
+}
+
+static int asl_open(struct inode *inode, struct file *file)
+{
+        return single_open(file, asl_print, inode->i_private);
+}
+
+static int pzl_open(struct inode *inode, struct file *file)
+{
+        return single_open(file, pzl_print, inode->i_private);
+}
+
+static struct file_operations di_fops = {
+        .open    = di_open,
+        .read    = seq_read,
+        .llseek  = seq_lseek,
+        .release = single_release,
+        .owner   = THIS_MODULE,
+};
+
+static struct file_operations asl_fops = {
+        .open    = asl_open,
+        .read    = seq_read,
+        .llseek  = seq_lseek,
+        .release = single_release,
+        .owner   = THIS_MODULE,
+};
+
+static struct file_operations pzl_fops = {
+        .open    = pzl_open,
+        .read    = seq_read,
+        .llseek  = seq_lseek,
+        .release = single_release,
+        .owner   = THIS_MODULE,
+};
+
+void whc_dbg_init(struct whc *whc)
+{
+        if (whc->wusbhc.pal.debugfs_dir == NULL)
+                return;
+
+        whc->dbg = kzalloc(sizeof(struct whc_dbg), GFP_KERNEL);
+        if (whc->dbg == NULL)
+                return;
+
+        whc->dbg->di_f = debugfs_create_file("di", 0444,
+                                              whc->wusbhc.pal.debugfs_dir, whc,
+                                              &di_fops);
+        whc->dbg->asl_f = debugfs_create_file("asl", 0444,
+                                              whc->wusbhc.pal.debugfs_dir, whc,
+                                              &asl_fops);
+        whc->dbg->pzl_f = debugfs_create_file("pzl", 0444,
+                                              whc->wusbhc.pal.debugfs_dir, whc,
+                                              &pzl_fops);
+}
+
+void whc_dbg_clean_up(struct whc *whc)
+{
+        if (whc->dbg) {
+                debugfs_remove(whc->dbg->pzl_f);
+                debugfs_remove(whc->dbg->asl_f);
+                debugfs_remove(whc->dbg->di_f);
+                kfree(whc->dbg);
+        }
+}
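A quick sketch of reading the new debug files; it assumes debugfs is mounted at /sys/kernel/debug, and the exact directory is a placeholder (it is whatever the WUSB PAL registers as its debugfs directory) -- only the file names di, asl and pzl come from the code above:

    mount -t debugfs none /sys/kernel/debug 2>/dev/null
    cd /sys/kernel/debug/<whc PAL debugfs dir>
    cat di     # device information buffer entries
    cat asl    # qsets on the async schedule list
    cat pzl    # qsets on the periodic zone list, per period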
+3 -3
drivers/usb/host/whci/hcd.c
···
  * You should have received a copy of the GNU General Public License
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/uwb/umc.h>
···
         struct whc *whc = wusbhc_to_whc(wusbhc);
 
         mutex_lock(&wusbhc->mutex);
-
-        wusbhc_stop(wusbhc);
 
         /* stop HC */
         le_writel(0, whc->base + WUSBINTR);
···
                 goto error_wusbhc_b_create;
         }
 
+        whc_dbg_init(whc);
+
         return 0;
 
 error_wusbhc_b_create:
···
         struct whc *whc = wusbhc_to_whc(wusbhc);
 
         if (usb_hcd) {
+                whc_dbg_clean_up(whc);
                 wusbhc_b_destroy(wusbhc);
                 usb_remove_hcd(usb_hcd);
                 wusbhc_destroy(wusbhc);
+5 -3
drivers/usb/host/whci/hw.c
···
         unsigned long flags;
         dma_addr_t dma_addr;
         int t;
+        int ret = 0;
 
         mutex_lock(&whc->mutex);
 
···
                 dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n",
                         le_readl(whc->base + WUSBGENCMDSTS),
                         le_readl(whc->base + WUSBGENCMDPARAMS));
-                return -ETIMEDOUT;
+                ret = -ETIMEDOUT;
+                goto out;
         }
 
         if (addr) {
···
                whc->base + WUSBGENCMDSTS);
 
         spin_unlock_irqrestore(&whc->lock, flags);
-
+out:
         mutex_unlock(&whc->mutex);
 
-        return 0;
+        return ret;
 }
-1
drivers/usb/host/whci/int.c
···
  * You should have received a copy of the GNU General Public License
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/uwb/umc.h>
+18 -31
drivers/usb/host/whci/pzl.c
···
 #include <linux/dma-mapping.h>
 #include <linux/uwb/umc.h>
 #include <linux/usb.h>
-#define D_LOCAL 0
-#include <linux/uwb/debug.h>
 
 #include "../../wusbcore/wusbhc.h"
 
 #include "whcd.h"
-
-#if D_LOCAL >= 4
-static void dump_pzl(struct whc *whc, const char *tag)
-{
-        struct device *dev = &whc->umc->dev;
-        struct whc_qset *qset;
-        int period = 0;
-
-        d_printf(4, dev, "PZL %s\n", tag);
-
-        for (period = 0; period < 5; period++) {
-                d_printf(4, dev, "Period %d\n", period);
-                list_for_each_entry(qset, &whc->periodic_list[period], list_node) {
-                        dump_qset(qset, dev);
-                }
-        }
-}
-#else
-static inline void dump_pzl(struct whc *whc, const char *tag)
-{
-}
-#endif
 
 static void update_pzl_pointers(struct whc *whc, int period, u64 addr)
 {
···
                       1000, "stop PZL");
 }
 
+/**
+ * pzl_update - request a PZL update and wait for the hardware to be synced
+ * @whc: the WHCI HC
+ * @wusbcmd: WUSBCMD value to start the update.
+ *
+ * If the WUSB HC is inactive (i.e., the PZL is stopped) then the
+ * update must be skipped as the hardware may not respond to update
+ * requests.
+ */
 void pzl_update(struct whc *whc, uint32_t wusbcmd)
 {
-        whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
-        wait_event(whc->periodic_list_wq,
-                   (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0);
+        struct wusbhc *wusbhc = &whc->wusbhc;
+
+        mutex_lock(&wusbhc->mutex);
+        if (wusbhc->active) {
+                whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
+                wait_event(whc->periodic_list_wq,
+                           (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0);
+        }
+        mutex_unlock(&wusbhc->mutex);
 }
 
 static void update_pzl_hw_view(struct whc *whc)
···
 
         spin_lock_irq(&whc->lock);
 
-        dump_pzl(whc, "before processing");
-
         for (period = 4; period >= 0; period--) {
                 list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
                         if (!qset->in_hw_list)
···
 
         if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED))
                 update_pzl_hw_view(whc);
-
-        dump_pzl(whc, "after processing");
 
         spin_unlock_irq(&whc->lock);
 
-40
drivers/usb/host/whci/qset.c
···
 
 #include "whcd.h"
 
-void dump_qset(struct whc_qset *qset, struct device *dev)
-{
-        struct whc_std *std;
-        struct urb *urb = NULL;
-        int i;
-
-        dev_dbg(dev, "qset %08x\n", (u32)qset->qset_dma);
-        dev_dbg(dev, "  -> %08x\n", (u32)qset->qh.link);
-        dev_dbg(dev, "  info: %08x %08x %08x\n",
-                qset->qh.info1, qset->qh.info2, qset->qh.info3);
-        dev_dbg(dev, "  sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count);
-        dev_dbg(dev, "  TD: sts: %08x opts: %08x\n",
-                qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options);
-
-        for (i = 0; i < WHCI_QSET_TD_MAX; i++) {
-                dev_dbg(dev, "  %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n",
-                        i == qset->td_start ? 'S' : ' ',
-                        i == qset->td_end ? 'E' : ' ',
-                        i, qset->qtd[i].status, qset->qtd[i].options,
-                        (u32)qset->qtd[i].page_list_ptr);
-        }
-        dev_dbg(dev, "  ntds: %d\n", qset->ntds);
-        list_for_each_entry(std, &qset->stds, list_node) {
-                if (urb != std->urb) {
-                        urb = std->urb;
-                        dev_dbg(dev, "  urb %p transferred: %d bytes\n", urb,
-                                urb->actual_length);
-                }
-                if (std->qtd)
-                        dev_dbg(dev, "    sTD[%td]: %zu bytes @ %08x\n",
-                                std->qtd - &qset->qtd[0],
-                                std->len, std->num_pointers ?
-                                (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
-                else
-                        dev_dbg(dev, "    sTD[-]: %zd bytes @ %08x\n",
-                                std->len, std->num_pointers ?
-                                (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr);
-        }
-}
-
 struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags)
 {
         struct whc_qset *qset;
+9 -2
drivers/usb/host/whci/whcd.h
···
 #define __WHCD_H
 
 #include <linux/uwb/whci.h>
+#include <linux/uwb/umc.h>
 #include <linux/workqueue.h>
 
 #include "whci-hc.h"
···
 /* Generic command timeout. */
 #define WHC_GENCMD_TIMEOUT_MS 100
 
+struct whc_dbg;
 
 struct whc {
         struct wusbhc wusbhc;
···
         struct list_head periodic_removed_list;
         wait_queue_head_t periodic_list_wq;
         struct work_struct periodic_work;
+
+        struct whc_dbg *dbg;
 };
 
 #define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc))
···
 /* wusb.c */
 int whc_wusbhc_start(struct wusbhc *wusbhc);
-void whc_wusbhc_stop(struct wusbhc *wusbhc);
+void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay);
 int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
                   u8 handle, struct wuie_hdr *wuie);
 int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle);
···
                  struct whc_qtd *qtd);
 enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset);
 void qset_remove_complete(struct whc *whc, struct whc_qset *qset);
-void dump_qset(struct whc_qset *qset, struct device *dev);
 void pzl_update(struct whc *whc, uint32_t wusbcmd);
 void asl_update(struct whc *whc, uint32_t wusbcmd);
+
+/* debug.c */
+void whc_dbg_init(struct whc *whc);
+void whc_dbg_clean_up(struct whc *whc);
 
 #endif /* #ifndef __WHCD_H */
+2
drivers/usb/host/whci/whci-hc.h
···
 #  define WUSBDNTSCTRL_SLOTS(s)      ((s) << 0)
 
 #define WUSBTIME        0x68
+#  define WUSBTIME_CHANNEL_TIME_MASK 0x00ffffff
+
 #define WUSBBPST        0x6c
 #define WUSBDIBUPDATED  0x70
 
+12 -31
drivers/usb/host/whci/wusb.c
···
  * You should have received a copy of the GNU General Public License
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/uwb/umc.h>
-#define D_LOCAL 1
-#include <linux/uwb/debug.h>
 
 #include "../../wusbcore/wusbhc.h"
 
 #include "whcd.h"
 
-#if D_LOCAL >= 1
-static void dump_di(struct whc *whc, int idx)
-{
-        struct di_buf_entry *di = &whc->di_buf[idx];
-        struct device *dev = &whc->umc->dev;
-        char buf[128];
-
-        bitmap_scnprintf(buf, sizeof(buf), (unsigned long *)di->availability_info, UWB_NUM_MAS);
-
-        d_printf(1, dev, "DI[%d]\n", idx);
-        d_printf(1, dev, "  availability: %s\n", buf);
-        d_printf(1, dev, "  %c%c key idx: %d dev addr: %d\n",
-                 (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ',
-                 (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ',
-                 (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8,
-                 (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK));
-}
-#else
-static inline void dump_di(struct whc *whc, int idx)
-{
-}
-#endif
-
 static int whc_update_di(struct whc *whc, int idx)
 {
         int offset = idx / 32;
         u32 bit = 1 << (idx % 32);
-
-        dump_di(whc, idx);
 
         le_writel(bit, whc->base + WUSBDIBUPDATED + offset);
 
···
 }
 
 /*
- * WHCI starts and stops MMCs based on there being a valid GTK so
- * these need only start/stop the asynchronous and periodic schedules.
+ * WHCI starts MMCs based on there being a valid GTK so these need
+ * only start/stop the asynchronous and periodic schedules and send a
+ * channel stop command.
  */
 
 int whc_wusbhc_start(struct wusbhc *wusbhc)
···
         return 0;
 }
 
-void whc_wusbhc_stop(struct wusbhc *wusbhc)
+void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay)
 {
         struct whc *whc = wusbhc_to_whc(wusbhc);
+        u32 stop_time, now_time;
+        int ret;
 
         pzl_stop(whc);
         asl_stop(whc);
+
+        now_time = le_readl(whc->base + WUSBTIME) & WUSBTIME_CHANNEL_TIME_MASK;
+        stop_time = (now_time + ((delay * 8) << 7)) & 0x00ffffff;
+        ret = whc_do_gencmd(whc, WUSBGENCMDSTS_CHAN_STOP, stop_time, NULL, 0);
+        if (ret == 0)
+                msleep(delay);
 }
 
 int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
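Worked example for the stop-time arithmetic above: (delay * 8) << 7 equals delay * 1024, so for delay = 100 ms the command asks the HC to stop the channel 102400 channel-time ticks after now_time, wrapped to the 24-bit WUSBTIME counter, and the driver then sleeps the same delay in milliseconds. (The implied scale of 1024 ticks per millisecond is inferred from the shift, not stated in the patch.)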
-1
drivers/usb/wusbcore/cbaf.c
···
  */
 #include <linux/module.h>
 #include <linux/ctype.h>
-#include <linux/version.h>
 #include <linux/usb.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+29 -50
drivers/usb/wusbcore/crypto.c
···
 #include <linux/uwb.h>
 #include <linux/usb/wusb.h>
 #include <linux/scatterlist.h>
-#define D_LOCAL 0
-#include <linux/uwb/debug.h>
 
+static int debug_crypto_verify = 0;
+
+module_param(debug_crypto_verify, int, 0);
+MODULE_PARM_DESC(debug_crypto_verify, "verify the key generation algorithms");
+
+static void wusb_key_dump(const void *buf, size_t len)
+{
+        print_hex_dump(KERN_ERR, "  ", DUMP_PREFIX_OFFSET, 16, 1,
+                       buf, len, 0);
+}
 
 /*
  * Block of data, as understood by AES-CCM
···
         const u8 bzero[16] = { 0 };
         size_t zero_padding;
 
-        d_fnstart(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, "
-                  "n %p, a %p, b %p, blen %zu)\n",
-                  tfm_cbc, tfm_aes, mic, n, a, b, blen);
         /*
          * These checks should be compile time optimized out
          * ensure @a fills b1's mac_header and following fields
···
         b1.la = cpu_to_be16(blen + 14);
         memcpy(&b1.mac_header, a, sizeof(*a));
 
-        d_printf(4, NULL, "I: B0 (%zu bytes)\n", sizeof(b0));
-        d_dump(4, NULL, &b0, sizeof(b0));
-        d_printf(4, NULL, "I: B1 (%zu bytes)\n", sizeof(b1));
-        d_dump(4, NULL, &b1, sizeof(b1));
-        d_printf(4, NULL, "I: B (%zu bytes)\n", blen);
-        d_dump(4, NULL, b, blen);
-        d_printf(4, NULL, "I: B 0-padding (%zu bytes)\n", zero_padding);
-        d_printf(4, NULL, "D: IV before crypto (%zu)\n", ivsize);
-        d_dump(4, NULL, iv, ivsize);
-
         sg_init_table(sg, ARRAY_SIZE(sg));
         sg_set_buf(&sg[0], &b0, sizeof(b0));
         sg_set_buf(&sg[1], &b1, sizeof(b1));
···
                         result);
                 goto error_cbc_crypt;
         }
-        d_printf(4, NULL, "D: MIC tag\n");
-        d_dump(4, NULL, iv, ivsize);
 
         /* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5]
          * The procedure is to AES crypt the A0 block and XOR the MIC
···
         ax.counter = 0;
         crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
         bytewise_xor(mic, &ax, iv, 8);
-        d_printf(4, NULL, "D: CTR[MIC]\n");
-        d_dump(4, NULL, &ax, 8);
-        d_printf(4, NULL, "D: CCM-MIC tag\n");
-        d_dump(4, NULL, mic, 8);
         result = 8;
 error_cbc_crypt:
         kfree(dst_buf);
 error_dst_buf:
-        d_fnend(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, "
-                "n %p, a %p, b %p, blen %zu)\n",
-                tfm_cbc, tfm_aes, mic, n, a, b, blen);
         return result;
 }
 
···
         struct crypto_cipher *tfm_aes;
         u64 sfn = 0;
         __le64 sfn_le;
-
-        d_fnstart(3, NULL, "(out %p, out_size %zu, key %p, _n %p, "
-                  "a %p, b %p, blen %zu, len %zu)\n", out, out_size,
-                  key, _n, a, b, blen, len);
 
         tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
         if (IS_ERR(tfm_cbc)) {
···
 error_setkey_cbc:
         crypto_free_blkcipher(tfm_cbc);
 error_alloc_cbc:
-        d_fnend(3, NULL, "(out %p, out_size %zu, key %p, _n %p, "
-                "a %p, b %p, blen %zu, len %zu) = %d\n", out, out_size,
-                key, _n, a, b, blen, len, (int)bytes);
         return result;
 }
 
···
                        "mismatch between MIC result and WUSB1.0[A2]\n");
                 hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC);
                 printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size);
-                dump_bytes(NULL, &stv_hsmic_hs, hs_size);
+                wusb_key_dump(&stv_hsmic_hs, hs_size);
                 printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n",
                        sizeof(stv_hsmic_n));
-                dump_bytes(NULL, &stv_hsmic_n, sizeof(stv_hsmic_n));
+                wusb_key_dump(&stv_hsmic_n, sizeof(stv_hsmic_n));
                 printk(KERN_ERR "E: MIC out:\n");
-                dump_bytes(NULL, mic, sizeof(mic));
+                wusb_key_dump(mic, sizeof(mic));
                 printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n");
-                dump_bytes(NULL, stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC));
+                wusb_key_dump(stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC));
                 result = -EINVAL;
         } else
                 result = 0;
···
                 printk(KERN_ERR "E: WUSB key derivation test: "
                        "mismatch between key derivation result "
                        "and WUSB1.0[A1] Errata 2006/12\n");
-                printk(KERN_ERR "E: keydvt in: key (%zu bytes)\n",
-                       sizeof(stv_key_a1));
-                dump_bytes(NULL, stv_key_a1, sizeof(stv_key_a1));
-                printk(KERN_ERR "E: keydvt in: nonce (%zu bytes)\n",
-                       sizeof(stv_keydvt_n_a1));
-                dump_bytes(NULL, &stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1));
-                printk(KERN_ERR "E: keydvt in: hnonce & dnonce (%zu bytes)\n",
-                       sizeof(stv_keydvt_in_a1));
-                dump_bytes(NULL, &stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1));
+                printk(KERN_ERR "E: keydvt in: key\n");
+                wusb_key_dump(stv_key_a1, sizeof(stv_key_a1));
+                printk(KERN_ERR "E: keydvt in: nonce\n");
+                wusb_key_dump( &stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1));
+                printk(KERN_ERR "E: keydvt in: hnonce & dnonce\n");
+                wusb_key_dump(&stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1));
                 printk(KERN_ERR "E: keydvt out: KCK\n");
-                dump_bytes(NULL, &keydvt_out.kck, sizeof(keydvt_out.kck));
+                wusb_key_dump(&keydvt_out.kck, sizeof(keydvt_out.kck));
                 printk(KERN_ERR "E: keydvt out: PTK\n");
-                dump_bytes(NULL, &keydvt_out.ptk, sizeof(keydvt_out.ptk));
+                wusb_key_dump(&keydvt_out.ptk, sizeof(keydvt_out.ptk));
                 result = -EINVAL;
         } else
                 result = 0;
···
 {
         int result;
 
-        result = wusb_key_derive_verify();
-        if (result < 0)
-                return result;
-        return wusb_oob_mic_verify();
+        if (debug_crypto_verify) {
+                result = wusb_key_derive_verify();
+                if (result < 0)
+                        return result;
+                return wusb_oob_mic_verify();
+        }
+        return 0;
 }
 
 void wusb_crypto_exit(void)
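A minimal sketch of exercising the new parameter, assuming this code is built as the wusbcore module (the module name is an assumption based on the source directory):

    modprobe wusbcore debug_crypto_verify=1   # run the key-derivation/MIC self-tests at load
    # for a built-in, the equivalent is wusbcore.debug_crypto_verify=1 on the kernel command line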
-4
drivers/usb/wusbcore/dev-sysfs.c
···
 #include <linux/workqueue.h>
 #include "wusbhc.h"
 
-#undef D_LOCAL
-#define D_LOCAL 4
-#include <linux/uwb/debug.h>
-
 static ssize_t wusb_disconnect_store(struct device *dev,
                                      struct device_attribute *attr,
                                      const char *buf, size_t size)
+27 -204
drivers/usb/wusbcore/devconnect.c
···
  *                            Called by notif.c:wusb_handle_dn_connect()
  *                            when a DN_Connect is received.
  *
- * wusbhc_devconnect_auth()   Called by rh.c:wusbhc_rh_port_reset() when
- *                            doing the device connect sequence.
- *
  * wusb_devconnect_acked()    Ack done, release resources.
  *
  * wusb_handle_dn_alive()     Called by notif.c:wusb_handle_dn()
···
  * wusb_handle_dn_disconnect()Called by notif.c:wusb_handle_dn() to
  *                            process a disconenct request from a
  *                            device.
- *
- * wusb_dev_reset()           Called by rh.c:wusbhc_rh_port_reset() when
- *                            resetting a device.
  *
  * __wusb_dev_disable()       Called by rh.c:wusbhc_rh_clear_port_feat() when
  *                            disabling a port.
···
 #include <linux/ctype.h>
 #include <linux/workqueue.h>
 #include "wusbhc.h"
-
-#undef D_LOCAL
-#define D_LOCAL 1
-#include <linux/uwb/debug.h>
 
 static void wusbhc_devconnect_acked_work(struct work_struct *work);
 
···
         list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list);
         wusbhc->cack_count++;
         wusbhc_fill_cack_ie(wusbhc);
         return wusb_dev;
 }
 
···
  */
 static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
 {
-        struct device *dev = wusbhc->dev;
-        d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev);
         list_del_init(&wusb_dev->cack_node);
         wusbhc->cack_count--;
         wusbhc_fill_cack_ie(wusbhc);
-        d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev);
 }
 
 /*
···
 static
 void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
 {
-        struct device *dev = wusbhc->dev;
-        d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev);
         wusbhc_cack_rm(wusbhc, wusb_dev);
         if (wusbhc->cack_count)
                 wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr);
         else
                 wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr);
-        d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev);
 }
 
 static void wusbhc_devconnect_acked_work(struct work_struct *work)
···
         struct wusb_port *port;
         unsigned idx, devnum;
 
-        d_fnstart(3, dev, "(%p, %p, %s)\n", wusbhc, dnc, pr_cdid);
         mutex_lock(&wusbhc->mutex);
 
         /* Check we are not handling it already */
···
         port->wusb_dev = wusb_dev;
         port->status |= USB_PORT_STAT_CONNECTION;
         port->change |= USB_PORT_STAT_C_CONNECTION;
-        port->reset_count = 0;
         /* Now the port status changed to connected; khubd will
          * pick the change up and try to reset the port to bring it to
          * the enabled state--so this process returns up to the stack
-         * and it calls back into wusbhc_rh_port_reset() who will call
-         * devconnect_auth().
          */
 error_unlock:
         mutex_unlock(&wusbhc->mutex);
-        d_fnend(3, dev, "(%p, %p, %s) = void\n", wusbhc, dnc, pr_cdid);
         return;
 
 }
···
 static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc,
                                     struct wusb_port *port)
 {
-        struct device *dev = wusbhc->dev;
         struct wusb_dev *wusb_dev = port->wusb_dev;
 
-        d_fnstart(3, dev, "(wusbhc %p, port %p)\n", wusbhc, port);
         port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE
                         | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET
                         | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED);
···
                 wusb_dev_put(wusb_dev);
         }
         port->wusb_dev = NULL;
-        /* don't reset the reset_count to zero or wusbhc_rh_port_reset will get
-         * confused! We only reset to zero when we connect a new device.
-         */
 
         /* After a device disconnects, change the GTK (see [WUSB]
          * section 6.2.11.2). */
         wusbhc_gtk_rekey(wusbhc);
 
-        d_fnend(3, dev, "(wusbhc %p, port %p) = void\n", wusbhc, port);
         /* The Wireless USB part has forgotten about the device already; now
          * khubd's timer will pick up the disconnection and remove the USB
          * device from the system
          */
-}
-
-/*
- * Authenticate a device into the WUSB Cluster
- *
- * Called from the Root Hub code (rh.c:wusbhc_rh_port_reset()) when
- * asking for a reset on a port that is not enabled (ie: first connect
- * on the port).
- *
- * Performs the 4way handshake to allow the device to comunicate w/ the
- * WUSB Cluster securely; once done, issue a request to the device for
- * it to change to address 0.
- *
- * This mimics the reset step of Wired USB that once resetting a
- * device, leaves the port in enabled state and the dev with the
- * default address (0).
- *
- * WUSB1.0[7.1.2]
- *
- * @port_idx: port where the change happened--This is the index into
- *            the wusbhc port array, not the USB port number.
- */
-int wusbhc_devconnect_auth(struct wusbhc *wusbhc, u8 port_idx)
-{
-        struct device *dev = wusbhc->dev;
-        struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx);
-
-        d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
-        port->status &= ~USB_PORT_STAT_RESET;
-        port->status |= USB_PORT_STAT_ENABLE;
-        port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE;
-        d_fnend(3, dev, "(%p, %u) = 0\n", wusbhc, port_idx);
-        return 0;
 }
 
 /*
···
  */
 static void wusbhc_keep_alive_run(struct work_struct *ws)
 {
-        struct delayed_work *dw =
-                container_of(ws, struct delayed_work, work);
-        struct wusbhc *wusbhc =
-                container_of(dw, struct wusbhc, keep_alive_timer);
 
-        d_fnstart(5, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
-        if (wusbhc->active) {
-                mutex_lock(&wusbhc->mutex);
-                __wusbhc_keep_alive(wusbhc);
-                mutex_unlock(&wusbhc->mutex);
-                queue_delayed_work(wusbd, &wusbhc->keep_alive_timer,
-                                   (wusbhc->trust_timeout * CONFIG_HZ)/1000/2);
-        }
-        d_fnend(5, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
-        return;
 }
 
 /*
···
  */
 static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev)
 {
-        struct device *dev = wusbhc->dev;
-
-        d_printf(2, dev, "DN ALIVE: device 0x%02x pong\n", wusb_dev->addr);
-
         mutex_lock(&wusbhc->mutex);
         wusb_dev->entry_ts = jiffies;
         __wusbhc_keep_alive(wusbhc);
···
                 "no-beacon"
         };
 
-        d_fnstart(3, dev, "(%p, %p, %zu)\n", wusbhc, dn_hdr, size);
         if (size < sizeof(*dnc)) {
                 dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n",
                         size, sizeof(*dnc));
-                goto out;
         }
 
         dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr);
···
                  wusb_dn_connect_new_connection(dnc) ? "connect" : "reconnect");
         /* ACK the connect */
         wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid);
-out:
-        d_fnend(3, dev, "(%p, %p, %zu) = void\n",
-                wusbhc, dn_hdr, size);
-        return;
 }
 
 /*
···
         mutex_lock(&wusbhc->mutex);
         __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx));
         mutex_unlock(&wusbhc->mutex);
-}
-
-/*
- * Reset a WUSB device on a HWA
- *
- * @wusbhc
- * @port_idx   Index of the port where the device is
- *
- * In Wireless USB, a reset is more or less equivalent to a full
- * disconnect; so we just do a full disconnect and send the device a
- * Device Reset IE (WUSB1.0[7.5.11]) giving it a few millisecs (6 MMCs).
- *
- * @wusbhc should be refcounted and unlocked
- */
-int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port_idx)
-{
-        int result;
-        struct device *dev = wusbhc->dev;
-        struct wusb_dev *wusb_dev;
-        struct wuie_reset *ie;
-
-        d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
-        mutex_lock(&wusbhc->mutex);
-        result = 0;
-        wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
-        if (wusb_dev == NULL) {
-                /* reset no device? ignore */
-                dev_dbg(dev, "RESET: no device at port %u, ignoring\n",
-                        port_idx);
-                goto error_unlock;
-        }
-        result = -ENOMEM;
-        ie = kzalloc(sizeof(*ie), GFP_KERNEL);
-        if (ie == NULL)
-                goto error_unlock;
-        ie->hdr.bLength = sizeof(ie->hdr) + sizeof(ie->CDID);
-        ie->hdr.bIEIdentifier = WUIE_ID_RESET_DEVICE;
-        ie->CDID = wusb_dev->cdid;
-        result = wusbhc_mmcie_set(wusbhc, 0xff, 6, &ie->hdr);
-        if (result < 0) {
-                dev_err(dev, "RESET: cant's set MMC: %d\n", result);
-                goto error_kfree;
-        }
-        __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
-
-        /* 120ms, hopefully 6 MMCs (FIXME) */
-        msleep(120);
-        wusbhc_mmcie_rm(wusbhc, &ie->hdr);
-error_kfree:
-        kfree(ie);
-error_unlock:
-        mutex_unlock(&wusbhc->mutex);
-        d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result);
-        return result;
 }
 
 /*
···
         struct device *dev = wusbhc->dev;
         struct wusb_dev *wusb_dev;
 
-        d_fnstart(3, dev, "(%p, %p)\n", wusbhc, dn_hdr);
-
         if (size < sizeof(struct wusb_dn_hdr)) {
                 dev_err(dev, "DN data shorter than DN header (%d < %d)\n",
                         (int)size, (int)sizeof(struct wusb_dn_hdr));
-                goto out;
         }
 
         wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr);
         if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) {
                 dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n",
                         dn_hdr->bType, srcaddr);
-                goto out;
         }
 
         switch (dn_hdr->bType) {
···
                 dev_warn(dev, "unknown DN %u (%d octets) from %u\n",
                          dn_hdr->bType, (int)size, srcaddr);
         }
-out:
-        d_fnend(3, dev, "(%p, %p) = void\n", wusbhc, dn_hdr);
-        return;
 }
 EXPORT_SYMBOL_GPL(wusbhc_handle_dn);
 
···
         struct wusb_dev *wusb_dev;
         struct wuie_disconnect *ie;
 
-        d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx);
-        result = 0;
         wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev;
         if (wusb_dev == NULL) {
                 /* reset no device? ignore */
                 dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n",
                         port_idx);
-                goto error;
         }
         __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx));
 
-        result = -ENOMEM;
         ie = kzalloc(sizeof(*ie), GFP_KERNEL);
         if (ie == NULL)
-                goto error;
         ie->hdr.bLength = sizeof(*ie);
         ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT;
         ie->bDeviceAddress = wusb_dev->addr;
         result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr);
-        if (result < 0) {
                 dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result);
-                goto error_kfree;
         }
-
-        /* 120ms, hopefully 6 MMCs */
-        msleep(100);
-        wusbhc_mmcie_rm(wusbhc, &ie->hdr);
-error_kfree:
         kfree(ie);
-error:
-        d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result);
-        return;
-}
-
-static void wusb_cap_descr_printf(const unsigned level, struct device *dev,
-                                  const struct usb_wireless_cap_descriptor *wcd)
-{
-        d_printf(level, dev,
-                 "WUSB Capability Descriptor\n"
-                 "  bDevCapabilityType 0x%02x\n"
-                 "  bmAttributes 0x%02x\n"
-                 "  wPhyRates 0x%04x\n"
-                 "  bmTFITXPowerInfo 0x%02x\n"
-                 "  bmFFITXPowerInfo 0x%02x\n"
-                 "  bmBandGroup 0x%04x\n"
-                 "  bReserved 0x%02x\n",
-                 wcd->bDevCapabilityType,
-                 wcd->bmAttributes,
-                 le16_to_cpu(wcd->wPHYRates),
-                 wcd->bmTFITXPowerInfo,
-                 wcd->bmFFITXPowerInfo,
-                 wcd->bmBandGroup,
-                 wcd->bReserved);
 }
 
 /*
···
                 }
                 cap_size = cap_hdr->bLength;
                 cap_type = cap_hdr->bDevCapabilityType;
-                d_printf(4, dev, "BOS Capability: 0x%02x (%zu bytes)\n",
-                         cap_type, cap_size);
                 if (cap_size == 0)
                         break;
                 if (cap_size > top - itr) {
···
                         result = -EBADF;
                         goto error_bad_cap;
                 }
-                d_dump(3, dev, itr, cap_size);
                 switch (cap_type) {
                 case USB_CAP_TYPE_WIRELESS_USB:
                         if (cap_size != sizeof(*wusb_dev->wusb_cap_descr))
···
                                         "descriptor is %zu bytes vs %zu "
                                         "needed\n", cap_size,
                                         sizeof(*wusb_dev->wusb_cap_descr));
-                        else {
                                 wusb_dev->wusb_cap_descr = itr;
-                                wusb_cap_descr_printf(3, dev, itr);
-                        }
                         break;
                 default:
                         dev_err(dev, "BUG? Unknown BOS capability 0x%02x "
···
                         "%zu bytes): %zd\n", desc_size, result);
                 goto error_get_descriptor;
         }
-        d_printf(2, dev, "Got BOS descriptor %zd bytes, %u capabilities\n",
-                 result, bos->bNumDeviceCaps);
-        d_dump(2, dev, bos, result);
         result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result);
         if (result < 0)
                 goto error_bad_bos;
···
         if (usb_dev->wusb == 0 || usb_dev->devnum == 1)
                 return;         /* skip non wusb and wusb RHs */
 
-        d_fnstart(3, dev, "(usb_dev %p)\n", usb_dev);
-
         wusbhc = wusbhc_get_by_usb_dev(usb_dev);
         if (wusbhc == NULL)
                 goto error_nodev;
···
         wusb_dev_put(wusb_dev);
         wusbhc_put(wusbhc);
 error_nodev:
-        d_fnend(3, dev, "(usb_dev %p) = void\n", usb_dev);
         return;
 
         wusb_dev_sysfs_rm(wusb_dev);
···
 
 void wusb_dev_destroy(struct kref *_wusb_dev)
 {
-        struct wusb_dev *wusb_dev
-                = container_of(_wusb_dev, struct wusb_dev, refcnt);
         list_del_init(&wusb_dev->cack_node);
         wusb_dev_free(wusb_dev);
-        d_fnend(1, NULL, "%s (wusb_dev %p) = void\n", __func__, wusb_dev);
 }
 EXPORT_SYMBOL_GPL(wusb_dev_destroy);
 
···
  */
 int wusbhc_devconnect_create(struct wusbhc *wusbhc)
 {
-        d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
-
         wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE;
         wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr);
         INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run);
···
         wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr);
         INIT_LIST_HEAD(&wusbhc->cack_list);
 
-        d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
         return 0;
 }
 
···
  */
 void wusbhc_devconnect_destroy(struct wusbhc *wusbhc)
 {
-        d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc);
-        d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc);
 }
 
 /*
···
  * FIXME: This also enables the keep alives but this is not necessary
  * until there are connected and authenticated devices.
  */
-int wusbhc_devconnect_start(struct wusbhc *wusbhc,
-                            const struct wusb_ckhdid *chid)
 {
         struct device *dev = wusbhc->dev;
         struct wuie_host_info *hi;
···
         hi->hdr.bLength = sizeof(*hi);
         hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO;
         hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL);
-        hi->CHID = *chid;
         result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr);
         if (result < 0) {
                 dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result);
··· 57 * Called by notif.c:wusb_handle_dn_connect() 58 * when a DN_Connect is received. 59 * 60 * wusb_devconnect_acked() Ack done, release resources. 61 * 62 * wusb_handle_dn_alive() Called by notif.c:wusb_handle_dn() ··· 68 * wusb_handle_dn_disconnect()Called by notif.c:wusb_handle_dn() to 69 * process a disconenct request from a 70 * device. 71 * 72 * __wusb_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when 73 * disabling a port. ··· 96 #include <linux/ctype.h> 97 #include <linux/workqueue.h> 98 #include "wusbhc.h" 99 100 static void wusbhc_devconnect_acked_work(struct work_struct *work); 101 ··· 240 list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list); 241 wusbhc->cack_count++; 242 wusbhc_fill_cack_ie(wusbhc); 243 + 244 return wusb_dev; 245 } 246 ··· 250 */ 251 static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) 252 { 253 list_del_init(&wusb_dev->cack_node); 254 wusbhc->cack_count--; 255 wusbhc_fill_cack_ie(wusbhc); 256 } 257 258 /* ··· 263 static 264 void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) 265 { 266 wusbhc_cack_rm(wusbhc, wusb_dev); 267 if (wusbhc->cack_count) 268 wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr); 269 else 270 wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr); 271 } 272 273 static void wusbhc_devconnect_acked_work(struct work_struct *work) ··· 320 struct wusb_port *port; 321 unsigned idx, devnum; 322 323 mutex_lock(&wusbhc->mutex); 324 325 /* Check we are not handling it already */ ··· 366 port->wusb_dev = wusb_dev; 367 port->status |= USB_PORT_STAT_CONNECTION; 368 port->change |= USB_PORT_STAT_C_CONNECTION; 369 /* Now the port status changed to connected; khubd will 370 * pick the change up and try to reset the port to bring it to 371 * the enabled state--so this process returns up to the stack 372 + * and it calls back into wusbhc_rh_port_reset(). 373 */ 374 error_unlock: 375 mutex_unlock(&wusbhc->mutex); 376 return; 377 378 } ··· 398 static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc, 399 struct wusb_port *port) 400 { 401 struct wusb_dev *wusb_dev = port->wusb_dev; 402 403 port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE 404 | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET 405 | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED); ··· 413 wusb_dev_put(wusb_dev); 414 } 415 port->wusb_dev = NULL; 416 417 /* After a device disconnects, change the GTK (see [WUSB] 418 * section 6.2.11.2). 
*/ 419 wusbhc_gtk_rekey(wusbhc); 420 421 /* The Wireless USB part has forgotten about the device already; now 422 * khubd's timer will pick up the disconnection and remove the USB 423 * device from the system 424 */ 425 } 426 427 /* ··· 528 */ 529 static void wusbhc_keep_alive_run(struct work_struct *ws) 530 { 531 + struct delayed_work *dw = container_of(ws, struct delayed_work, work); 532 + struct wusbhc *wusbhc = container_of(dw, struct wusbhc, keep_alive_timer); 533 534 + mutex_lock(&wusbhc->mutex); 535 + __wusbhc_keep_alive(wusbhc); 536 + mutex_unlock(&wusbhc->mutex); 537 + 538 + queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, 539 + msecs_to_jiffies(wusbhc->trust_timeout / 2)); 540 } 541 542 /* ··· 585 */ 586 static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) 587 { 588 mutex_lock(&wusbhc->mutex); 589 wusb_dev->entry_ts = jiffies; 590 __wusbhc_keep_alive(wusbhc); ··· 621 "no-beacon" 622 }; 623 624 if (size < sizeof(*dnc)) { 625 dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n", 626 size, sizeof(*dnc)); 627 + return; 628 } 629 630 dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr); ··· 637 wusb_dn_connect_new_connection(dnc) ? "connect" : "reconnect"); 638 /* ACK the connect */ 639 wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid); 640 } 641 642 /* ··· 659 mutex_lock(&wusbhc->mutex); 660 __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx)); 661 mutex_unlock(&wusbhc->mutex); 662 } 663 664 /* ··· 735 struct device *dev = wusbhc->dev; 736 struct wusb_dev *wusb_dev; 737 738 if (size < sizeof(struct wusb_dn_hdr)) { 739 dev_err(dev, "DN data shorter than DN header (%d < %d)\n", 740 (int)size, (int)sizeof(struct wusb_dn_hdr)); 741 + return; 742 } 743 744 wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); 745 if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) { 746 dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n", 747 dn_hdr->bType, srcaddr); 748 + return; 749 } 750 751 switch (dn_hdr->bType) { ··· 772 dev_warn(dev, "unknown DN %u (%d octets) from %u\n", 773 dn_hdr->bType, (int)size, srcaddr); 774 } 775 } 776 EXPORT_SYMBOL_GPL(wusbhc_handle_dn); 777 ··· 804 struct wusb_dev *wusb_dev; 805 struct wuie_disconnect *ie; 806 807 wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; 808 if (wusb_dev == NULL) { 809 /* reset no device? ignore */ 810 dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n", 811 port_idx); 812 + return; 813 } 814 __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); 815 816 ie = kzalloc(sizeof(*ie), GFP_KERNEL); 817 if (ie == NULL) 818 + return; 819 ie->hdr.bLength = sizeof(*ie); 820 ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT; 821 ie->bDeviceAddress = wusb_dev->addr; 822 result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr); 823 + if (result < 0) 824 dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result); 825 + else { 826 + /* At least 6 MMCs, assuming at least 1 MMC per zone. 
*/ 827 + msleep(7*4); 828 + wusbhc_mmcie_rm(wusbhc, &ie->hdr); 829 } 830 kfree(ie); 831 } 832 833 /* ··· 899 } 900 cap_size = cap_hdr->bLength; 901 cap_type = cap_hdr->bDevCapabilityType; 902 if (cap_size == 0) 903 break; 904 if (cap_size > top - itr) { ··· 912 result = -EBADF; 913 goto error_bad_cap; 914 } 915 switch (cap_type) { 916 case USB_CAP_TYPE_WIRELESS_USB: 917 if (cap_size != sizeof(*wusb_dev->wusb_cap_descr)) ··· 920 "descriptor is %zu bytes vs %zu " 921 "needed\n", cap_size, 922 sizeof(*wusb_dev->wusb_cap_descr)); 923 + else 924 wusb_dev->wusb_cap_descr = itr; 925 break; 926 default: 927 dev_err(dev, "BUG? Unknown BOS capability 0x%02x " ··· 988 "%zu bytes): %zd\n", desc_size, result); 989 goto error_get_descriptor; 990 } 991 + 992 result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result); 993 if (result < 0) 994 goto error_bad_bos; ··· 1056 if (usb_dev->wusb == 0 || usb_dev->devnum == 1) 1057 return; /* skip non wusb and wusb RHs */ 1058 1059 wusbhc = wusbhc_get_by_usb_dev(usb_dev); 1060 if (wusbhc == NULL) 1061 goto error_nodev; ··· 1087 wusb_dev_put(wusb_dev); 1088 wusbhc_put(wusbhc); 1089 error_nodev: 1090 return; 1091 1092 wusb_dev_sysfs_rm(wusb_dev); ··· 1174 1175 void wusb_dev_destroy(struct kref *_wusb_dev) 1176 { 1177 + struct wusb_dev *wusb_dev = container_of(_wusb_dev, struct wusb_dev, refcnt); 1178 + 1179 list_del_init(&wusb_dev->cack_node); 1180 wusb_dev_free(wusb_dev); 1181 } 1182 EXPORT_SYMBOL_GPL(wusb_dev_destroy); 1183 ··· 1190 */ 1191 int wusbhc_devconnect_create(struct wusbhc *wusbhc) 1192 { 1193 wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE; 1194 wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr); 1195 INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run); ··· 1200 wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr); 1201 INIT_LIST_HEAD(&wusbhc->cack_list); 1202 1203 return 0; 1204 } 1205 ··· 1209 */ 1210 void wusbhc_devconnect_destroy(struct wusbhc *wusbhc) 1211 { 1212 + /* no op */ 1213 } 1214 1215 /* ··· 1222 * FIXME: This also enables the keep alives but this is not necessary 1223 * until there are connected and authenticated devices. 1224 */ 1225 + int wusbhc_devconnect_start(struct wusbhc *wusbhc) 1226 { 1227 struct device *dev = wusbhc->dev; 1228 struct wuie_host_info *hi; ··· 1236 hi->hdr.bLength = sizeof(*hi); 1237 hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO; 1238 hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL); 1239 + hi->CHID = wusbhc->chid; 1240 result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr); 1241 if (result < 0) { 1242 dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result);
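In devconnect.c above, the keep-alive work now recovers its wusbhc with container_of() and re-arms itself with msecs_to_jiffies(wusbhc->trust_timeout / 2) instead of the old open-coded CONFIG_HZ arithmetic, so HZ no longer leaks into the driver. The following is a minimal userspace sketch of that container_of pattern, not kernel code; struct fake_work, struct fake_wusbhc and the 4000 ms timeout are invented for illustration.

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_work {
        void (*fn)(struct fake_work *w);
};

struct fake_wusbhc {
        unsigned int trust_timeout_ms;          /* illustrative value */
        struct fake_work keep_alive_work;       /* embedded, like keep_alive_timer */
};

static void keep_alive_run(struct fake_work *w)
{
        /* Recover the enclosing object from the embedded work item. */
        struct fake_wusbhc *whc =
                container_of(w, struct fake_wusbhc, keep_alive_work);

        /* In the driver this is where the work gets queued again with
         * msecs_to_jiffies(trust_timeout / 2); here we only report the period. */
        printf("keep-alive tick; re-arm in %u ms\n", whc->trust_timeout_ms / 2);
}

int main(void)
{
        struct fake_wusbhc whc = {
                .trust_timeout_ms = 4000,
                .keep_alive_work  = { .fn = keep_alive_run },
        };

        whc.keep_alive_work.fn(&whc.keep_alive_work);
        return 0;
}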
+41 -77
drivers/usb/wusbcore/mmc.c
··· 159 } 160 EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm); 161 162 /* 163 * wusbhc_start - start transmitting MMCs and accepting connections 164 * @wusbhc: the HC to start 165 - * @chid: the CHID to use for this host 166 * 167 * Establishes a cluster reservation, enables device connections, and 168 * starts MMCs with appropriate DNTS parameters. 169 */ 170 - int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid) 171 { 172 int result; 173 struct device *dev = wusbhc->dev; ··· 201 goto error_rsv_establish; 202 } 203 204 - result = wusbhc_devconnect_start(wusbhc, chid); 205 if (result < 0) { 206 dev_err(dev, "error enabling device connections: %d\n", result); 207 goto error_devconnect_start; ··· 219 dev_err(dev, "Cannot set DNTS parameters: %d\n", result); 220 goto error_set_num_dnts; 221 } 222 - result = wusbhc->start(wusbhc); 223 if (result < 0) { 224 dev_err(dev, "error starting wusbch: %d\n", result); 225 goto error_wusbhc_start; 226 } 227 - wusbhc->active = 1; 228 return 0; 229 230 error_wusbhc_start: ··· 239 } 240 241 /* 242 - * Disconnect all from the WUSB Channel 243 - * 244 - * Send a Host Disconnect IE in the MMC, wait, don't send it any more 245 - */ 246 - static int __wusbhc_host_disconnect_ie(struct wusbhc *wusbhc) 247 - { 248 - int result = -ENOMEM; 249 - struct wuie_host_disconnect *host_disconnect_ie; 250 - might_sleep(); 251 - host_disconnect_ie = kmalloc(sizeof(*host_disconnect_ie), GFP_KERNEL); 252 - if (host_disconnect_ie == NULL) 253 - goto error_alloc; 254 - host_disconnect_ie->hdr.bLength = sizeof(*host_disconnect_ie); 255 - host_disconnect_ie->hdr.bIEIdentifier = WUIE_ID_HOST_DISCONNECT; 256 - result = wusbhc_mmcie_set(wusbhc, 0, 0, &host_disconnect_ie->hdr); 257 - if (result < 0) 258 - goto error_mmcie_set; 259 - 260 - /* WUSB1.0[8.5.3.1 & 7.5.2] */ 261 - msleep(100); 262 - wusbhc_mmcie_rm(wusbhc, &host_disconnect_ie->hdr); 263 - error_mmcie_set: 264 - kfree(host_disconnect_ie); 265 - error_alloc: 266 - return result; 267 - } 268 - 269 - /* 270 * wusbhc_stop - stop transmitting MMCs 271 * @wusbhc: the HC to stop 272 * 273 - * Send a Host Disconnect IE, wait, remove all the MMCs (stop sending MMCs). 274 - * 275 - * If we can't allocate a Host Stop IE, screw it, we don't notify the 276 - * devices we are disconnecting... 277 */ 278 void wusbhc_stop(struct wusbhc *wusbhc) 279 { 280 - if (wusbhc->active) { 281 - wusbhc->active = 0; 282 - wusbhc->stop(wusbhc); 283 - wusbhc_sec_stop(wusbhc); 284 - __wusbhc_host_disconnect_ie(wusbhc); 285 - wusbhc_devconnect_stop(wusbhc); 286 - wusbhc_rsv_terminate(wusbhc); 287 - } 288 - } 289 - EXPORT_SYMBOL_GPL(wusbhc_stop); 290 - 291 - /* 292 - * Change the CHID in a WUSB Channel 293 - * 294 - * If it is just a new CHID, send a Host Disconnect IE and then change 295 - * the CHID IE. 
296 - */ 297 - static int __wusbhc_chid_change(struct wusbhc *wusbhc, 298 - const struct wusb_ckhdid *chid) 299 - { 300 - int result = -ENOSYS; 301 - struct device *dev = wusbhc->dev; 302 - dev_err(dev, "%s() not implemented yet\n", __func__); 303 - return result; 304 - 305 - BUG_ON(wusbhc->wuie_host_info == NULL); 306 - __wusbhc_host_disconnect_ie(wusbhc); 307 - wusbhc->wuie_host_info->CHID = *chid; 308 - result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->wuie_host_info->hdr); 309 - if (result < 0) 310 - dev_err(dev, "Can't update Host Info WUSB IE: %d\n", result); 311 - return result; 312 } 313 314 /* ··· 267 chid = NULL; 268 269 mutex_lock(&wusbhc->mutex); 270 - if (wusbhc->active) { 271 - if (chid) 272 - result = __wusbhc_chid_change(wusbhc, chid); 273 - else 274 - wusbhc_stop(wusbhc); 275 - } else { 276 - if (chid) 277 - wusbhc_start(wusbhc, chid); 278 } 279 mutex_unlock(&wusbhc->mutex); 280 return result; 281 } 282 EXPORT_SYMBOL_GPL(wusbhc_chid_set);
··· 159 } 160 EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm); 161 162 + static int wusbhc_mmc_start(struct wusbhc *wusbhc) 163 + { 164 + int ret; 165 + 166 + mutex_lock(&wusbhc->mutex); 167 + ret = wusbhc->start(wusbhc); 168 + if (ret >= 0) 169 + wusbhc->active = 1; 170 + mutex_unlock(&wusbhc->mutex); 171 + 172 + return ret; 173 + } 174 + 175 + static void wusbhc_mmc_stop(struct wusbhc *wusbhc) 176 + { 177 + mutex_lock(&wusbhc->mutex); 178 + wusbhc->active = 0; 179 + wusbhc->stop(wusbhc, WUSB_CHANNEL_STOP_DELAY_MS); 180 + mutex_unlock(&wusbhc->mutex); 181 + } 182 + 183 /* 184 * wusbhc_start - start transmitting MMCs and accepting connections 185 * @wusbhc: the HC to start 186 * 187 * Establishes a cluster reservation, enables device connections, and 188 * starts MMCs with appropriate DNTS parameters. 189 */ 190 + int wusbhc_start(struct wusbhc *wusbhc) 191 { 192 int result; 193 struct device *dev = wusbhc->dev; ··· 181 goto error_rsv_establish; 182 } 183 184 + result = wusbhc_devconnect_start(wusbhc); 185 if (result < 0) { 186 dev_err(dev, "error enabling device connections: %d\n", result); 187 goto error_devconnect_start; ··· 199 dev_err(dev, "Cannot set DNTS parameters: %d\n", result); 200 goto error_set_num_dnts; 201 } 202 + result = wusbhc_mmc_start(wusbhc); 203 if (result < 0) { 204 dev_err(dev, "error starting wusbch: %d\n", result); 205 goto error_wusbhc_start; 206 } 207 + 208 return 0; 209 210 error_wusbhc_start: ··· 219 } 220 221 /* 222 * wusbhc_stop - stop transmitting MMCs 223 * @wusbhc: the HC to stop 224 * 225 + * Stops the WUSB channel and removes the cluster reservation. 226 */ 227 void wusbhc_stop(struct wusbhc *wusbhc) 228 { 229 + wusbhc_mmc_stop(wusbhc); 230 + wusbhc_sec_stop(wusbhc); 231 + wusbhc_devconnect_stop(wusbhc); 232 + wusbhc_rsv_terminate(wusbhc); 233 } 234 235 /* ··· 306 chid = NULL; 307 308 mutex_lock(&wusbhc->mutex); 309 + if (chid) { 310 + if (wusbhc->active) { 311 + mutex_unlock(&wusbhc->mutex); 312 + return -EBUSY; 313 + } 314 + wusbhc->chid = *chid; 315 } 316 mutex_unlock(&wusbhc->mutex); 317 + 318 + if (chid) 319 + result = uwb_radio_start(&wusbhc->pal); 320 + else 321 + uwb_radio_stop(&wusbhc->pal); 322 return result; 323 } 324 EXPORT_SYMBOL_GPL(wusbhc_chid_set);
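In mmc.c above, wusbhc_mmc_start() and wusbhc_mmc_stop() flip ->active only under the mutex, and wusbhc_chid_set() now refuses a new CHID with -EBUSY while the host is running, leaving the actual radio start/stop to uwb_radio_start()/uwb_radio_stop(). A userspace sketch of that guard, with a pthread mutex standing in for the kernel one and fake_hc, hc_start and hc_set_chid as made-up names:

#include <stdio.h>
#include <errno.h>
#include <pthread.h>

struct fake_hc {
        pthread_mutex_t mutex;
        int active;
};

static int hc_start(struct fake_hc *hc)
{
        pthread_mutex_lock(&hc->mutex);
        hc->active = 1;         /* the driver sets this only if ->start() succeeded */
        pthread_mutex_unlock(&hc->mutex);
        return 0;
}

static int hc_set_chid(struct fake_hc *hc, int have_chid)
{
        int ret = 0;

        pthread_mutex_lock(&hc->mutex);
        if (have_chid && hc->active)
                ret = -EBUSY;   /* refuse to change the CHID of a running host */
        pthread_mutex_unlock(&hc->mutex);
        return ret;
}

int main(void)
{
        struct fake_hc hc = { .mutex = PTHREAD_MUTEX_INITIALIZER, .active = 0 };

        hc_start(&hc);
        printf("set_chid while active -> %d (expect %d)\n",
               hc_set_chid(&hc, 1), -EBUSY);
        return 0;
}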
+14 -2
drivers/usb/wusbcore/pal.c
··· 18 */ 19 #include "wusbhc.h" 20 21 /** 22 * wusbhc_pal_register - register the WUSB HC as a UWB PAL 23 * @wusbhc: the WUSB HC ··· 38 39 wusbhc->pal.name = "wusbhc"; 40 wusbhc->pal.device = wusbhc->usb_hcd.self.controller; 41 42 - return uwb_pal_register(wusbhc->uwb_rc, &wusbhc->pal); 43 } 44 45 /** ··· 50 */ 51 void wusbhc_pal_unregister(struct wusbhc *wusbhc) 52 { 53 - uwb_pal_unregister(wusbhc->uwb_rc, &wusbhc->pal); 54 }
··· 18 */ 19 #include "wusbhc.h" 20 21 + static void wusbhc_channel_changed(struct uwb_pal *pal, int channel) 22 + { 23 + struct wusbhc *wusbhc = container_of(pal, struct wusbhc, pal); 24 + 25 + if (channel < 0) 26 + wusbhc_stop(wusbhc); 27 + else 28 + wusbhc_start(wusbhc); 29 + } 30 + 31 /** 32 * wusbhc_pal_register - register the WUSB HC as a UWB PAL 33 * @wusbhc: the WUSB HC ··· 28 29 wusbhc->pal.name = "wusbhc"; 30 wusbhc->pal.device = wusbhc->usb_hcd.self.controller; 31 + wusbhc->pal.rc = wusbhc->uwb_rc; 32 + wusbhc->pal.channel_changed = wusbhc_channel_changed; 33 34 + return uwb_pal_register(&wusbhc->pal); 35 } 36 37 /** ··· 38 */ 39 void wusbhc_pal_unregister(struct wusbhc *wusbhc) 40 { 41 + uwb_pal_unregister(&wusbhc->pal); 42 }
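In pal.c above, the PAL supplies a channel_changed callback and the radio manager invokes it with the current channel, a negative value meaning the radio stopped, which is what now drives wusbhc_start()/wusbhc_stop(). A small standalone sketch of that contract; struct fake_pal and the helper are invented, only the shape of the callback mirrors the diff:

#include <stdio.h>

struct fake_pal {
        const char *name;
        void (*channel_changed)(struct fake_pal *pal, int channel);
};

static void wusbhc_like_channel_changed(struct fake_pal *pal, int channel)
{
        if (channel < 0)
                printf("%s: channel lost, stop the WUSB channel\n", pal->name);
        else
                printf("%s: beaconing on channel %d, start the WUSB channel\n",
                       pal->name, channel);
}

int main(void)
{
        struct fake_pal pal = {
                .name            = "wusbhc",
                .channel_changed = wusbhc_like_channel_changed,
        };

        pal.channel_changed(&pal, 9);   /* radio started on some channel */
        pal.channel_changed(&pal, -1);  /* radio stopped */
        return 0;
}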
+12 -9
drivers/usb/wusbcore/reservation.c
··· 48 { 49 struct wusbhc *wusbhc = rsv->pal_priv; 50 struct device *dev = wusbhc->dev; 51 char buf[72]; 52 53 switch (rsv->state) { 54 case UWB_RSV_STATE_O_ESTABLISHED: 55 - bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS); 56 dev_dbg(dev, "established reservation: %s\n", buf); 57 - wusbhc_bwa_set(wusbhc, rsv->stream, &rsv->mas); 58 break; 59 case UWB_RSV_STATE_NONE: 60 dev_dbg(dev, "removed reservation\n"); 61 wusbhc_bwa_set(wusbhc, 0, NULL); 62 - wusbhc->rsv = NULL; 63 break; 64 default: 65 dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state); ··· 87 bcid.data[0] = wusbhc->cluster_id; 88 bcid.data[1] = 0; 89 90 - rsv->owner = &rc->uwb_dev; 91 rsv->target.type = UWB_RSV_TARGET_DEVADDR; 92 rsv->target.devaddr = bcid; 93 rsv->type = UWB_DRP_TYPE_PRIVATE; 94 - rsv->max_mas = 256; 95 - rsv->min_mas = 16; /* one MAS per zone? */ 96 - rsv->sparsity = 16; /* at least one MAS in each zone? */ 97 rsv->is_multicast = true; 98 99 ret = uwb_rsv_establish(rsv); ··· 105 106 107 /** 108 - * wusbhc_rsv_terminate - terminate any cluster reservation 109 * @wusbhc: the WUSB host whose reservation is to be terminated 110 */ 111 void wusbhc_rsv_terminate(struct wusbhc *wusbhc) 112 { 113 - if (wusbhc->rsv) 114 uwb_rsv_terminate(wusbhc->rsv); 115 }
··· 48 { 49 struct wusbhc *wusbhc = rsv->pal_priv; 50 struct device *dev = wusbhc->dev; 51 + struct uwb_mas_bm mas; 52 char buf[72]; 53 54 switch (rsv->state) { 55 case UWB_RSV_STATE_O_ESTABLISHED: 56 + uwb_rsv_get_usable_mas(rsv, &mas); 57 + bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS); 58 dev_dbg(dev, "established reservation: %s\n", buf); 59 + wusbhc_bwa_set(wusbhc, rsv->stream, &mas); 60 break; 61 case UWB_RSV_STATE_NONE: 62 dev_dbg(dev, "removed reservation\n"); 63 wusbhc_bwa_set(wusbhc, 0, NULL); 64 break; 65 default: 66 dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state); ··· 86 bcid.data[0] = wusbhc->cluster_id; 87 bcid.data[1] = 0; 88 89 rsv->target.type = UWB_RSV_TARGET_DEVADDR; 90 rsv->target.devaddr = bcid; 91 rsv->type = UWB_DRP_TYPE_PRIVATE; 92 + rsv->max_mas = 256; /* try to get as much as possible */ 93 + rsv->min_mas = 15; /* one MAS per zone */ 94 + rsv->max_interval = 1; /* max latency is one zone */ 95 rsv->is_multicast = true; 96 97 ret = uwb_rsv_establish(rsv); ··· 105 106 107 /** 108 + * wusbhc_rsv_terminate - terminate the cluster reservation 109 * @wusbhc: the WUSB host whose reservation is to be terminated 110 */ 111 void wusbhc_rsv_terminate(struct wusbhc *wusbhc) 112 { 113 + if (wusbhc->rsv) { 114 uwb_rsv_terminate(wusbhc->rsv); 115 + uwb_rsv_destroy(wusbhc->rsv); 116 + wusbhc->rsv = NULL; 117 + } 118 }
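In reservation.c above, the reservation now asks for max_mas = 256, min_mas = 15 and max_interval = 1, and the state callback renders the usable MAS set from uwb_rsv_get_usable_mas() with bitmap_scnprintf() into a 72-byte buffer. The sketch below approximates that rendering for a 256-bit bitmap in plain C; it is not the kernel helper, and the one-MAS-per-zone fill is only example data.

#include <stdio.h>
#include <string.h>

#define UWB_NUM_MAS     256
#define MAS_PER_ZONE    16

int main(void)
{
        unsigned char mas[UWB_NUM_MAS / 8];
        char buf[72];
        size_t pos = 0;
        int i;

        memset(mas, 0, sizeof(mas));

        /* Example allocation: one MAS at the start of each of the 16 zones. */
        for (i = 0; i < UWB_NUM_MAS; i += MAS_PER_ZONE)
                mas[i / 8] |= 1 << (i % 8);

        /* Render most-significant byte first as one hex string. */
        for (i = sizeof(mas) - 1; i >= 0 && pos + 3 < sizeof(buf); i--)
                pos += snprintf(buf + pos, sizeof(buf) - pos, "%02x", mas[i]);

        printf("established reservation: %s\n", buf);
        return 0;
}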
+37 -67
drivers/usb/wusbcore/rh.c
··· 71 */ 72 #include "wusbhc.h" 73 74 - #define D_LOCAL 0 75 - #include <linux/uwb/debug.h> 76 - 77 /* 78 * Reset a fake port 79 * 80 - * This can be called to reset a port from any other state or to reset 81 - * it when connecting. In Wireless USB they are different; when doing 82 - * a new connect that involves going over the authentication. When 83 - * just reseting, its a different story. 84 * 85 - * The Linux USB stack resets a port twice before it considers it 86 - * enabled, so we have to detect and ignore that. 87 * 88 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. 89 * ··· 98 { 99 int result = 0; 100 struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx); 101 102 - d_fnstart(3, wusbhc->dev, "(wusbhc %p port_idx %u)\n", 103 - wusbhc, port_idx); 104 - if (port->reset_count == 0) { 105 - wusbhc_devconnect_auth(wusbhc, port_idx); 106 - port->reset_count++; 107 - } else if (port->reset_count == 1) 108 - /* see header */ 109 - d_printf(2, wusbhc->dev, "Ignoring second reset on port_idx " 110 - "%u\n", port_idx); 111 else 112 - result = wusbhc_dev_reset(wusbhc, port_idx); 113 - d_fnend(3, wusbhc->dev, "(wusbhc %p port_idx %u) = %d\n", 114 - wusbhc, port_idx, result); 115 return result; 116 } 117 ··· 139 size_t cnt, size; 140 unsigned long *buf = (unsigned long *) _buf; 141 142 - d_fnstart(1, wusbhc->dev, "(wusbhc %p)\n", wusbhc); 143 /* WE DON'T LOCK, see comment */ 144 size = wusbhc->ports_max + 1 /* hub bit */; 145 size = (size + 8 - 1) / 8; /* round to bytes */ ··· 147 set_bit(cnt + 1, buf); 148 else 149 clear_bit(cnt + 1, buf); 150 - d_fnend(1, wusbhc->dev, "(wusbhc %p) %u, buffer:\n", wusbhc, (int)size); 151 - d_dump(1, wusbhc->dev, _buf, size); 152 return size; 153 } 154 EXPORT_SYMBOL_GPL(wusbhc_rh_status_data); ··· 195 static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature) 196 { 197 int result; 198 - struct device *dev = wusbhc->dev; 199 200 - d_fnstart(4, dev, "(%p, feature 0x%04u)\n", wusbhc, feature); 201 switch (feature) { 202 case C_HUB_LOCAL_POWER: 203 /* FIXME: maybe plug bit 0 to the power input status, ··· 207 default: 208 result = -EPIPE; 209 } 210 - d_fnend(4, dev, "(%p, feature 0x%04u), %d\n", wusbhc, feature, result); 211 return result; 212 } 213 ··· 233 static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature, 234 u8 selector, u8 port_idx) 235 { 236 - int result = -EINVAL; 237 struct device *dev = wusbhc->dev; 238 239 - d_fnstart(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d)\n", 240 - feature, selector, port_idx); 241 - 242 if (port_idx > wusbhc->ports_max) 243 - goto error; 244 245 switch (feature) { 246 /* According to USB2.0[11.24.2.13]p2, these features ··· 246 case USB_PORT_FEAT_C_SUSPEND: 247 case USB_PORT_FEAT_C_CONNECTION: 248 case USB_PORT_FEAT_C_RESET: 249 - result = 0; 250 - break; 251 - 252 case USB_PORT_FEAT_POWER: 253 /* No such thing, but we fake it works */ 254 mutex_lock(&wusbhc->mutex); 255 wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER; 256 mutex_unlock(&wusbhc->mutex); 257 - result = 0; 258 - break; 259 case USB_PORT_FEAT_RESET: 260 - result = wusbhc_rh_port_reset(wusbhc, port_idx); 261 - break; 262 case USB_PORT_FEAT_ENABLE: 263 case USB_PORT_FEAT_SUSPEND: 264 dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n", 265 port_idx, feature, selector); 266 - result = -ENOSYS; 267 - break; 268 default: 269 dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n", 270 port_idx, feature, selector); 271 - result = -EPIPE; 272 - break; 273 } 274 - error: 275 - d_fnend(4, dev, 
"(feat 0x%04u, selector 0x%u, port_idx %d) = %d\n", 276 - feature, selector, port_idx, result); 277 - return result; 278 } 279 280 /* ··· 277 static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature, 278 u8 selector, u8 port_idx) 279 { 280 - int result = -EINVAL; 281 struct device *dev = wusbhc->dev; 282 283 - d_fnstart(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d)\n", 284 - wusbhc, feature, selector, port_idx); 285 - 286 if (port_idx > wusbhc->ports_max) 287 - goto error; 288 289 mutex_lock(&wusbhc->mutex); 290 - result = 0; 291 switch (feature) { 292 case USB_PORT_FEAT_POWER: /* fake port always on */ 293 /* According to USB2.0[11.24.2.7.1.4], no need to implement? */ ··· 303 break; 304 case USB_PORT_FEAT_SUSPEND: 305 case USB_PORT_FEAT_C_SUSPEND: 306 - case 0xffff: /* ??? FIXME */ 307 dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n", 308 port_idx, feature, selector); 309 - /* dump_stack(); */ 310 result = -ENOSYS; 311 break; 312 default: ··· 314 break; 315 } 316 mutex_unlock(&wusbhc->mutex); 317 - error: 318 - d_fnend(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d) = " 319 - "%d\n", wusbhc, feature, selector, port_idx, result); 320 return result; 321 } 322 ··· 326 static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx, 327 u32 *_buf, u16 wLength) 328 { 329 - int result = -EINVAL; 330 u16 *buf = (u16 *) _buf; 331 332 - d_fnstart(1, wusbhc->dev, "(wusbhc %p port_idx %u wLength %u)\n", 333 - wusbhc, port_idx, wLength); 334 if (port_idx > wusbhc->ports_max) 335 - goto error; 336 mutex_lock(&wusbhc->mutex); 337 buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status); 338 buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change); 339 - result = 0; 340 mutex_unlock(&wusbhc->mutex); 341 - error: 342 - d_fnend(1, wusbhc->dev, "(wusbhc %p) = %d, buffer:\n", wusbhc, result); 343 - d_dump(1, wusbhc->dev, _buf, wLength); 344 - return result; 345 } 346 347 /*
··· 71 */ 72 #include "wusbhc.h" 73 74 /* 75 * Reset a fake port 76 * 77 + * Using a Reset Device IE is too heavyweight as it causes the device 78 + * to enter the UnConnected state and leave the cluster, this can mean 79 + * that when the device reconnects it is connected to a different fake 80 + * port. 81 * 82 + * Instead, reset authenticated devices with a SetAddress(0), followed 83 + * by a SetAddresss(AuthAddr). 84 + * 85 + * For unauthenticated devices just pretend to reset but do nothing. 86 + * If the device initialization continues to fail it will eventually 87 + * time out after TrustTimeout and enter the UnConnected state. 88 * 89 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. 90 * ··· 97 { 98 int result = 0; 99 struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx); 100 + struct wusb_dev *wusb_dev = port->wusb_dev; 101 102 + port->status |= USB_PORT_STAT_RESET; 103 + port->change |= USB_PORT_STAT_C_RESET; 104 + 105 + if (wusb_dev->addr & WUSB_DEV_ADDR_UNAUTH) 106 + result = 0; 107 else 108 + result = wusb_dev_update_address(wusbhc, wusb_dev); 109 + 110 + port->status &= ~USB_PORT_STAT_RESET; 111 + port->status |= USB_PORT_STAT_ENABLE; 112 + port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE; 113 + 114 return result; 115 } 116 ··· 138 size_t cnt, size; 139 unsigned long *buf = (unsigned long *) _buf; 140 141 /* WE DON'T LOCK, see comment */ 142 size = wusbhc->ports_max + 1 /* hub bit */; 143 size = (size + 8 - 1) / 8; /* round to bytes */ ··· 147 set_bit(cnt + 1, buf); 148 else 149 clear_bit(cnt + 1, buf); 150 return size; 151 } 152 EXPORT_SYMBOL_GPL(wusbhc_rh_status_data); ··· 197 static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature) 198 { 199 int result; 200 201 switch (feature) { 202 case C_HUB_LOCAL_POWER: 203 /* FIXME: maybe plug bit 0 to the power input status, ··· 211 default: 212 result = -EPIPE; 213 } 214 return result; 215 } 216 ··· 238 static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature, 239 u8 selector, u8 port_idx) 240 { 241 struct device *dev = wusbhc->dev; 242 243 if (port_idx > wusbhc->ports_max) 244 + return -EINVAL; 245 246 switch (feature) { 247 /* According to USB2.0[11.24.2.13]p2, these features ··· 255 case USB_PORT_FEAT_C_SUSPEND: 256 case USB_PORT_FEAT_C_CONNECTION: 257 case USB_PORT_FEAT_C_RESET: 258 + return 0; 259 case USB_PORT_FEAT_POWER: 260 /* No such thing, but we fake it works */ 261 mutex_lock(&wusbhc->mutex); 262 wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER; 263 mutex_unlock(&wusbhc->mutex); 264 + return 0; 265 case USB_PORT_FEAT_RESET: 266 + return wusbhc_rh_port_reset(wusbhc, port_idx); 267 case USB_PORT_FEAT_ENABLE: 268 case USB_PORT_FEAT_SUSPEND: 269 dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n", 270 port_idx, feature, selector); 271 + return -ENOSYS; 272 default: 273 dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n", 274 port_idx, feature, selector); 275 + return -EPIPE; 276 } 277 + 278 + return 0; 279 } 280 281 /* ··· 294 static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature, 295 u8 selector, u8 port_idx) 296 { 297 + int result = 0; 298 struct device *dev = wusbhc->dev; 299 300 if (port_idx > wusbhc->ports_max) 301 + return -EINVAL; 302 303 mutex_lock(&wusbhc->mutex); 304 switch (feature) { 305 case USB_PORT_FEAT_POWER: /* fake port always on */ 306 /* According to USB2.0[11.24.2.7.1.4], no need to implement? 
*/ ··· 324 break; 325 case USB_PORT_FEAT_SUSPEND: 326 case USB_PORT_FEAT_C_SUSPEND: 327 dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n", 328 port_idx, feature, selector); 329 result = -ENOSYS; 330 break; 331 default: ··· 337 break; 338 } 339 mutex_unlock(&wusbhc->mutex); 340 + 341 return result; 342 } 343 ··· 351 static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx, 352 u32 *_buf, u16 wLength) 353 { 354 u16 *buf = (u16 *) _buf; 355 356 if (port_idx > wusbhc->ports_max) 357 + return -EINVAL; 358 + 359 mutex_lock(&wusbhc->mutex); 360 buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status); 361 buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change); 362 mutex_unlock(&wusbhc->mutex); 363 + 364 + return 0; 365 } 366 367 /*
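In rh.c above, a fake-port reset now finishes with the port enabled and both C_RESET and C_ENABLE change bits set so khubd picks the change up, and only authenticated devices get a real address update via wusb_dev_update_address(). A compile-and-run sketch of just that status/change bookkeeping; the *_STAT_* values are the usual hub port status bit positions and are included only so the sketch builds:

#include <stdio.h>

#define USB_PORT_STAT_ENABLE    0x0002
#define USB_PORT_STAT_RESET     0x0010
#define USB_PORT_STAT_C_ENABLE  0x0002
#define USB_PORT_STAT_C_RESET   0x0010

struct fake_port {
        unsigned short status;
        unsigned short change;
};

static void fake_port_reset(struct fake_port *port)
{
        /* Enter reset... */
        port->status |= USB_PORT_STAT_RESET;
        port->change |= USB_PORT_STAT_C_RESET;

        /* ...and leave it with the port enabled, both changes flagged. */
        port->status &= ~USB_PORT_STAT_RESET;
        port->status |= USB_PORT_STAT_ENABLE;
        port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE;
}

int main(void)
{
        struct fake_port port = { 0, 0 };

        fake_port_reset(&port);
        printf("status 0x%04x change 0x%04x\n",
               (unsigned)port.status, (unsigned)port.change);
        return 0;
}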
+6 -72
drivers/usb/wusbcore/security.c
··· 27 #include <linux/random.h> 28 #include "wusbhc.h" 29 30 - /* 31 - * DEBUG & SECURITY WARNING!!!! 32 - * 33 - * If you enable this past 1, the debug code will weaken the 34 - * cryptographic safety of the system (on purpose, for debugging). 35 - * 36 - * Weaken means: 37 - * we print secret keys and intermediate values all the way, 38 - */ 39 - #undef D_LOCAL 40 - #define D_LOCAL 2 41 - #include <linux/uwb/debug.h> 42 - 43 static void wusbhc_set_gtk_callback(struct urb *urb); 44 static void wusbhc_gtk_rekey_done_work(struct work_struct *work); 45 ··· 206 const void *itr, *top; 207 char buf[64]; 208 209 - d_fnstart(3, dev, "(usb_dev %p, wusb_dev %p)\n", usb_dev, wusb_dev); 210 result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, 211 0, &secd, sizeof(secd)); 212 if (result < sizeof(secd)) { ··· 214 goto error_secd; 215 } 216 secd_size = le16_to_cpu(secd.wTotalLength); 217 - d_printf(5, dev, "got %d bytes of sec descriptor, total is %d\n", 218 - result, secd_size); 219 secd_buf = kmalloc(secd_size, GFP_KERNEL); 220 if (secd_buf == NULL) { 221 dev_err(dev, "Can't allocate space for security descriptors\n"); ··· 226 "not enough data: %d\n", result); 227 goto error_secd_all; 228 } 229 - d_printf(5, dev, "got %d bytes of sec descriptors\n", result); 230 bytes = 0; 231 itr = secd_buf + sizeof(secd); 232 top = secd_buf + result; ··· 262 goto error_no_ccm1; 263 } 264 wusb_dev->ccm1_etd = *ccm1_etd; 265 - dev_info(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n", 266 - buf, wusb_et_name(ccm1_etd->bEncryptionType), 267 - ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex); 268 result = 0; 269 kfree(secd_buf); 270 out: 271 - d_fnend(3, dev, "(usb_dev %p, wusb_dev %p) = %d\n", 272 - usb_dev, wusb_dev, result); 273 return result; 274 275 ··· 284 /* Nothing so far */ 285 } 286 287 - static void hs_printk(unsigned level, struct device *dev, 288 - struct usb_handshake *hs) 289 - { 290 - d_printf(level, dev, 291 - " bMessageNumber: %u\n" 292 - " bStatus: %u\n" 293 - " tTKID: %02x %02x %02x\n" 294 - " CDID: %02x %02x %02x %02x %02x %02x %02x %02x\n" 295 - " %02x %02x %02x %02x %02x %02x %02x %02x\n" 296 - " nonce: %02x %02x %02x %02x %02x %02x %02x %02x\n" 297 - " %02x %02x %02x %02x %02x %02x %02x %02x\n" 298 - " MIC: %02x %02x %02x %02x %02x %02x %02x %02x\n", 299 - hs->bMessageNumber, hs->bStatus, 300 - hs->tTKID[2], hs->tTKID[1], hs->tTKID[0], 301 - hs->CDID[0], hs->CDID[1], hs->CDID[2], hs->CDID[3], 302 - hs->CDID[4], hs->CDID[5], hs->CDID[6], hs->CDID[7], 303 - hs->CDID[8], hs->CDID[9], hs->CDID[10], hs->CDID[11], 304 - hs->CDID[12], hs->CDID[13], hs->CDID[14], hs->CDID[15], 305 - hs->nonce[0], hs->nonce[1], hs->nonce[2], hs->nonce[3], 306 - hs->nonce[4], hs->nonce[5], hs->nonce[6], hs->nonce[7], 307 - hs->nonce[8], hs->nonce[9], hs->nonce[10], hs->nonce[11], 308 - hs->nonce[12], hs->nonce[13], hs->nonce[14], hs->nonce[15], 309 - hs->MIC[0], hs->MIC[1], hs->MIC[2], hs->MIC[3], 310 - hs->MIC[4], hs->MIC[5], hs->MIC[6], hs->MIC[7]); 311 - } 312 - 313 /** 314 * Update the address of an unauthenticated WUSB device 315 * ··· 293 * Before the device's address (as known by it) was usb_dev->devnum | 294 * 0x80 (unauthenticated address). With this we update it to usb_dev->devnum. 
295 */ 296 - static int wusb_dev_update_address(struct wusbhc *wusbhc, 297 - struct wusb_dev *wusb_dev) 298 { 299 int result = -ENOMEM; 300 struct usb_device *usb_dev = wusb_dev->usb_dev; ··· 376 get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce)); 377 memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */ 378 379 - d_printf(1, dev, "I: sending hs1:\n"); 380 - hs_printk(2, dev, &hs[0]); 381 - 382 result = usb_control_msg( 383 usb_dev, usb_sndctrlpipe(usb_dev, 0), 384 USB_REQ_SET_HANDSHAKE, ··· 396 dev_err(dev, "Handshake2: request failed: %d\n", result); 397 goto error_hs2; 398 } 399 - d_printf(1, dev, "got HS2:\n"); 400 - hs_printk(2, dev, &hs[1]); 401 402 result = -EINVAL; 403 if (hs[1].bMessageNumber != 2) { ··· 436 result); 437 goto error_hs2; 438 } 439 - d_printf(2, dev, "KCK:\n"); 440 - d_dump(2, dev, keydvt_out.kck, sizeof(keydvt_out.kck)); 441 - d_printf(2, dev, "PTK:\n"); 442 - d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk)); 443 444 /* Compute MIC and verify it */ 445 result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]); ··· 445 goto error_hs2; 446 } 447 448 - d_printf(2, dev, "MIC:\n"); 449 - d_dump(2, dev, mic, sizeof(mic)); 450 if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) { 451 dev_err(dev, "Handshake2 failed: MIC mismatch\n"); 452 goto error_hs2; ··· 464 goto error_hs2; 465 } 466 467 - d_printf(1, dev, "I: sending hs3:\n"); 468 - hs_printk(2, dev, &hs[2]); 469 - 470 result = usb_control_msg( 471 usb_dev, usb_sndctrlpipe(usb_dev, 0), 472 USB_REQ_SET_HANDSHAKE, ··· 474 goto error_hs3; 475 } 476 477 - d_printf(1, dev, "I: turning on encryption on host for device\n"); 478 - d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk)); 479 result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid, 480 keydvt_out.ptk, sizeof(keydvt_out.ptk)); 481 if (result < 0) 482 goto error_wusbhc_set_ptk; 483 484 - d_printf(1, dev, "I: setting a GTK\n"); 485 result = wusb_dev_set_gtk(wusbhc, wusb_dev); 486 if (result < 0) { 487 dev_err(dev, "Set GTK for device: request failed: %d\n", ··· 488 489 /* Update the device's address from unauth to auth */ 490 if (usb_dev->authenticated == 0) { 491 - d_printf(1, dev, "I: updating addres to auth from non-auth\n"); 492 result = wusb_dev_update_address(wusbhc, wusb_dev); 493 if (result < 0) 494 goto error_dev_update_address; 495 } 496 result = 0; 497 - d_printf(1, dev, "I: 4way handshke done, device authenticated\n"); 498 499 error_dev_update_address: 500 error_wusbhc_set_gtk: ··· 506 memset(&keydvt_in, 0, sizeof(keydvt_in)); 507 memset(&ccm_n, 0, sizeof(ccm_n)); 508 memset(mic, 0, sizeof(mic)); 509 - if (result < 0) { 510 - /* error path */ 511 wusb_dev_set_encryption(usb_dev, 0); 512 - } 513 error_dev_set_encryption: 514 kfree(hs); 515 error_kzalloc:
··· 27 #include <linux/random.h> 28 #include "wusbhc.h" 29 30 static void wusbhc_set_gtk_callback(struct urb *urb); 31 static void wusbhc_gtk_rekey_done_work(struct work_struct *work); 32 ··· 219 const void *itr, *top; 220 char buf[64]; 221 222 result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, 223 0, &secd, sizeof(secd)); 224 if (result < sizeof(secd)) { ··· 228 goto error_secd; 229 } 230 secd_size = le16_to_cpu(secd.wTotalLength); 231 secd_buf = kmalloc(secd_size, GFP_KERNEL); 232 if (secd_buf == NULL) { 233 dev_err(dev, "Can't allocate space for security descriptors\n"); ··· 242 "not enough data: %d\n", result); 243 goto error_secd_all; 244 } 245 bytes = 0; 246 itr = secd_buf + sizeof(secd); 247 top = secd_buf + result; ··· 279 goto error_no_ccm1; 280 } 281 wusb_dev->ccm1_etd = *ccm1_etd; 282 + dev_dbg(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n", 283 + buf, wusb_et_name(ccm1_etd->bEncryptionType), 284 + ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex); 285 result = 0; 286 kfree(secd_buf); 287 out: 288 return result; 289 290 ··· 303 /* Nothing so far */ 304 } 305 306 /** 307 * Update the address of an unauthenticated WUSB device 308 * ··· 338 * Before the device's address (as known by it) was usb_dev->devnum | 339 * 0x80 (unauthenticated address). With this we update it to usb_dev->devnum. 340 */ 341 + int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) 342 { 343 int result = -ENOMEM; 344 struct usb_device *usb_dev = wusb_dev->usb_dev; ··· 422 get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce)); 423 memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */ 424 425 result = usb_control_msg( 426 usb_dev, usb_sndctrlpipe(usb_dev, 0), 427 USB_REQ_SET_HANDSHAKE, ··· 445 dev_err(dev, "Handshake2: request failed: %d\n", result); 446 goto error_hs2; 447 } 448 449 result = -EINVAL; 450 if (hs[1].bMessageNumber != 2) { ··· 487 result); 488 goto error_hs2; 489 } 490 491 /* Compute MIC and verify it */ 492 result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]); ··· 500 goto error_hs2; 501 } 502 503 if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) { 504 dev_err(dev, "Handshake2 failed: MIC mismatch\n"); 505 goto error_hs2; ··· 521 goto error_hs2; 522 } 523 524 result = usb_control_msg( 525 usb_dev, usb_sndctrlpipe(usb_dev, 0), 526 USB_REQ_SET_HANDSHAKE, ··· 534 goto error_hs3; 535 } 536 537 result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid, 538 keydvt_out.ptk, sizeof(keydvt_out.ptk)); 539 if (result < 0) 540 goto error_wusbhc_set_ptk; 541 542 result = wusb_dev_set_gtk(wusbhc, wusb_dev); 543 if (result < 0) { 544 dev_err(dev, "Set GTK for device: request failed: %d\n", ··· 551 552 /* Update the device's address from unauth to auth */ 553 if (usb_dev->authenticated == 0) { 554 result = wusb_dev_update_address(wusbhc, wusb_dev); 555 if (result < 0) 556 goto error_dev_update_address; 557 } 558 result = 0; 559 + dev_info(dev, "device authenticated\n"); 560 561 error_dev_update_address: 562 error_wusbhc_set_gtk: ··· 570 memset(&keydvt_in, 0, sizeof(keydvt_in)); 571 memset(&ccm_n, 0, sizeof(ccm_n)); 572 memset(mic, 0, sizeof(mic)); 573 + if (result < 0) 574 wusb_dev_set_encryption(usb_dev, 0); 575 error_dev_set_encryption: 576 kfree(hs); 577 error_kzalloc:
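In security.c above, with the debug dumps removed, the handshake-2 validation that remains visible is the bMessageNumber check and the memcmp() of the received MIC against the locally derived one. Below is a dummy-data sketch of that kind of validation; the bStatus and TKID-echo checks are illustrative extras rather than claims about the driver, and struct fake_handshake only copies the field widths seen in the old hs_printk().

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct fake_handshake {
        uint8_t bMessageNumber;
        uint8_t bStatus;
        uint8_t tTKID[3];
        uint8_t MIC[8];
};

static int check_hs2(const struct fake_handshake *hs2,
                     const uint8_t tkid[3], const uint8_t expected_mic[8])
{
        if (hs2->bMessageNumber != 2)
                return -1;      /* not a handshake-2 message */
        if (hs2->bStatus != 0)
                return -1;      /* device reported an error */
        if (memcmp(hs2->tTKID, tkid, 3) != 0)
                return -1;      /* TKID was not echoed back */
        if (memcmp(hs2->MIC, expected_mic, 8) != 0)
                return -1;      /* MIC mismatch: derived keys disagree */
        return 0;
}

int main(void)
{
        const uint8_t tkid[3] = { 0x01, 0x02, 0x03 };
        const uint8_t mic[8]  = { 1, 2, 3, 4, 5, 6, 7, 8 };
        struct fake_handshake hs2 = { .bMessageNumber = 2, .bStatus = 0 };

        memcpy(hs2.tTKID, tkid, sizeof(tkid));
        memcpy(hs2.MIC, mic, sizeof(mic));
        printf("handshake-2: %s\n",
               check_hs2(&hs2, tkid, mic) == 0 ? "accept" : "reject");
        return 0;
}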
+5 -11
drivers/usb/wusbcore/wa-nep.c
··· 51 */ 52 #include <linux/workqueue.h> 53 #include <linux/ctype.h> 54 - #include <linux/uwb/debug.h> 55 #include "wa-hc.h" 56 #include "wusbhc.h" 57 ··· 139 /* FIXME: unimplemented WA NOTIFs */ 140 /* fallthru */ 141 default: 142 - if (printk_ratelimit()) { 143 - dev_err(dev, "HWA: unknown notification 0x%x, " 144 - "%zu bytes; discarding\n", 145 - notif_hdr->bNotifyType, 146 - (size_t)notif_hdr->bLength); 147 - dump_bytes(dev, notif_hdr, 16); 148 - } 149 break; 150 } 151 } ··· 157 * discard the data, as this should not happen. 158 */ 159 exhausted_buffer: 160 - if (!printk_ratelimit()) 161 - goto out; 162 dev_warn(dev, "HWA: device sent short notification, " 163 "%d bytes missing; discarding %d bytes.\n", 164 missing, (int)size); 165 - dump_bytes(dev, itr, size); 166 goto out; 167 } 168
··· 51 */ 52 #include <linux/workqueue.h> 53 #include <linux/ctype.h> 54 + 55 #include "wa-hc.h" 56 #include "wusbhc.h" 57 ··· 139 /* FIXME: unimplemented WA NOTIFs */ 140 /* fallthru */ 141 default: 142 + dev_err(dev, "HWA: unknown notification 0x%x, " 143 + "%zu bytes; discarding\n", 144 + notif_hdr->bNotifyType, 145 + (size_t)notif_hdr->bLength); 146 break; 147 } 148 } ··· 160 * discard the data, as this should not happen. 161 */ 162 exhausted_buffer: 163 dev_warn(dev, "HWA: device sent short notification, " 164 "%d bytes missing; discarding %d bytes.\n", 165 missing, (int)size); 166 goto out; 167 } 168
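In wa-nep.c above, unknown notifications are now reported without the rate limiting and hex dump, and a header that claims more bytes than the buffer holds is flagged as a short notification. A standalone sketch of that bLength-driven walk; the two-byte record layout and the names are simplified stand-ins for the WA notification headers:

#include <stdio.h>
#include <stdint.h>

struct fake_notif_hdr {
        uint8_t bLength;
        uint8_t bNotifyType;
};

static void dispatch(const uint8_t *buf, size_t size)
{
        while (size >= sizeof(struct fake_notif_hdr)) {
                const struct fake_notif_hdr *hdr = (const void *)buf;

                if (hdr->bLength < sizeof(*hdr))
                        return;         /* malformed, avoid looping forever */
                if (hdr->bLength > size) {
                        printf("short notification, %zu bytes missing\n",
                               (size_t)hdr->bLength - size);
                        return;
                }
                printf("notification 0x%02x, %u bytes\n",
                       (unsigned)hdr->bNotifyType, (unsigned)hdr->bLength);
                buf  += hdr->bLength;
                size -= hdr->bLength;
        }
}

int main(void)
{
        /* A 4-byte type 0x91 record followed by one truncated to 3 of 8 bytes. */
        const uint8_t buf[] = { 4, 0x91, 0x00, 0x00, 8, 0x92, 0x00 };

        dispatch(buf, sizeof(buf));
        return 0;
}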
+17 -51
drivers/usb/wusbcore/wa-rpipe.c
··· 60 #include <linux/init.h> 61 #include <asm/atomic.h> 62 #include <linux/bitmap.h> 63 #include "wusbhc.h" 64 #include "wa-hc.h" 65 - 66 - #define D_LOCAL 0 67 - #include <linux/uwb/debug.h> 68 - 69 70 static int __rpipe_get_descr(struct wahc *wa, 71 struct usb_rpipe_descriptor *descr, u16 index) ··· 73 /* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor() 74 * function because the arguments are different. 75 */ 76 - d_printf(1, dev, "rpipe %u: get descr\n", index); 77 result = usb_control_msg( 78 wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), 79 USB_REQ_GET_DESCRIPTOR, ··· 111 /* we cannot use the usb_get_descriptor() function because the 112 * arguments are different. 113 */ 114 - d_printf(1, dev, "rpipe %u: set descr\n", index); 115 result = usb_control_msg( 116 wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), 117 USB_REQ_SET_DESCRIPTOR, ··· 169 { 170 struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt); 171 u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex); 172 - d_fnstart(1, NULL, "(rpipe %p %u)\n", rpipe, index); 173 if (rpipe->ep) 174 rpipe->ep->hcpriv = NULL; 175 rpipe_put_idx(rpipe->wa, index); 176 wa_put(rpipe->wa); 177 kfree(rpipe); 178 - d_fnend(1, NULL, "(rpipe %p %u)\n", rpipe, index); 179 } 180 EXPORT_SYMBOL_GPL(rpipe_destroy); 181 ··· 196 struct wa_rpipe *rpipe; 197 struct device *dev = &wa->usb_iface->dev; 198 199 - d_fnstart(3, dev, "(wa %p crs 0x%02x)\n", wa, crs); 200 rpipe = kzalloc(sizeof(*rpipe), gfp); 201 if (rpipe == NULL) 202 return -ENOMEM; ··· 216 } 217 *prpipe = NULL; 218 kfree(rpipe); 219 - d_fnend(3, dev, "(wa %p crs 0x%02x) = -ENXIO\n", wa, crs); 220 return -ENXIO; 221 222 found: 223 set_bit(rpipe_idx, wa->rpipe_bm); 224 rpipe->wa = wa_get(wa); 225 *prpipe = rpipe; 226 - d_fnstart(3, dev, "(wa %p crs 0x%02x) = 0\n", wa, crs); 227 return 0; 228 } 229 ··· 230 int result; 231 struct device *dev = &wa->usb_iface->dev; 232 233 - d_printf(1, dev, "rpipe %u: reset\n", index); 234 result = usb_control_msg( 235 wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), 236 USB_REQ_RPIPE_RESET, ··· 266 struct usb_descriptor_header *hdr; 267 struct usb_wireless_ep_comp_descriptor *epcd; 268 269 - d_fnstart(3, dev, "(ep %p)\n", ep); 270 if (ep->desc.bEndpointAddress == 0) { 271 epcd = &epc0; 272 goto out; ··· 299 itr_size -= hdr->bDescriptorType; 300 } 301 out: 302 - d_fnend(3, dev, "(ep %p) = %p\n", ep, epcd); 303 return epcd; 304 } 305 ··· 317 struct usb_wireless_ep_comp_descriptor *epcd; 318 u8 unauth; 319 320 - d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n", 321 - rpipe, wa, ep, urb); 322 epcd = rpipe_epc_find(dev, ep); 323 if (epcd == NULL) { 324 dev_err(dev, "ep 0x%02x: can't find companion descriptor\n", ··· 336 /* FIXME: use maximum speed as supported or recommended by device */ 337 rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ? 338 UWB_PHY_RATE_53 : UWB_PHY_RATE_200; 339 - d_printf(2, dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n", 340 - urb->dev->devnum, urb->dev->devnum | unauth, 341 - le16_to_cpu(rpipe->descr.wRPipeIndex), 342 - usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed); 343 /* see security.c:wusb_update_address() */ 344 if (unlikely(urb->dev->devnum == 0x80)) 345 rpipe->descr.bDeviceAddress = 0; ··· 372 } 373 result = 0; 374 error: 375 - d_fnend(3, dev, "(rpipe %p wa %p ep %p urb %p) = %d\n", 376 - rpipe, wa, ep, urb, result); 377 return result; 378 } 379 ··· 391 u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 
0x80 : 0; 392 u8 portnum = wusb_port_no_to_idx(urb->dev->portnum); 393 394 - d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n", 395 - rpipe, wa, ep, urb); 396 #define AIM_CHECK(rdf, val, text) \ 397 do { \ 398 if (rpipe->descr.rdf != (val)) { \ ··· 435 struct wa_rpipe *rpipe; 436 u8 eptype; 437 438 - d_fnstart(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, 439 - gfp); 440 mutex_lock(&wa->rpipe_mutex); 441 rpipe = ep->hcpriv; 442 if (rpipe != NULL) { ··· 444 goto error; 445 } 446 __rpipe_get(rpipe); 447 - d_printf(2, dev, "ep 0x%02x: reusing rpipe %u\n", 448 - ep->desc.bEndpointAddress, 449 - le16_to_cpu(rpipe->descr.wRPipeIndex)); 450 } else { 451 /* hmm, assign idle rpipe, aim it */ 452 result = -ENOBUFS; ··· 462 ep->hcpriv = rpipe; 463 rpipe->ep = ep; 464 __rpipe_get(rpipe); /* for caching into ep->hcpriv */ 465 - d_printf(2, dev, "ep 0x%02x: using rpipe %u\n", 466 - ep->desc.bEndpointAddress, 467 - le16_to_cpu(rpipe->descr.wRPipeIndex)); 468 } 469 - d_dump(4, dev, &rpipe->descr, sizeof(rpipe->descr)); 470 error: 471 mutex_unlock(&wa->rpipe_mutex); 472 - d_fnend(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, gfp); 473 return result; 474 } 475 ··· 487 void wa_rpipes_destroy(struct wahc *wa) 488 { 489 struct device *dev = &wa->usb_iface->dev; 490 - d_fnstart(3, dev, "(wa %p)\n", wa); 491 if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) { 492 char buf[256]; 493 WARN_ON(1); ··· 495 dev_err(dev, "BUG: pipes not released on exit: %s\n", buf); 496 } 497 kfree(wa->rpipe_bm); 498 - d_fnend(3, dev, "(wa %p)\n", wa); 499 } 500 501 /* ··· 509 */ 510 void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep) 511 { 512 - struct device *dev = &wa->usb_iface->dev; 513 struct wa_rpipe *rpipe; 514 - d_fnstart(2, dev, "(wa %p ep %p)\n", wa, ep); 515 mutex_lock(&wa->rpipe_mutex); 516 rpipe = ep->hcpriv; 517 if (rpipe != NULL) { 518 - unsigned rc = atomic_read(&rpipe->refcnt.refcount); 519 - int result; 520 u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex); 521 522 - if (rc != 1) 523 - d_printf(1, dev, "(wa %p ep %p) rpipe %p refcnt %u\n", 524 - wa, ep, rpipe, rc); 525 - 526 - d_printf(1, dev, "rpipe %u: abort\n", index); 527 - result = usb_control_msg( 528 wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), 529 USB_REQ_RPIPE_ABORT, 530 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, 531 0, index, NULL, 0, 1000 /* FIXME: arbitrary */); 532 - if (result < 0 && result != -ENODEV /* dev is gone */) 533 - d_printf(1, dev, "(wa %p rpipe %u): abort failed: %d\n", 534 - wa, index, result); 535 rpipe_put(rpipe); 536 } 537 mutex_unlock(&wa->rpipe_mutex); 538 - d_fnend(2, dev, "(wa %p ep %p)\n", wa, ep); 539 - return; 540 } 541 EXPORT_SYMBOL_GPL(rpipe_ep_disable);
··· 60 #include <linux/init.h> 61 #include <asm/atomic.h> 62 #include <linux/bitmap.h> 63 + 64 #include "wusbhc.h" 65 #include "wa-hc.h" 66 67 static int __rpipe_get_descr(struct wahc *wa, 68 struct usb_rpipe_descriptor *descr, u16 index) ··· 76 /* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor() 77 * function because the arguments are different. 78 */ 79 result = usb_control_msg( 80 wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), 81 USB_REQ_GET_DESCRIPTOR, ··· 115 /* we cannot use the usb_get_descriptor() function because the 116 * arguments are different. 117 */ 118 result = usb_control_msg( 119 wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), 120 USB_REQ_SET_DESCRIPTOR, ··· 174 { 175 struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt); 176 u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex); 177 + 178 if (rpipe->ep) 179 rpipe->ep->hcpriv = NULL; 180 rpipe_put_idx(rpipe->wa, index); 181 wa_put(rpipe->wa); 182 kfree(rpipe); 183 } 184 EXPORT_SYMBOL_GPL(rpipe_destroy); 185 ··· 202 struct wa_rpipe *rpipe; 203 struct device *dev = &wa->usb_iface->dev; 204 205 rpipe = kzalloc(sizeof(*rpipe), gfp); 206 if (rpipe == NULL) 207 return -ENOMEM; ··· 223 } 224 *prpipe = NULL; 225 kfree(rpipe); 226 return -ENXIO; 227 228 found: 229 set_bit(rpipe_idx, wa->rpipe_bm); 230 rpipe->wa = wa_get(wa); 231 *prpipe = rpipe; 232 return 0; 233 } 234 ··· 239 int result; 240 struct device *dev = &wa->usb_iface->dev; 241 242 result = usb_control_msg( 243 wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), 244 USB_REQ_RPIPE_RESET, ··· 276 struct usb_descriptor_header *hdr; 277 struct usb_wireless_ep_comp_descriptor *epcd; 278 279 if (ep->desc.bEndpointAddress == 0) { 280 epcd = &epc0; 281 goto out; ··· 310 itr_size -= hdr->bDescriptorType; 311 } 312 out: 313 return epcd; 314 } 315 ··· 329 struct usb_wireless_ep_comp_descriptor *epcd; 330 u8 unauth; 331 332 epcd = rpipe_epc_find(dev, ep); 333 if (epcd == NULL) { 334 dev_err(dev, "ep 0x%02x: can't find companion descriptor\n", ··· 350 /* FIXME: use maximum speed as supported or recommended by device */ 351 rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ? 352 UWB_PHY_RATE_53 : UWB_PHY_RATE_200; 353 + 354 + dev_dbg(dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n", 355 + urb->dev->devnum, urb->dev->devnum | unauth, 356 + le16_to_cpu(rpipe->descr.wRPipeIndex), 357 + usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed); 358 + 359 /* see security.c:wusb_update_address() */ 360 if (unlikely(urb->dev->devnum == 0x80)) 361 rpipe->descr.bDeviceAddress = 0; ··· 384 } 385 result = 0; 386 error: 387 return result; 388 } 389 ··· 405 u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 
0x80 : 0; 406 u8 portnum = wusb_port_no_to_idx(urb->dev->portnum); 407 408 #define AIM_CHECK(rdf, val, text) \ 409 do { \ 410 if (rpipe->descr.rdf != (val)) { \ ··· 451 struct wa_rpipe *rpipe; 452 u8 eptype; 453 454 mutex_lock(&wa->rpipe_mutex); 455 rpipe = ep->hcpriv; 456 if (rpipe != NULL) { ··· 462 goto error; 463 } 464 __rpipe_get(rpipe); 465 + dev_dbg(dev, "ep 0x%02x: reusing rpipe %u\n", 466 + ep->desc.bEndpointAddress, 467 + le16_to_cpu(rpipe->descr.wRPipeIndex)); 468 } else { 469 /* hmm, assign idle rpipe, aim it */ 470 result = -ENOBUFS; ··· 480 ep->hcpriv = rpipe; 481 rpipe->ep = ep; 482 __rpipe_get(rpipe); /* for caching into ep->hcpriv */ 483 + dev_dbg(dev, "ep 0x%02x: using rpipe %u\n", 484 + ep->desc.bEndpointAddress, 485 + le16_to_cpu(rpipe->descr.wRPipeIndex)); 486 } 487 error: 488 mutex_unlock(&wa->rpipe_mutex); 489 return result; 490 } 491 ··· 507 void wa_rpipes_destroy(struct wahc *wa) 508 { 509 struct device *dev = &wa->usb_iface->dev; 510 + 511 if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) { 512 char buf[256]; 513 WARN_ON(1); ··· 515 dev_err(dev, "BUG: pipes not released on exit: %s\n", buf); 516 } 517 kfree(wa->rpipe_bm); 518 } 519 520 /* ··· 530 */ 531 void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep) 532 { 533 struct wa_rpipe *rpipe; 534 + 535 mutex_lock(&wa->rpipe_mutex); 536 rpipe = ep->hcpriv; 537 if (rpipe != NULL) { 538 u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex); 539 540 + usb_control_msg( 541 wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), 542 USB_REQ_RPIPE_ABORT, 543 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, 544 0, index, NULL, 0, 1000 /* FIXME: arbitrary */); 545 rpipe_put(rpipe); 546 } 547 mutex_unlock(&wa->rpipe_mutex); 548 } 549 EXPORT_SYMBOL_GPL(rpipe_ep_disable);
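In wa-rpipe.c above, rpipe indexes are handed out from wa->rpipe_bm (a free bit is found and claimed with set_bit(), rpipe_put_idx() releases it, and -ENXIO is returned when nothing is idle). A userspace sketch of that allocator pattern with a plain byte array instead of the kernel bitmap helpers; NUM_RPIPES and the function names here are invented:

#include <stdio.h>

#define NUM_RPIPES 32

static unsigned char rpipe_bitmap[NUM_RPIPES / 8];

static int rpipe_idx_get(void)
{
        int i;

        for (i = 0; i < NUM_RPIPES; i++) {
                if (!(rpipe_bitmap[i / 8] & (1 << (i % 8)))) {
                        rpipe_bitmap[i / 8] |= 1 << (i % 8);    /* claim it */
                        return i;
                }
        }
        return -1;      /* no idle rpipe; the driver reports -ENXIO */
}

static void rpipe_idx_put(int idx)
{
        rpipe_bitmap[idx / 8] &= ~(1 << (idx % 8));
}

int main(void)
{
        int a = rpipe_idx_get();
        int b = rpipe_idx_get();

        printf("allocated rpipes %d and %d\n", a, b);
        rpipe_idx_put(a);
        printf("after releasing %d, the next free index is %d\n", a, rpipe_idx_get());
        return 0;
}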
+43 -137
drivers/usb/wusbcore/wa-xfer.c
··· 82 #include <linux/init.h> 83 #include <linux/spinlock.h> 84 #include <linux/hash.h> 85 #include "wa-hc.h" 86 #include "wusbhc.h" 87 - 88 - #undef D_LOCAL 89 - #define D_LOCAL 0 /* 0 disabled, > 0 different levels... */ 90 - #include <linux/uwb/debug.h> 91 92 enum { 93 WA_SEGS_MAX = 255, ··· 177 } 178 } 179 kfree(xfer); 180 - d_printf(2, NULL, "xfer %p destroyed\n", xfer); 181 } 182 183 static void wa_xfer_get(struct wa_xfer *xfer) ··· 186 187 static void wa_xfer_put(struct wa_xfer *xfer) 188 { 189 - d_fnstart(3, NULL, "(xfer %p) -- ref count bef put %d\n", 190 - xfer, atomic_read(&xfer->refcnt.refcount)); 191 kref_put(&xfer->refcnt, wa_xfer_destroy); 192 - d_fnend(3, NULL, "(xfer %p) = void\n", xfer); 193 } 194 195 /* ··· 202 static void wa_xfer_giveback(struct wa_xfer *xfer) 203 { 204 unsigned long flags; 205 - d_fnstart(3, NULL, "(xfer %p)\n", xfer); 206 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags); 207 list_del_init(&xfer->list_node); 208 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags); ··· 210 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result); 211 wa_put(xfer->wa); 212 wa_xfer_put(xfer); 213 - d_fnend(3, NULL, "(xfer %p) = void\n", xfer); 214 } 215 216 /* ··· 219 */ 220 static void wa_xfer_completion(struct wa_xfer *xfer) 221 { 222 - d_fnstart(3, NULL, "(xfer %p)\n", xfer); 223 if (xfer->wusb_dev) 224 wusb_dev_put(xfer->wusb_dev); 225 rpipe_put(xfer->ep->hcpriv); 226 wa_xfer_giveback(xfer); 227 - d_fnend(3, NULL, "(xfer %p) = void\n", xfer); 228 - return; 229 } 230 231 /* ··· 232 */ 233 static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) 234 { 235 unsigned result, cnt; 236 struct wa_seg *seg; 237 struct urb *urb = xfer->urb; 238 unsigned found_short = 0; 239 240 - d_fnstart(3, NULL, "(xfer %p)\n", xfer); 241 result = xfer->segs_done == xfer->segs_submitted; 242 if (result == 0) 243 goto out; ··· 247 switch (seg->status) { 248 case WA_SEG_DONE: 249 if (found_short && seg->result > 0) { 250 - if (printk_ratelimit()) 251 - printk(KERN_ERR "xfer %p#%u: bad short " 252 - "segments (%zu)\n", xfer, cnt, 253 - seg->result); 254 urb->status = -EINVAL; 255 goto out; 256 } ··· 256 if (seg->result < xfer->seg_size 257 && cnt != xfer->segs-1) 258 found_short = 1; 259 - d_printf(2, NULL, "xfer %p#%u: DONE short %d " 260 - "result %zu urb->actual_length %d\n", 261 - xfer, seg->index, found_short, seg->result, 262 - urb->actual_length); 263 break; 264 case WA_SEG_ERROR: 265 xfer->result = seg->result; 266 - d_printf(2, NULL, "xfer %p#%u: ERROR result %zu\n", 267 - xfer, seg->index, seg->result); 268 goto out; 269 case WA_SEG_ABORTED: 270 - WARN_ON(urb->status != -ECONNRESET 271 - && urb->status != -ENOENT); 272 - d_printf(2, NULL, "xfer %p#%u ABORTED: result %d\n", 273 - xfer, seg->index, urb->status); 274 xfer->result = urb->status; 275 goto out; 276 default: 277 - /* if (printk_ratelimit()) */ 278 - printk(KERN_ERR "xfer %p#%u: " 279 - "is_done bad state %d\n", 280 - xfer, cnt, seg->status); 281 xfer->result = -EINVAL; 282 - WARN_ON(1); 283 goto out; 284 } 285 } 286 xfer->result = 0; 287 out: 288 - d_fnend(3, NULL, "(xfer %p) = void\n", xfer); 289 return result; 290 } 291 ··· 405 struct urb *urb = xfer->urb; 406 struct wa_rpipe *rpipe = xfer->ep->hcpriv; 407 408 - d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n", 409 - xfer, rpipe, urb); 410 switch (rpipe->descr.bmAttribute & 0x3) { 411 case USB_ENDPOINT_XFER_CONTROL: 412 *pxfer_type = WA_XFER_TYPE_CTL; ··· 451 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) 452 xfer->segs = 1; 453 error: 454 - d_fnend(3, 
dev, "(xfer %p [rpipe %p] urb %p) = %d\n", 455 - xfer, rpipe, urb, (int)result); 456 return result; 457 } 458 459 - /** Fill in the common request header and xfer-type specific data. */ 460 static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer, 461 struct wa_xfer_hdr *xfer_hdr0, 462 enum wa_xfer_type xfer_type, ··· 511 unsigned rpipe_ready = 0; 512 u8 done = 0; 513 514 - d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status); 515 switch (urb->status) { 516 case 0: 517 spin_lock_irqsave(&xfer->lock, flags); 518 wa = xfer->wa; 519 dev = &wa->usb_iface->dev; 520 - d_printf(2, dev, "xfer %p#%u: data out done (%d bytes)\n", 521 - xfer, seg->index, urb->actual_length); 522 if (seg->status < WA_SEG_PENDING) 523 seg->status = WA_SEG_PENDING; 524 seg->result = urb->actual_length; ··· 531 wa = xfer->wa; 532 dev = &wa->usb_iface->dev; 533 rpipe = xfer->ep->hcpriv; 534 - if (printk_ratelimit()) 535 - dev_err(dev, "xfer %p#%u: data out error %d\n", 536 - xfer, seg->index, urb->status); 537 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, 538 EDC_ERROR_TIMEFRAME)){ 539 dev_err(dev, "DTO: URB max acceptable errors " ··· 553 if (rpipe_ready) 554 wa_xfer_delayed_run(rpipe); 555 } 556 - d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status); 557 } 558 559 /* ··· 584 unsigned rpipe_ready; 585 u8 done = 0; 586 587 - d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status); 588 switch (urb->status) { 589 case 0: 590 spin_lock_irqsave(&xfer->lock, flags); 591 wa = xfer->wa; 592 dev = &wa->usb_iface->dev; 593 - d_printf(2, dev, "xfer %p#%u: request done\n", 594 - xfer, seg->index); 595 if (xfer->is_inbound && seg->status < WA_SEG_PENDING) 596 seg->status = WA_SEG_PENDING; 597 spin_unlock_irqrestore(&xfer->lock, flags); ··· 624 if (rpipe_ready) 625 wa_xfer_delayed_run(rpipe); 626 } 627 - d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status); 628 } 629 630 /* ··· 721 size_t xfer_hdr_size, cnt, transfer_size; 722 struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr; 723 724 - d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n", 725 - xfer, xfer->ep->hcpriv, urb); 726 - 727 result = __wa_xfer_setup_sizes(xfer, &xfer_type); 728 if (result < 0) 729 goto error_setup_sizes; ··· 756 result = 0; 757 error_setup_segs: 758 error_setup_sizes: 759 - d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n", 760 - xfer, xfer->ep->hcpriv, urb, result); 761 return result; 762 } 763 ··· 809 struct wa_xfer *xfer; 810 unsigned long flags; 811 812 - d_fnstart(1, dev, "(rpipe #%d) %d segments available\n", 813 - le16_to_cpu(rpipe->descr.wRPipeIndex), 814 - atomic_read(&rpipe->segs_available)); 815 spin_lock_irqsave(&rpipe->seg_lock, flags); 816 while (atomic_read(&rpipe->segs_available) > 0 817 && !list_empty(&rpipe->seg_list)) { ··· 817 list_del(&seg->list_node); 818 xfer = seg->xfer; 819 result = __wa_seg_submit(rpipe, xfer, seg); 820 - d_printf(1, dev, "xfer %p#%u submitted from delayed " 821 - "[%d segments available] %d\n", 822 - xfer, seg->index, 823 - atomic_read(&rpipe->segs_available), result); 824 if (unlikely(result < 0)) { 825 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 826 spin_lock_irqsave(&xfer->lock, flags); ··· 829 } 830 } 831 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 832 - d_fnend(1, dev, "(rpipe #%d) = void, %d segments available\n", 833 - le16_to_cpu(rpipe->descr.wRPipeIndex), 834 - atomic_read(&rpipe->segs_available)); 835 - 836 } 837 838 /* ··· 851 u8 available; 852 u8 empty; 853 854 - d_fnstart(3, dev, "(xfer %p [rpipe %p])\n", 855 - xfer, xfer->ep->hcpriv); 856 - 857 spin_lock_irqsave(&wa->xfer_list_lock, 
flags); 858 list_add_tail(&xfer->list_node, &wa->xfer_list); 859 spin_unlock_irqrestore(&wa->xfer_list_lock, flags); ··· 862 available = atomic_read(&rpipe->segs_available); 863 empty = list_empty(&rpipe->seg_list); 864 seg = xfer->seg[cnt]; 865 - d_printf(2, dev, "xfer %p#%u: available %u empty %u (%s)\n", 866 - xfer, cnt, available, empty, 867 - available == 0 || !empty ? "delayed" : "submitted"); 868 if (available == 0 || !empty) { 869 - d_printf(1, dev, "xfer %p#%u: delayed\n", xfer, cnt); 870 seg->status = WA_SEG_DELAYED; 871 list_add_tail(&seg->list_node, &rpipe->seg_list); 872 } else { 873 result = __wa_seg_submit(rpipe, xfer, seg); 874 - if (result < 0) 875 goto error_seg_submit; 876 } 877 xfer->segs_submitted++; 878 } 879 - spin_unlock_irqrestore(&rpipe->seg_lock, flags); 880 - d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer, 881 - xfer->ep->hcpriv); 882 - return result; 883 - 884 error_seg_submit: 885 - __wa_xfer_abort(xfer); 886 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 887 - d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer, 888 - xfer->ep->hcpriv); 889 return result; 890 } 891 ··· 912 struct urb *urb = xfer->urb; 913 struct wahc *wa = xfer->wa; 914 struct wusbhc *wusbhc = wa->wusb; 915 - struct device *dev = &wa->usb_iface->dev; 916 struct wusb_dev *wusb_dev; 917 unsigned done; 918 919 - d_fnstart(3, dev, "(wa %p urb %p)\n", wa, urb); 920 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp); 921 if (result < 0) 922 goto error_rpipe_get; ··· 943 if (result < 0) 944 goto error_xfer_submit; 945 spin_unlock_irqrestore(&xfer->lock, flags); 946 - d_fnend(3, dev, "(wa %p urb %p) = void\n", wa, urb); 947 return; 948 949 /* this is basically wa_xfer_completion() broken up wa_xfer_giveback() ··· 960 error_rpipe_get: 961 xfer->result = result; 962 wa_xfer_giveback(xfer); 963 - d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result); 964 return; 965 966 error_xfer_submit: ··· 968 spin_unlock_irqrestore(&xfer->lock, flags); 969 if (done) 970 wa_xfer_completion(xfer); 971 - d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result); 972 - return; 973 } 974 975 /* ··· 983 void wa_urb_enqueue_run(struct work_struct *ws) 984 { 985 struct wahc *wa = container_of(ws, struct wahc, xfer_work); 986 - struct device *dev = &wa->usb_iface->dev; 987 struct wa_xfer *xfer, *next; 988 struct urb *urb; 989 990 - d_fnstart(3, dev, "(wa %p)\n", wa); 991 spin_lock_irq(&wa->xfer_list_lock); 992 list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list, 993 list_node) { ··· 999 spin_lock_irq(&wa->xfer_list_lock); 1000 } 1001 spin_unlock_irq(&wa->xfer_list_lock); 1002 - d_fnend(3, dev, "(wa %p) = void\n", wa); 1003 } 1004 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run); 1005 ··· 1023 unsigned long my_flags; 1024 unsigned cant_sleep = irqs_disabled() | in_atomic(); 1025 1026 - d_fnstart(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x)\n", 1027 - wa, ep, urb, urb->transfer_buffer_length, gfp); 1028 - 1029 if (urb->transfer_buffer == NULL 1030 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) 1031 && urb->transfer_buffer_length != 0) { ··· 1044 xfer->gfp = gfp; 1045 xfer->ep = ep; 1046 urb->hcpriv = xfer; 1047 - d_printf(2, dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n", 1048 - xfer, urb, urb->pipe, urb->transfer_buffer_length, 1049 - urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma", 1050 - urb->pipe & USB_DIR_IN ? "inbound" : "outbound", 1051 - cant_sleep ? 
"deferred" : "inline"); 1052 if (cant_sleep) { 1053 usb_get_urb(urb); 1054 spin_lock_irqsave(&wa->xfer_list_lock, my_flags); ··· 1060 } else { 1061 wa_urb_enqueue_b(xfer); 1062 } 1063 - d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = 0\n", 1064 - wa, ep, urb, urb->transfer_buffer_length, gfp); 1065 return 0; 1066 1067 error_dequeued: 1068 kfree(xfer); 1069 error_kmalloc: 1070 - d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = %d\n", 1071 - wa, ep, urb, urb->transfer_buffer_length, gfp, result); 1072 return result; 1073 } 1074 EXPORT_SYMBOL_GPL(wa_urb_enqueue); ··· 1089 */ 1090 int wa_urb_dequeue(struct wahc *wa, struct urb *urb) 1091 { 1092 - struct device *dev = &wa->usb_iface->dev; 1093 unsigned long flags, flags2; 1094 struct wa_xfer *xfer; 1095 struct wa_seg *seg; ··· 1096 unsigned cnt; 1097 unsigned rpipe_ready = 0; 1098 1099 - d_fnstart(3, dev, "(wa %p, urb %p)\n", wa, urb); 1100 - 1101 - d_printf(1, dev, "xfer %p urb %p: aborting\n", urb->hcpriv, urb); 1102 xfer = urb->hcpriv; 1103 if (xfer == NULL) { 1104 /* NOthing setup yet enqueue will see urb->status != ··· 1164 wa_xfer_completion(xfer); 1165 if (rpipe_ready) 1166 wa_xfer_delayed_run(rpipe); 1167 - d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); 1168 return 0; 1169 1170 out_unlock: 1171 spin_unlock_irqrestore(&xfer->lock, flags); 1172 out: 1173 - d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); 1174 return 0; 1175 1176 dequeue_delayed: ··· 1178 spin_unlock_irqrestore(&xfer->lock, flags); 1179 wa_xfer_giveback(xfer); 1180 usb_put_urb(urb); /* we got a ref in enqueue() */ 1181 - d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); 1182 return 0; 1183 } 1184 EXPORT_SYMBOL_GPL(wa_urb_dequeue); ··· 1253 u8 usb_status; 1254 unsigned rpipe_ready = 0; 1255 1256 - d_fnstart(3, dev, "(wa %p xfer %p)\n", wa, xfer); 1257 spin_lock_irqsave(&xfer->lock, flags); 1258 seg_idx = xfer_result->bTransferSegment & 0x7f; 1259 if (unlikely(seg_idx >= xfer->segs)) ··· 1260 seg = xfer->seg[seg_idx]; 1261 rpipe = xfer->ep->hcpriv; 1262 usb_status = xfer_result->bTransferStatus; 1263 - d_printf(2, dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n", 1264 - xfer, seg_idx, usb_status, seg->status); 1265 if (seg->status == WA_SEG_ABORTED 1266 || seg->status == WA_SEG_ERROR) /* already handled */ 1267 goto segment_aborted; ··· 1317 wa_xfer_completion(xfer); 1318 if (rpipe_ready) 1319 wa_xfer_delayed_run(rpipe); 1320 - d_fnend(3, dev, "(wa %p xfer %p) = void\n", wa, xfer); 1321 return; 1322 - 1323 1324 error_submit_buf_in: 1325 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { ··· 1340 wa_xfer_completion(xfer); 1341 if (rpipe_ready) 1342 wa_xfer_delayed_run(rpipe); 1343 - d_fnend(3, dev, "(wa %p xfer %p) = void [segment/DTI-submit error]\n", 1344 - wa, xfer); 1345 return; 1346 - 1347 1348 error_bad_seg: 1349 spin_unlock_irqrestore(&xfer->lock, flags); ··· 1352 "exceeded, resetting device\n"); 1353 wa_reset_all(wa); 1354 } 1355 - d_fnend(3, dev, "(wa %p xfer %p) = void [bad seg]\n", wa, xfer); 1356 return; 1357 - 1358 1359 segment_aborted: 1360 /* nothing to do, as the aborter did the completion */ 1361 spin_unlock_irqrestore(&xfer->lock, flags); 1362 - d_fnend(3, dev, "(wa %p xfer %p) = void [segment aborted]\n", 1363 - wa, xfer); 1364 - return; 1365 - 1366 } 1367 1368 /* ··· 1380 unsigned long flags; 1381 u8 done = 0; 1382 1383 - d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status); 1384 switch (urb->status) { 1385 case 0: 1386 spin_lock_irqsave(&xfer->lock, flags); 1387 wa = xfer->wa; 1388 dev = &wa->usb_iface->dev; 1389 
rpipe = xfer->ep->hcpriv; 1390 - d_printf(2, dev, "xfer %p#%u: data in done (%zu bytes)\n", 1391 - xfer, seg->index, (size_t)urb->actual_length); 1392 seg->status = WA_SEG_DONE; 1393 seg->result = urb->actual_length; 1394 xfer->segs_done++; ··· 1428 if (rpipe_ready) 1429 wa_xfer_delayed_run(rpipe); 1430 } 1431 - d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status); 1432 } 1433 1434 /* ··· 1466 struct wa_xfer *xfer; 1467 u8 usb_status; 1468 1469 - d_fnstart(3, dev, "(%p)\n", wa); 1470 BUG_ON(wa->dti_urb != urb); 1471 switch (wa->dti_urb->status) { 1472 case 0: 1473 /* We have a xfer result buffer; check it */ 1474 - d_printf(2, dev, "DTI: xfer result %d bytes at %p\n", 1475 - urb->actual_length, urb->transfer_buffer); 1476 - d_dump(3, dev, urb->transfer_buffer, urb->actual_length); 1477 if (wa->dti_urb->actual_length != sizeof(*xfer_result)) { 1478 dev_err(dev, "DTI Error: xfer result--bad size " 1479 "xfer result (%d bytes vs %zu needed)\n", ··· 1533 wa_reset_all(wa); 1534 } 1535 out: 1536 - d_fnend(3, dev, "(%p) = void\n", wa); 1537 return; 1538 } 1539 ··· 1563 struct wa_notif_xfer *notif_xfer; 1564 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd; 1565 1566 - d_fnstart(4, dev, "(%p, %p)\n", wa, notif_hdr); 1567 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr); 1568 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER); 1569 ··· 1602 goto error_dti_urb_submit; 1603 } 1604 out: 1605 - d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr); 1606 return; 1607 1608 error_dti_urb_submit: ··· 1612 error_dti_urb_alloc: 1613 error: 1614 wa_reset_all(wa); 1615 - d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr); 1616 - return; 1617 }
··· 82 #include <linux/init.h> 83 #include <linux/spinlock.h> 84 #include <linux/hash.h> 85 + 86 #include "wa-hc.h" 87 #include "wusbhc.h" 88 89 enum { 90 WA_SEGS_MAX = 255, ··· 180 } 181 } 182 kfree(xfer); 183 } 184 185 static void wa_xfer_get(struct wa_xfer *xfer) ··· 190 191 static void wa_xfer_put(struct wa_xfer *xfer) 192 { 193 kref_put(&xfer->refcnt, wa_xfer_destroy); 194 } 195 196 /* ··· 209 static void wa_xfer_giveback(struct wa_xfer *xfer) 210 { 211 unsigned long flags; 212 + 213 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags); 214 list_del_init(&xfer->list_node); 215 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags); ··· 217 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result); 218 wa_put(xfer->wa); 219 wa_xfer_put(xfer); 220 } 221 222 /* ··· 227 */ 228 static void wa_xfer_completion(struct wa_xfer *xfer) 229 { 230 if (xfer->wusb_dev) 231 wusb_dev_put(xfer->wusb_dev); 232 rpipe_put(xfer->ep->hcpriv); 233 wa_xfer_giveback(xfer); 234 } 235 236 /* ··· 243 */ 244 static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) 245 { 246 + struct device *dev = &xfer->wa->usb_iface->dev; 247 unsigned result, cnt; 248 struct wa_seg *seg; 249 struct urb *urb = xfer->urb; 250 unsigned found_short = 0; 251 252 result = xfer->segs_done == xfer->segs_submitted; 253 if (result == 0) 254 goto out; ··· 258 switch (seg->status) { 259 case WA_SEG_DONE: 260 if (found_short && seg->result > 0) { 261 + dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n", 262 + xfer, cnt, seg->result); 263 urb->status = -EINVAL; 264 goto out; 265 } ··· 269 if (seg->result < xfer->seg_size 270 && cnt != xfer->segs-1) 271 found_short = 1; 272 + dev_dbg(dev, "xfer %p#%u: DONE short %d " 273 + "result %zu urb->actual_length %d\n", 274 + xfer, seg->index, found_short, seg->result, 275 + urb->actual_length); 276 break; 277 case WA_SEG_ERROR: 278 xfer->result = seg->result; 279 + dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n", 280 + xfer, seg->index, seg->result); 281 goto out; 282 case WA_SEG_ABORTED: 283 + dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n", 284 + xfer, seg->index, urb->status); 285 xfer->result = urb->status; 286 goto out; 287 default: 288 + dev_warn(dev, "xfer %p#%u: is_done bad state %d\n", 289 + xfer, cnt, seg->status); 290 xfer->result = -EINVAL; 291 goto out; 292 } 293 } 294 xfer->result = 0; 295 out: 296 return result; 297 } 298 ··· 424 struct urb *urb = xfer->urb; 425 struct wa_rpipe *rpipe = xfer->ep->hcpriv; 426 427 switch (rpipe->descr.bmAttribute & 0x3) { 428 case USB_ENDPOINT_XFER_CONTROL: 429 *pxfer_type = WA_XFER_TYPE_CTL; ··· 472 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) 473 xfer->segs = 1; 474 error: 475 return result; 476 } 477 478 + /* Fill in the common request header and xfer-type specific data. 
*/ 479 static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer, 480 struct wa_xfer_hdr *xfer_hdr0, 481 enum wa_xfer_type xfer_type, ··· 534 unsigned rpipe_ready = 0; 535 u8 done = 0; 536 537 switch (urb->status) { 538 case 0: 539 spin_lock_irqsave(&xfer->lock, flags); 540 wa = xfer->wa; 541 dev = &wa->usb_iface->dev; 542 + dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n", 543 + xfer, seg->index, urb->actual_length); 544 if (seg->status < WA_SEG_PENDING) 545 seg->status = WA_SEG_PENDING; 546 seg->result = urb->actual_length; ··· 555 wa = xfer->wa; 556 dev = &wa->usb_iface->dev; 557 rpipe = xfer->ep->hcpriv; 558 + dev_dbg(dev, "xfer %p#%u: data out error %d\n", 559 + xfer, seg->index, urb->status); 560 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, 561 EDC_ERROR_TIMEFRAME)){ 562 dev_err(dev, "DTO: URB max acceptable errors " ··· 578 if (rpipe_ready) 579 wa_xfer_delayed_run(rpipe); 580 } 581 } 582 583 /* ··· 610 unsigned rpipe_ready; 611 u8 done = 0; 612 613 switch (urb->status) { 614 case 0: 615 spin_lock_irqsave(&xfer->lock, flags); 616 wa = xfer->wa; 617 dev = &wa->usb_iface->dev; 618 + dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index); 619 if (xfer->is_inbound && seg->status < WA_SEG_PENDING) 620 seg->status = WA_SEG_PENDING; 621 spin_unlock_irqrestore(&xfer->lock, flags); ··· 652 if (rpipe_ready) 653 wa_xfer_delayed_run(rpipe); 654 } 655 } 656 657 /* ··· 750 size_t xfer_hdr_size, cnt, transfer_size; 751 struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr; 752 753 result = __wa_xfer_setup_sizes(xfer, &xfer_type); 754 if (result < 0) 755 goto error_setup_sizes; ··· 788 result = 0; 789 error_setup_segs: 790 error_setup_sizes: 791 return result; 792 } 793 ··· 843 struct wa_xfer *xfer; 844 unsigned long flags; 845 846 spin_lock_irqsave(&rpipe->seg_lock, flags); 847 while (atomic_read(&rpipe->segs_available) > 0 848 && !list_empty(&rpipe->seg_list)) { ··· 854 list_del(&seg->list_node); 855 xfer = seg->xfer; 856 result = __wa_seg_submit(rpipe, xfer, seg); 857 + dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n", 858 + xfer, seg->index, atomic_read(&rpipe->segs_available), result); 859 if (unlikely(result < 0)) { 860 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 861 spin_lock_irqsave(&xfer->lock, flags); ··· 868 } 869 } 870 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 871 } 872 873 /* ··· 894 u8 available; 895 u8 empty; 896 897 spin_lock_irqsave(&wa->xfer_list_lock, flags); 898 list_add_tail(&xfer->list_node, &wa->xfer_list); 899 spin_unlock_irqrestore(&wa->xfer_list_lock, flags); ··· 908 available = atomic_read(&rpipe->segs_available); 909 empty = list_empty(&rpipe->seg_list); 910 seg = xfer->seg[cnt]; 911 + dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n", 912 + xfer, cnt, available, empty, 913 + available == 0 || !empty ? 
"delayed" : "submitted"); 914 if (available == 0 || !empty) { 915 + dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt); 916 seg->status = WA_SEG_DELAYED; 917 list_add_tail(&seg->list_node, &rpipe->seg_list); 918 } else { 919 result = __wa_seg_submit(rpipe, xfer, seg); 920 + if (result < 0) { 921 + __wa_xfer_abort(xfer); 922 goto error_seg_submit; 923 + } 924 } 925 xfer->segs_submitted++; 926 } 927 error_seg_submit: 928 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 929 return result; 930 } 931 ··· 964 struct urb *urb = xfer->urb; 965 struct wahc *wa = xfer->wa; 966 struct wusbhc *wusbhc = wa->wusb; 967 struct wusb_dev *wusb_dev; 968 unsigned done; 969 970 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp); 971 if (result < 0) 972 goto error_rpipe_get; ··· 997 if (result < 0) 998 goto error_xfer_submit; 999 spin_unlock_irqrestore(&xfer->lock, flags); 1000 return; 1001 1002 /* this is basically wa_xfer_completion() broken up wa_xfer_giveback() ··· 1015 error_rpipe_get: 1016 xfer->result = result; 1017 wa_xfer_giveback(xfer); 1018 return; 1019 1020 error_xfer_submit: ··· 1024 spin_unlock_irqrestore(&xfer->lock, flags); 1025 if (done) 1026 wa_xfer_completion(xfer); 1027 } 1028 1029 /* ··· 1041 void wa_urb_enqueue_run(struct work_struct *ws) 1042 { 1043 struct wahc *wa = container_of(ws, struct wahc, xfer_work); 1044 struct wa_xfer *xfer, *next; 1045 struct urb *urb; 1046 1047 spin_lock_irq(&wa->xfer_list_lock); 1048 list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list, 1049 list_node) { ··· 1059 spin_lock_irq(&wa->xfer_list_lock); 1060 } 1061 spin_unlock_irq(&wa->xfer_list_lock); 1062 } 1063 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run); 1064 ··· 1084 unsigned long my_flags; 1085 unsigned cant_sleep = irqs_disabled() | in_atomic(); 1086 1087 if (urb->transfer_buffer == NULL 1088 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) 1089 && urb->transfer_buffer_length != 0) { ··· 1108 xfer->gfp = gfp; 1109 xfer->ep = ep; 1110 urb->hcpriv = xfer; 1111 + 1112 + dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n", 1113 + xfer, urb, urb->pipe, urb->transfer_buffer_length, 1114 + urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma", 1115 + urb->pipe & USB_DIR_IN ? "inbound" : "outbound", 1116 + cant_sleep ? 
"deferred" : "inline"); 1117 + 1118 if (cant_sleep) { 1119 usb_get_urb(urb); 1120 spin_lock_irqsave(&wa->xfer_list_lock, my_flags); ··· 1122 } else { 1123 wa_urb_enqueue_b(xfer); 1124 } 1125 return 0; 1126 1127 error_dequeued: 1128 kfree(xfer); 1129 error_kmalloc: 1130 return result; 1131 } 1132 EXPORT_SYMBOL_GPL(wa_urb_enqueue); ··· 1155 */ 1156 int wa_urb_dequeue(struct wahc *wa, struct urb *urb) 1157 { 1158 unsigned long flags, flags2; 1159 struct wa_xfer *xfer; 1160 struct wa_seg *seg; ··· 1163 unsigned cnt; 1164 unsigned rpipe_ready = 0; 1165 1166 xfer = urb->hcpriv; 1167 if (xfer == NULL) { 1168 /* NOthing setup yet enqueue will see urb->status != ··· 1234 wa_xfer_completion(xfer); 1235 if (rpipe_ready) 1236 wa_xfer_delayed_run(rpipe); 1237 return 0; 1238 1239 out_unlock: 1240 spin_unlock_irqrestore(&xfer->lock, flags); 1241 out: 1242 return 0; 1243 1244 dequeue_delayed: ··· 1250 spin_unlock_irqrestore(&xfer->lock, flags); 1251 wa_xfer_giveback(xfer); 1252 usb_put_urb(urb); /* we got a ref in enqueue() */ 1253 return 0; 1254 } 1255 EXPORT_SYMBOL_GPL(wa_urb_dequeue); ··· 1326 u8 usb_status; 1327 unsigned rpipe_ready = 0; 1328 1329 spin_lock_irqsave(&xfer->lock, flags); 1330 seg_idx = xfer_result->bTransferSegment & 0x7f; 1331 if (unlikely(seg_idx >= xfer->segs)) ··· 1334 seg = xfer->seg[seg_idx]; 1335 rpipe = xfer->ep->hcpriv; 1336 usb_status = xfer_result->bTransferStatus; 1337 + dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n", 1338 + xfer, seg_idx, usb_status, seg->status); 1339 if (seg->status == WA_SEG_ABORTED 1340 || seg->status == WA_SEG_ERROR) /* already handled */ 1341 goto segment_aborted; ··· 1391 wa_xfer_completion(xfer); 1392 if (rpipe_ready) 1393 wa_xfer_delayed_run(rpipe); 1394 return; 1395 1396 error_submit_buf_in: 1397 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { ··· 1416 wa_xfer_completion(xfer); 1417 if (rpipe_ready) 1418 wa_xfer_delayed_run(rpipe); 1419 return; 1420 1421 error_bad_seg: 1422 spin_unlock_irqrestore(&xfer->lock, flags); ··· 1431 "exceeded, resetting device\n"); 1432 wa_reset_all(wa); 1433 } 1434 return; 1435 1436 segment_aborted: 1437 /* nothing to do, as the aborter did the completion */ 1438 spin_unlock_irqrestore(&xfer->lock, flags); 1439 } 1440 1441 /* ··· 1465 unsigned long flags; 1466 u8 done = 0; 1467 1468 switch (urb->status) { 1469 case 0: 1470 spin_lock_irqsave(&xfer->lock, flags); 1471 wa = xfer->wa; 1472 dev = &wa->usb_iface->dev; 1473 rpipe = xfer->ep->hcpriv; 1474 + dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n", 1475 + xfer, seg->index, (size_t)urb->actual_length); 1476 seg->status = WA_SEG_DONE; 1477 seg->result = urb->actual_length; 1478 xfer->segs_done++; ··· 1514 if (rpipe_ready) 1515 wa_xfer_delayed_run(rpipe); 1516 } 1517 } 1518 1519 /* ··· 1553 struct wa_xfer *xfer; 1554 u8 usb_status; 1555 1556 BUG_ON(wa->dti_urb != urb); 1557 switch (wa->dti_urb->status) { 1558 case 0: 1559 /* We have a xfer result buffer; check it */ 1560 + dev_dbg(dev, "DTI: xfer result %d bytes at %p\n", 1561 + urb->actual_length, urb->transfer_buffer); 1562 if (wa->dti_urb->actual_length != sizeof(*xfer_result)) { 1563 dev_err(dev, "DTI Error: xfer result--bad size " 1564 "xfer result (%d bytes vs %zu needed)\n", ··· 1622 wa_reset_all(wa); 1623 } 1624 out: 1625 return; 1626 } 1627 ··· 1653 struct wa_notif_xfer *notif_xfer; 1654 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd; 1655 1656 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr); 1657 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER); 
1658 ··· 1693 goto error_dti_urb_submit; 1694 } 1695 out: 1696 return; 1697 1698 error_dti_urb_submit: ··· 1704 error_dti_urb_alloc: 1705 error: 1706 wa_reset_all(wa); 1707 }
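The rewritten __wa_xfer_is_done() above keeps the rule that a transfer only completes once every submitted segment has finished, and that a short segment may only be followed by segments carrying no data. Below is a minimal user-space sketch of that short-segment rule; the seg structure, error values and function name are simplified stand-ins invented for the example, not the driver's real types.

#include <stddef.h>
#include <stdio.h>

enum seg_status { SEG_DONE, SEG_ERROR, SEG_ABORTED };

struct seg {
	enum seg_status status;
	size_t result;			/* bytes actually transferred */
};

/* Return 0 if the completed segments form a valid transfer, else a negative error. */
static int check_completed_segs(const struct seg *segs, unsigned int nsegs,
				size_t seg_size)
{
	unsigned int i;
	int found_short = 0;

	for (i = 0; i < nsegs; i++) {
		if (segs[i].status != SEG_DONE)
			return -5;	/* stand-in for an I/O error */
		if (found_short && segs[i].result > 0)
			return -22;	/* data after a short segment is invalid */
		if (segs[i].result < seg_size && i != nsegs - 1)
			found_short = 1;
	}
	return 0;
}

int main(void)
{
	struct seg ok[]  = { { SEG_DONE, 1024 }, { SEG_DONE, 512 }, { SEG_DONE, 0 } };
	struct seg bad[] = { { SEG_DONE, 512 },  { SEG_DONE, 1024 } };

	printf("short-then-empty: %d\n", check_completed_segs(ok, 3, 1024));  /* 0 */
	printf("short-then-data:  %d\n", check_completed_segs(bad, 2, 1024)); /* -22 */
	return 0;
}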
+19 -17
drivers/usb/wusbcore/wusbhc.h
··· 64 #include <linux/uwb.h> 65 #include <linux/usb/wusb.h> 66 67 68 /** 69 * Wireless USB device ··· 154 u16 status; 155 u16 change; 156 struct wusb_dev *wusb_dev; /* connected device's info */ 157 - unsigned reset_count; 158 u32 ptk_tkid; 159 }; 160 ··· 204 * @mmcies_max Max number of Information Elements this HC can send 205 * in its MMC. Read-only. 206 * 207 * @mmcie_add HC specific operation (WHCI or HWA) for adding an 208 * MMCIE. 209 * 210 * @mmcie_rm HC specific operation (WHCI or HWA) for removing an 211 * MMCIE. 212 - * 213 - * @enc_types Array which describes the encryptions methods 214 - * supported by the host as described in WUSB1.0 -- 215 - * one entry per supported method. As of WUSB1.0 there 216 - * is only four methods, we make space for eight just in 217 - * case they decide to add some more (and pray they do 218 - * it in sequential order). if 'enc_types[enc_method] 219 - * != 0', then it is supported by the host. enc_method 220 - * is USB_ENC_TYPE*. 221 * 222 * @set_ptk: Set the PTK and enable encryption for a device. Or, if 223 * the supplied key is NULL, disable encryption for that ··· 252 struct uwb_pal pal; 253 254 unsigned trust_timeout; /* in jiffies */ 255 - struct wuie_host_info *wuie_host_info; /* Includes CHID */ 256 257 struct mutex mutex; /* locks everything else */ 258 u16 cluster_id; /* Wireless USB Cluster ID */ ··· 273 u8 mmcies_max; 274 /* FIXME: make wusbhc_ops? */ 275 int (*start)(struct wusbhc *wusbhc); 276 - void (*stop)(struct wusbhc *wusbhc); 277 int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, 278 u8 handle, struct wuie_hdr *wuie); 279 int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle); ··· 377 usb_put_hcd(&wusbhc->usb_hcd); 378 } 379 380 - int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid); 381 void wusbhc_stop(struct wusbhc *wusbhc); 382 extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *); 383 384 /* Device connect handling */ 385 extern int wusbhc_devconnect_create(struct wusbhc *); 386 extern void wusbhc_devconnect_destroy(struct wusbhc *); 387 - extern int wusbhc_devconnect_start(struct wusbhc *wusbhc, 388 - const struct wusb_ckhdid *chid); 389 extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc); 390 - extern int wusbhc_devconnect_auth(struct wusbhc *, u8); 391 extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr, 392 struct wusb_dn_hdr *dn_hdr, size_t size); 393 - extern int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port); 394 extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port); 395 extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val, 396 void *priv); ··· 433 extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *, 434 struct wusb_ckhdid *ck); 435 void wusbhc_gtk_rekey(struct wusbhc *wusbhc); 436 437 438 /* WUSB Cluster ID handling */
··· 64 #include <linux/uwb.h> 65 #include <linux/usb/wusb.h> 66 67 + /* 68 + * Time from a WUSB channel stop request to the last transmitted MMC. 69 + * 70 + * This needs to be > 4.096 ms in case no MMCs can be transmitted in 71 + * zone 0. 72 + */ 73 + #define WUSB_CHANNEL_STOP_DELAY_MS 8 74 75 /** 76 * Wireless USB device ··· 147 u16 status; 148 u16 change; 149 struct wusb_dev *wusb_dev; /* connected device's info */ 150 u32 ptk_tkid; 151 }; 152 ··· 198 * @mmcies_max Max number of Information Elements this HC can send 199 * in its MMC. Read-only. 200 * 201 + * @start Start the WUSB channel. 202 + * 203 + * @stop Stop the WUSB channel after the specified number of 204 + * milliseconds. Channel Stop IEs should be transmitted 205 + * as required by [WUSB] 4.16.2.1. 206 + * 207 * @mmcie_add HC specific operation (WHCI or HWA) for adding an 208 * MMCIE. 209 * 210 * @mmcie_rm HC specific operation (WHCI or HWA) for removing an 211 * MMCIE. 212 * 213 * @set_ptk: Set the PTK and enable encryption for a device. Or, if 214 * the supplied key is NULL, disable encryption for that ··· 249 struct uwb_pal pal; 250 251 unsigned trust_timeout; /* in jiffies */ 252 + struct wusb_ckhdid chid; 253 + struct wuie_host_info *wuie_host_info; 254 255 struct mutex mutex; /* locks everything else */ 256 u16 cluster_id; /* Wireless USB Cluster ID */ ··· 269 u8 mmcies_max; 270 /* FIXME: make wusbhc_ops? */ 271 int (*start)(struct wusbhc *wusbhc); 272 + void (*stop)(struct wusbhc *wusbhc, int delay); 273 int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, 274 u8 handle, struct wuie_hdr *wuie); 275 int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle); ··· 373 usb_put_hcd(&wusbhc->usb_hcd); 374 } 375 376 + int wusbhc_start(struct wusbhc *wusbhc); 377 void wusbhc_stop(struct wusbhc *wusbhc); 378 extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *); 379 380 /* Device connect handling */ 381 extern int wusbhc_devconnect_create(struct wusbhc *); 382 extern void wusbhc_devconnect_destroy(struct wusbhc *); 383 + extern int wusbhc_devconnect_start(struct wusbhc *wusbhc); 384 extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc); 385 extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr, 386 struct wusb_dn_hdr *dn_hdr, size_t size); 387 extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port); 388 extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val, 389 void *priv); ··· 432 extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *, 433 struct wusb_ckhdid *ck); 434 void wusbhc_gtk_rekey(struct wusbhc *wusbhc); 435 + int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev); 436 437 438 /* WUSB Cluster ID handling */
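The stop() op now takes a delay so the WUSB channel can be shut down gracefully: Channel Stop IEs go out in the MMCs transmitted during WUSB_CHANNEL_STOP_DELAY_MS before the channel actually disappears. A hedged user-space sketch of that callback shape follows; the struct and functions below are invented stand-ins, not struct wusbhc or its real ops.

#include <stdio.h>

#define CHANNEL_STOP_DELAY_MS 8	/* must exceed 4.096 ms, i.e. one zone */

struct fake_hc {
	const char *name;
	int (*start)(struct fake_hc *hc);
	void (*stop)(struct fake_hc *hc, int delay_ms);
};

static int demo_start(struct fake_hc *hc)
{
	printf("%s: channel started\n", hc->name);
	return 0;
}

static void demo_stop(struct fake_hc *hc, int delay_ms)
{
	/* announce the stop, then transmit the last MMC after delay_ms */
	printf("%s: announcing stop, last MMC in %d ms\n", hc->name, delay_ms);
}

int main(void)
{
	struct fake_hc hc = { "demo-hc", demo_start, demo_stop };

	hc.start(&hc);
	hc.stop(&hc, CHANNEL_STOP_DELAY_MS);
	return 0;
}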
+3
drivers/uwb/Makefile
··· 6 7 uwb-objs := \ 8 address.o \ 9 beacon.o \ 10 driver.o \ 11 drp.o \ ··· 14 drp-ie.o \ 15 est.o \ 16 ie.o \ 17 lc-dev.o \ 18 lc-rc.o \ 19 neh.o \ 20 pal.o \ 21 reset.o \ 22 rsv.o \ 23 scan.o \
··· 6 7 uwb-objs := \ 8 address.o \ 9 + allocator.o \ 10 beacon.o \ 11 driver.o \ 12 drp.o \ ··· 13 drp-ie.o \ 14 est.o \ 15 ie.o \ 16 + ie-rcv.o \ 17 lc-dev.o \ 18 lc-rc.o \ 19 neh.o \ 20 pal.o \ 21 + radio.o \ 22 reset.o \ 23 rsv.o \ 24 scan.o \
+1 -1
drivers/uwb/address.c
··· 28 #include <linux/device.h> 29 #include <linux/random.h> 30 #include <linux/etherdevice.h> 31 - #include <linux/uwb/debug.h> 32 #include "uwb-internal.h" 33 34
··· 28 #include <linux/device.h> 29 #include <linux/random.h> 30 #include <linux/etherdevice.h> 31 + 32 #include "uwb-internal.h" 33 34
+386
drivers/uwb/allocator.c
···
··· 1 + /* 2 + * UWB reservation management. 3 + * 4 + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License version 8 + * 2 as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + #include <linux/version.h> 19 + #include <linux/kernel.h> 20 + #include <linux/uwb.h> 21 + 22 + #include "uwb-internal.h" 23 + 24 + static void uwb_rsv_fill_column_alloc(struct uwb_rsv_alloc_info *ai) 25 + { 26 + int col, mas, safe_mas, unsafe_mas; 27 + unsigned char *bm = ai->bm; 28 + struct uwb_rsv_col_info *ci = ai->ci; 29 + unsigned char c; 30 + 31 + for (col = ci->csi.start_col; col < UWB_NUM_ZONES; col += ci->csi.interval) { 32 + 33 + safe_mas = ci->csi.safe_mas_per_col; 34 + unsafe_mas = ci->csi.unsafe_mas_per_col; 35 + 36 + for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++ ) { 37 + if (bm[col * UWB_MAS_PER_ZONE + mas] == 0) { 38 + 39 + if (safe_mas > 0) { 40 + safe_mas--; 41 + c = UWB_RSV_MAS_SAFE; 42 + } else if (unsafe_mas > 0) { 43 + unsafe_mas--; 44 + c = UWB_RSV_MAS_UNSAFE; 45 + } else { 46 + break; 47 + } 48 + bm[col * UWB_MAS_PER_ZONE + mas] = c; 49 + } 50 + } 51 + } 52 + } 53 + 54 + static void uwb_rsv_fill_row_alloc(struct uwb_rsv_alloc_info *ai) 55 + { 56 + int mas, col, rows; 57 + unsigned char *bm = ai->bm; 58 + struct uwb_rsv_row_info *ri = &ai->ri; 59 + unsigned char c; 60 + 61 + rows = 1; 62 + c = UWB_RSV_MAS_SAFE; 63 + for (mas = UWB_MAS_PER_ZONE - 1; mas >= 0; mas--) { 64 + if (ri->avail[mas] == 1) { 65 + 66 + if (rows > ri->used_rows) { 67 + break; 68 + } else if (rows > 7) { 69 + c = UWB_RSV_MAS_UNSAFE; 70 + } 71 + 72 + for (col = 0; col < UWB_NUM_ZONES; col++) { 73 + if (bm[col * UWB_NUM_ZONES + mas] != UWB_RSV_MAS_NOT_AVAIL) { 74 + bm[col * UWB_NUM_ZONES + mas] = c; 75 + if(c == UWB_RSV_MAS_SAFE) 76 + ai->safe_allocated_mases++; 77 + else 78 + ai->unsafe_allocated_mases++; 79 + } 80 + } 81 + rows++; 82 + } 83 + } 84 + ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases; 85 + } 86 + 87 + /* 88 + * Find the best column set for a given availability, interval, num safe mas and 89 + * num unsafe mas. 90 + * 91 + * The different sets are tried in order as shown below, depending on the interval. 
92 + * 93 + * interval = 16 94 + * deep = 0 95 + * set 1 -> { 8 } 96 + * deep = 1 97 + * set 1 -> { 4 } 98 + * set 2 -> { 12 } 99 + * deep = 2 100 + * set 1 -> { 2 } 101 + * set 2 -> { 6 } 102 + * set 3 -> { 10 } 103 + * set 4 -> { 14 } 104 + * deep = 3 105 + * set 1 -> { 1 } 106 + * set 2 -> { 3 } 107 + * set 3 -> { 5 } 108 + * set 4 -> { 7 } 109 + * set 5 -> { 9 } 110 + * set 6 -> { 11 } 111 + * set 7 -> { 13 } 112 + * set 8 -> { 15 } 113 + * 114 + * interval = 8 115 + * deep = 0 116 + * set 1 -> { 4 12 } 117 + * deep = 1 118 + * set 1 -> { 2 10 } 119 + * set 2 -> { 6 14 } 120 + * deep = 2 121 + * set 1 -> { 1 9 } 122 + * set 2 -> { 3 11 } 123 + * set 3 -> { 5 13 } 124 + * set 4 -> { 7 15 } 125 + * 126 + * interval = 4 127 + * deep = 0 128 + * set 1 -> { 2 6 10 14 } 129 + * deep = 1 130 + * set 1 -> { 1 5 9 13 } 131 + * set 2 -> { 3 7 11 15 } 132 + * 133 + * interval = 2 134 + * deep = 0 135 + * set 1 -> { 1 3 5 7 9 11 13 15 } 136 + */ 137 + static int uwb_rsv_find_best_column_set(struct uwb_rsv_alloc_info *ai, int interval, 138 + int num_safe_mas, int num_unsafe_mas) 139 + { 140 + struct uwb_rsv_col_info *ci = ai->ci; 141 + struct uwb_rsv_col_set_info *csi = &ci->csi; 142 + struct uwb_rsv_col_set_info tmp_csi; 143 + int deep, set, col, start_col_deep, col_start_set; 144 + int start_col, max_mas_in_set, lowest_max_mas_in_deep; 145 + int n_mas; 146 + int found = UWB_RSV_ALLOC_NOT_FOUND; 147 + 148 + tmp_csi.start_col = 0; 149 + start_col_deep = interval; 150 + n_mas = num_unsafe_mas + num_safe_mas; 151 + 152 + for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) { 153 + start_col_deep /= 2; 154 + col_start_set = 0; 155 + lowest_max_mas_in_deep = UWB_MAS_PER_ZONE; 156 + 157 + for (set = 1; set <= (1 << deep); set++) { 158 + max_mas_in_set = 0; 159 + start_col = start_col_deep + col_start_set; 160 + for (col = start_col; col < UWB_NUM_ZONES; col += interval) { 161 + 162 + if (ci[col].max_avail_safe >= num_safe_mas && 163 + ci[col].max_avail_unsafe >= n_mas) { 164 + if (ci[col].highest_mas[n_mas] > max_mas_in_set) 165 + max_mas_in_set = ci[col].highest_mas[n_mas]; 166 + } else { 167 + max_mas_in_set = 0; 168 + break; 169 + } 170 + } 171 + if ((lowest_max_mas_in_deep > max_mas_in_set) && max_mas_in_set) { 172 + lowest_max_mas_in_deep = max_mas_in_set; 173 + 174 + tmp_csi.start_col = start_col; 175 + } 176 + col_start_set += (interval >> deep); 177 + } 178 + 179 + if (lowest_max_mas_in_deep < 8) { 180 + csi->start_col = tmp_csi.start_col; 181 + found = UWB_RSV_ALLOC_FOUND; 182 + break; 183 + } else if ((lowest_max_mas_in_deep > 8) && 184 + (lowest_max_mas_in_deep != UWB_MAS_PER_ZONE) && 185 + (found == UWB_RSV_ALLOC_NOT_FOUND)) { 186 + csi->start_col = tmp_csi.start_col; 187 + found = UWB_RSV_ALLOC_FOUND; 188 + } 189 + } 190 + 191 + if (found == UWB_RSV_ALLOC_FOUND) { 192 + csi->interval = interval; 193 + csi->safe_mas_per_col = num_safe_mas; 194 + csi->unsafe_mas_per_col = num_unsafe_mas; 195 + 196 + ai->safe_allocated_mases = (UWB_NUM_ZONES / interval) * num_safe_mas; 197 + ai->unsafe_allocated_mases = (UWB_NUM_ZONES / interval) * num_unsafe_mas; 198 + ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases; 199 + ai->interval = interval; 200 + } 201 + return found; 202 + } 203 + 204 + static void get_row_descriptors(struct uwb_rsv_alloc_info *ai) 205 + { 206 + unsigned char *bm = ai->bm; 207 + struct uwb_rsv_row_info *ri = &ai->ri; 208 + int col, mas; 209 + 210 + ri->free_rows = 16; 211 + for (mas = 0; mas < UWB_MAS_PER_ZONE; mas ++) { 212 + ri->avail[mas] = 1; 
213 + for (col = 1; col < UWB_NUM_ZONES; col++) { 214 + if (bm[col * UWB_NUM_ZONES + mas] == UWB_RSV_MAS_NOT_AVAIL) { 215 + ri->free_rows--; 216 + ri->avail[mas]=0; 217 + break; 218 + } 219 + } 220 + } 221 + } 222 + 223 + static void uwb_rsv_fill_column_info(unsigned char *bm, int column, struct uwb_rsv_col_info *rci) 224 + { 225 + int mas; 226 + int block_count = 0, start_block = 0; 227 + int previous_avail = 0; 228 + int available = 0; 229 + int safe_mas_in_row[UWB_MAS_PER_ZONE] = { 230 + 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, 231 + }; 232 + 233 + rci->max_avail_safe = 0; 234 + 235 + for (mas = 0; mas < UWB_MAS_PER_ZONE; mas ++) { 236 + if (!bm[column * UWB_NUM_ZONES + mas]) { 237 + available++; 238 + rci->max_avail_unsafe = available; 239 + 240 + rci->highest_mas[available] = mas; 241 + 242 + if (previous_avail) { 243 + block_count++; 244 + if ((block_count > safe_mas_in_row[start_block]) && 245 + (!rci->max_avail_safe)) 246 + rci->max_avail_safe = available - 1; 247 + } else { 248 + previous_avail = 1; 249 + start_block = mas; 250 + block_count = 1; 251 + } 252 + } else { 253 + previous_avail = 0; 254 + } 255 + } 256 + if (!rci->max_avail_safe) 257 + rci->max_avail_safe = rci->max_avail_unsafe; 258 + } 259 + 260 + static void get_column_descriptors(struct uwb_rsv_alloc_info *ai) 261 + { 262 + unsigned char *bm = ai->bm; 263 + struct uwb_rsv_col_info *ci = ai->ci; 264 + int col; 265 + 266 + for (col = 1; col < UWB_NUM_ZONES; col++) { 267 + uwb_rsv_fill_column_info(bm, col, &ci[col]); 268 + } 269 + } 270 + 271 + static int uwb_rsv_find_best_row_alloc(struct uwb_rsv_alloc_info *ai) 272 + { 273 + int n_rows; 274 + int max_rows = ai->max_mas / UWB_USABLE_MAS_PER_ROW; 275 + int min_rows = ai->min_mas / UWB_USABLE_MAS_PER_ROW; 276 + if (ai->min_mas % UWB_USABLE_MAS_PER_ROW) 277 + min_rows++; 278 + for (n_rows = max_rows; n_rows >= min_rows; n_rows--) { 279 + if (n_rows <= ai->ri.free_rows) { 280 + ai->ri.used_rows = n_rows; 281 + ai->interval = 1; /* row reservation */ 282 + uwb_rsv_fill_row_alloc(ai); 283 + return UWB_RSV_ALLOC_FOUND; 284 + } 285 + } 286 + return UWB_RSV_ALLOC_NOT_FOUND; 287 + } 288 + 289 + static int uwb_rsv_find_best_col_alloc(struct uwb_rsv_alloc_info *ai, int interval) 290 + { 291 + int n_safe, n_unsafe, n_mas; 292 + int n_column = UWB_NUM_ZONES / interval; 293 + int max_per_zone = ai->max_mas / n_column; 294 + int min_per_zone = ai->min_mas / n_column; 295 + 296 + if (ai->min_mas % n_column) 297 + min_per_zone++; 298 + 299 + if (min_per_zone > UWB_MAS_PER_ZONE) { 300 + return UWB_RSV_ALLOC_NOT_FOUND; 301 + } 302 + 303 + if (max_per_zone > UWB_MAS_PER_ZONE) { 304 + max_per_zone = UWB_MAS_PER_ZONE; 305 + } 306 + 307 + for (n_mas = max_per_zone; n_mas >= min_per_zone; n_mas--) { 308 + if (uwb_rsv_find_best_column_set(ai, interval, 0, n_mas) == UWB_RSV_ALLOC_NOT_FOUND) 309 + continue; 310 + for (n_safe = n_mas; n_safe >= 0; n_safe--) { 311 + n_unsafe = n_mas - n_safe; 312 + if (uwb_rsv_find_best_column_set(ai, interval, n_safe, n_unsafe) == UWB_RSV_ALLOC_FOUND) { 313 + uwb_rsv_fill_column_alloc(ai); 314 + return UWB_RSV_ALLOC_FOUND; 315 + } 316 + } 317 + } 318 + return UWB_RSV_ALLOC_NOT_FOUND; 319 + } 320 + 321 + int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available, 322 + struct uwb_mas_bm *result) 323 + { 324 + struct uwb_rsv_alloc_info *ai; 325 + int interval; 326 + int bit_index; 327 + 328 + ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL); 329 + 330 + ai->min_mas = rsv->min_mas; 331 + ai->max_mas = rsv->max_mas; 332 + 
ai->max_interval = rsv->max_interval; 333 + 334 + 335 + /* fill the not available vector from the available bm */ 336 + for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) { 337 + if (!test_bit(bit_index, available->bm)) 338 + ai->bm[bit_index] = UWB_RSV_MAS_NOT_AVAIL; 339 + } 340 + 341 + if (ai->max_interval == 1) { 342 + get_row_descriptors(ai); 343 + if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND) 344 + goto alloc_found; 345 + else 346 + goto alloc_not_found; 347 + } 348 + 349 + get_column_descriptors(ai); 350 + 351 + for (interval = 16; interval >= 2; interval>>=1) { 352 + if (interval > ai->max_interval) 353 + continue; 354 + if (uwb_rsv_find_best_col_alloc(ai, interval) == UWB_RSV_ALLOC_FOUND) 355 + goto alloc_found; 356 + } 357 + 358 + /* try row reservation if no column is found */ 359 + get_row_descriptors(ai); 360 + if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND) 361 + goto alloc_found; 362 + else 363 + goto alloc_not_found; 364 + 365 + alloc_found: 366 + bitmap_zero(result->bm, UWB_NUM_MAS); 367 + bitmap_zero(result->unsafe_bm, UWB_NUM_MAS); 368 + /* fill the safe and unsafe bitmaps */ 369 + for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) { 370 + if (ai->bm[bit_index] == UWB_RSV_MAS_SAFE) 371 + set_bit(bit_index, result->bm); 372 + else if (ai->bm[bit_index] == UWB_RSV_MAS_UNSAFE) 373 + set_bit(bit_index, result->unsafe_bm); 374 + } 375 + bitmap_or(result->bm, result->bm, result->unsafe_bm, UWB_NUM_MAS); 376 + 377 + result->safe = ai->safe_allocated_mases; 378 + result->unsafe = ai->unsafe_allocated_mases; 379 + 380 + kfree(ai); 381 + return UWB_RSV_ALLOC_FOUND; 382 + 383 + alloc_not_found: 384 + kfree(ai); 385 + return UWB_RSV_ALLOC_NOT_FOUND; 386 + }
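The comment above uwb_rsv_find_best_column_set() lists the order in which candidate column sets are tried for each interval. The stand-alone sketch below reproduces only that enumeration (deep, set, start column) and prints the zones in each candidate set, so the table in the comment can be regenerated; availability checks are deliberately left out and the constants are local to the example.

#include <stdio.h>

#define NUM_ZONES 16

static void print_column_sets(int interval)
{
	int deep, set, col, start_col, start_col_deep, col_start_set;

	printf("interval = %d\n", interval);
	start_col_deep = interval;
	for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) {
		start_col_deep /= 2;
		col_start_set = 0;
		printf("  deep = %d\n", deep);
		for (set = 1; set <= (1 << deep); set++) {
			start_col = start_col_deep + col_start_set;
			printf("    set %d -> {", set);
			for (col = start_col; col < NUM_ZONES; col += interval)
				printf(" %d", col);
			printf(" }\n");
			col_start_set += interval >> deep;
		}
	}
}

int main(void)
{
	print_column_sets(16);
	print_column_sets(8);
	return 0;
}

Running it for interval 8 prints the sets { 4 12 }, { 2 10 }, { 6 14 }, { 1 9 }, { 3 11 }, { 5 13 }, { 7 15 }, matching the order documented in the comment.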
+50 -84
drivers/uwb/beacon.c
··· 22 * 23 * FIXME: docs 24 */ 25 - 26 #include <linux/kernel.h> 27 #include <linux/init.h> 28 #include <linux/module.h> 29 #include <linux/device.h> 30 #include <linux/err.h> 31 #include <linux/kdev_t.h> 32 #include "uwb-internal.h" 33 34 - #define D_LOCAL 0 35 - #include <linux/uwb/debug.h> 36 - 37 - /** Start Beaconing command structure */ 38 struct uwb_rc_cmd_start_beacon { 39 struct uwb_rccb rccb; 40 __le16 wBPSTOffset; ··· 116 int result; 117 struct device *dev = &rc->uwb_dev.dev; 118 119 - mutex_lock(&rc->uwb_dev.mutex); 120 if (channel < 0) 121 channel = -1; 122 if (channel == -1) ··· 124 /* channel >= 0...dah */ 125 result = uwb_rc_start_beacon(rc, bpst_offset, channel); 126 if (result < 0) 127 - goto out_up; 128 if (le16_to_cpu(rc->ies->wIELength) > 0) { 129 result = uwb_rc_set_ie(rc, rc->ies); 130 if (result < 0) { ··· 133 result = uwb_rc_stop_beacon(rc); 134 channel = -1; 135 bpst_offset = 0; 136 - } else 137 - result = 0; 138 } 139 } 140 141 - if (result < 0) 142 - goto out_up; 143 - rc->beaconing = channel; 144 - 145 - uwb_notify(rc, NULL, uwb_bg_joined(rc) ? UWB_NOTIF_BG_JOIN : UWB_NOTIF_BG_LEAVE); 146 - 147 - out_up: 148 - mutex_unlock(&rc->uwb_dev.mutex); 149 return result; 150 } 151 ··· 157 * FIXME: use something faster for search than a list 158 */ 159 160 - struct uwb_beca uwb_beca = { 161 - .list = LIST_HEAD_INIT(uwb_beca.list), 162 - .mutex = __MUTEX_INITIALIZER(uwb_beca.mutex) 163 - }; 164 - 165 - 166 void uwb_bce_kfree(struct kref *_bce) 167 { 168 struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt); ··· 168 169 /* Find a beacon by dev addr in the cache */ 170 static 171 - struct uwb_beca_e *__uwb_beca_find_bydev(const struct uwb_dev_addr *dev_addr) 172 { 173 struct uwb_beca_e *bce, *next; 174 - list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { 175 - d_printf(6, NULL, "looking for addr %02x:%02x in %02x:%02x\n", 176 - dev_addr->data[0], dev_addr->data[1], 177 - bce->dev_addr.data[0], bce->dev_addr.data[1]); 178 if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr))) 179 goto out; 180 } ··· 183 184 /* Find a beacon by dev addr in the cache */ 185 static 186 - struct uwb_beca_e *__uwb_beca_find_bymac(const struct uwb_mac_addr *mac_addr) 187 { 188 struct uwb_beca_e *bce, *next; 189 - list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { 190 if (!memcmp(bce->mac_addr, mac_addr->data, 191 sizeof(struct uwb_mac_addr))) 192 goto out; ··· 211 struct uwb_dev *found = NULL; 212 struct uwb_beca_e *bce; 213 214 - mutex_lock(&uwb_beca.mutex); 215 - bce = __uwb_beca_find_bydev(devaddr); 216 if (bce) 217 found = uwb_dev_try_get(rc, bce->uwb_dev); 218 - mutex_unlock(&uwb_beca.mutex); 219 220 return found; 221 } ··· 231 struct uwb_dev *found = NULL; 232 struct uwb_beca_e *bce; 233 234 - mutex_lock(&uwb_beca.mutex); 235 - bce = __uwb_beca_find_bymac(macaddr); 236 if (bce) 237 found = uwb_dev_try_get(rc, bce->uwb_dev); 238 - mutex_unlock(&uwb_beca.mutex); 239 240 return found; 241 } ··· 256 * @bf: Beacon frame (part of b, really) 257 * @ts_jiffies: Timestamp (in jiffies) when the beacon was received 258 */ 259 - struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *be, 260 struct uwb_beacon_frame *bf, 261 unsigned long ts_jiffies) 262 { ··· 270 uwb_beca_e_init(bce); 271 bce->ts_jiffies = ts_jiffies; 272 bce->uwb_dev = NULL; 273 - list_add(&bce->node, &uwb_beca.list); 274 return bce; 275 } 276 ··· 279 * 280 * Remove associated devicest too. 
281 */ 282 - void uwb_beca_purge(void) 283 { 284 struct uwb_beca_e *bce, *next; 285 unsigned long expires; 286 287 - mutex_lock(&uwb_beca.mutex); 288 - list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { 289 expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms); 290 if (time_after(jiffies, expires)) { 291 uwbd_dev_offair(bce); 292 - list_del(&bce->node); 293 - uwb_bce_put(bce); 294 } 295 } 296 - mutex_unlock(&uwb_beca.mutex); 297 } 298 299 /* Clean up the whole beacon cache. Called on shutdown */ 300 - void uwb_beca_release(void) 301 { 302 struct uwb_beca_e *bce, *next; 303 - mutex_lock(&uwb_beca.mutex); 304 - list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { 305 list_del(&bce->node); 306 uwb_bce_put(bce); 307 } 308 - mutex_unlock(&uwb_beca.mutex); 309 } 310 311 static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be, ··· 332 ssize_t result = 0; 333 struct uwb_rc_evt_beacon *be; 334 struct uwb_beacon_frame *bf; 335 - struct uwb_buf_ctx ctx = { 336 - .buf = buf, 337 - .bytes = 0, 338 - .size = size 339 - }; 340 341 mutex_lock(&bce->mutex); 342 be = bce->be; 343 - if (be == NULL) 344 - goto out; 345 - bf = (void *) be->BeaconInfo; 346 - uwb_ie_for_each(uwb_dev, uwb_ie_dump_hex, &ctx, 347 - bf->IEData, be->wBeaconInfoLength - sizeof(*bf)); 348 - result = ctx.bytes; 349 - out: 350 mutex_unlock(&bce->mutex); 351 return result; 352 } 353 ··· 420 if (uwb_mac_addr_bcast(&bf->Device_Identifier)) 421 return 0; 422 423 - mutex_lock(&uwb_beca.mutex); 424 - bce = __uwb_beca_find_bymac(&bf->Device_Identifier); 425 if (bce == NULL) { 426 /* Not in there, a new device is pinging */ 427 uwb_beacon_print(evt->rc, be, bf); 428 - bce = __uwb_beca_add(be, bf, evt->ts_jiffies); 429 if (bce == NULL) { 430 - mutex_unlock(&uwb_beca.mutex); 431 return -ENOMEM; 432 } 433 } 434 - mutex_unlock(&uwb_beca.mutex); 435 436 mutex_lock(&bce->mutex); 437 /* purge old beacon data */ ··· 571 return result; 572 } 573 574 - /** 575 - * uwb_bg_joined - is the RC in a beacon group? 576 - * @rc: the radio controller 577 - * 578 - * Returns true if the radio controller is in a beacon group (even if 579 - * it's the sole member). 580 - */ 581 - int uwb_bg_joined(struct uwb_rc *rc) 582 - { 583 - return rc->beaconing != -1; 584 - } 585 - EXPORT_SYMBOL_GPL(uwb_bg_joined); 586 - 587 /* 588 * Print beaconing state. 589 */ ··· 589 590 /* 591 * Start beaconing on the specified channel, or stop beaconing. 592 - * 593 - * The BPST offset of when to start searching for a beacon group to 594 - * join may be specified. 595 */ 596 static ssize_t uwb_rc_beacon_store(struct device *dev, 597 struct device_attribute *attr, ··· 597 struct uwb_dev *uwb_dev = to_uwb_dev(dev); 598 struct uwb_rc *rc = uwb_dev->rc; 599 int channel; 600 - unsigned bpst_offset = 0; 601 ssize_t result = -EINVAL; 602 603 - result = sscanf(buf, "%d %u\n", &channel, &bpst_offset); 604 if (result >= 1) 605 - result = uwb_rc_beacon(rc, channel, bpst_offset); 606 607 return result < 0 ? result : size; 608 }
··· 22 * 23 * FIXME: docs 24 */ 25 #include <linux/kernel.h> 26 #include <linux/init.h> 27 #include <linux/module.h> 28 #include <linux/device.h> 29 #include <linux/err.h> 30 #include <linux/kdev_t.h> 31 + 32 #include "uwb-internal.h" 33 34 + /* Start Beaconing command structure */ 35 struct uwb_rc_cmd_start_beacon { 36 struct uwb_rccb rccb; 37 __le16 wBPSTOffset; ··· 119 int result; 120 struct device *dev = &rc->uwb_dev.dev; 121 122 if (channel < 0) 123 channel = -1; 124 if (channel == -1) ··· 128 /* channel >= 0...dah */ 129 result = uwb_rc_start_beacon(rc, bpst_offset, channel); 130 if (result < 0) 131 + return result; 132 if (le16_to_cpu(rc->ies->wIELength) > 0) { 133 result = uwb_rc_set_ie(rc, rc->ies); 134 if (result < 0) { ··· 137 result = uwb_rc_stop_beacon(rc); 138 channel = -1; 139 bpst_offset = 0; 140 + } 141 } 142 } 143 144 + if (result >= 0) 145 + rc->beaconing = channel; 146 return result; 147 } 148 ··· 168 * FIXME: use something faster for search than a list 169 */ 170 171 void uwb_bce_kfree(struct kref *_bce) 172 { 173 struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt); ··· 185 186 /* Find a beacon by dev addr in the cache */ 187 static 188 + struct uwb_beca_e *__uwb_beca_find_bydev(struct uwb_rc *rc, 189 + const struct uwb_dev_addr *dev_addr) 190 { 191 struct uwb_beca_e *bce, *next; 192 + list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { 193 if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr))) 194 goto out; 195 } ··· 202 203 /* Find a beacon by dev addr in the cache */ 204 static 205 + struct uwb_beca_e *__uwb_beca_find_bymac(struct uwb_rc *rc, 206 + const struct uwb_mac_addr *mac_addr) 207 { 208 struct uwb_beca_e *bce, *next; 209 + list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { 210 if (!memcmp(bce->mac_addr, mac_addr->data, 211 sizeof(struct uwb_mac_addr))) 212 goto out; ··· 229 struct uwb_dev *found = NULL; 230 struct uwb_beca_e *bce; 231 232 + mutex_lock(&rc->uwb_beca.mutex); 233 + bce = __uwb_beca_find_bydev(rc, devaddr); 234 if (bce) 235 found = uwb_dev_try_get(rc, bce->uwb_dev); 236 + mutex_unlock(&rc->uwb_beca.mutex); 237 238 return found; 239 } ··· 249 struct uwb_dev *found = NULL; 250 struct uwb_beca_e *bce; 251 252 + mutex_lock(&rc->uwb_beca.mutex); 253 + bce = __uwb_beca_find_bymac(rc, macaddr); 254 if (bce) 255 found = uwb_dev_try_get(rc, bce->uwb_dev); 256 + mutex_unlock(&rc->uwb_beca.mutex); 257 258 return found; 259 } ··· 274 * @bf: Beacon frame (part of b, really) 275 * @ts_jiffies: Timestamp (in jiffies) when the beacon was received 276 */ 277 + static 278 + struct uwb_beca_e *__uwb_beca_add(struct uwb_rc *rc, 279 + struct uwb_rc_evt_beacon *be, 280 struct uwb_beacon_frame *bf, 281 unsigned long ts_jiffies) 282 { ··· 286 uwb_beca_e_init(bce); 287 bce->ts_jiffies = ts_jiffies; 288 bce->uwb_dev = NULL; 289 + list_add(&bce->node, &rc->uwb_beca.list); 290 return bce; 291 } 292 ··· 295 * 296 * Remove associated devicest too. 297 */ 298 + void uwb_beca_purge(struct uwb_rc *rc) 299 { 300 struct uwb_beca_e *bce, *next; 301 unsigned long expires; 302 303 + mutex_lock(&rc->uwb_beca.mutex); 304 + list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { 305 expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms); 306 if (time_after(jiffies, expires)) { 307 uwbd_dev_offair(bce); 308 } 309 } 310 + mutex_unlock(&rc->uwb_beca.mutex); 311 } 312 313 /* Clean up the whole beacon cache. 
Called on shutdown */ 314 + void uwb_beca_release(struct uwb_rc *rc) 315 { 316 struct uwb_beca_e *bce, *next; 317 + 318 + mutex_lock(&rc->uwb_beca.mutex); 319 + list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { 320 list_del(&bce->node); 321 uwb_bce_put(bce); 322 } 323 + mutex_unlock(&rc->uwb_beca.mutex); 324 } 325 326 static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be, ··· 349 ssize_t result = 0; 350 struct uwb_rc_evt_beacon *be; 351 struct uwb_beacon_frame *bf; 352 + int ies_len; 353 + struct uwb_ie_hdr *ies; 354 355 mutex_lock(&bce->mutex); 356 + 357 be = bce->be; 358 + if (be) { 359 + bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo; 360 + ies_len = be->wBeaconInfoLength - sizeof(struct uwb_beacon_frame); 361 + ies = (struct uwb_ie_hdr *)bf->IEData; 362 + 363 + result = uwb_ie_dump_hex(ies, ies_len, buf, size); 364 + } 365 + 366 mutex_unlock(&bce->mutex); 367 + 368 return result; 369 } 370 ··· 437 if (uwb_mac_addr_bcast(&bf->Device_Identifier)) 438 return 0; 439 440 + mutex_lock(&rc->uwb_beca.mutex); 441 + bce = __uwb_beca_find_bymac(rc, &bf->Device_Identifier); 442 if (bce == NULL) { 443 /* Not in there, a new device is pinging */ 444 uwb_beacon_print(evt->rc, be, bf); 445 + bce = __uwb_beca_add(rc, be, bf, evt->ts_jiffies); 446 if (bce == NULL) { 447 + mutex_unlock(&rc->uwb_beca.mutex); 448 return -ENOMEM; 449 } 450 } 451 + mutex_unlock(&rc->uwb_beca.mutex); 452 453 mutex_lock(&bce->mutex); 454 /* purge old beacon data */ ··· 588 return result; 589 } 590 591 /* 592 * Print beaconing state. 593 */ ··· 619 620 /* 621 * Start beaconing on the specified channel, or stop beaconing. 622 */ 623 static ssize_t uwb_rc_beacon_store(struct device *dev, 624 struct device_attribute *attr, ··· 630 struct uwb_dev *uwb_dev = to_uwb_dev(dev); 631 struct uwb_rc *rc = uwb_dev->rc; 632 int channel; 633 ssize_t result = -EINVAL; 634 635 + result = sscanf(buf, "%d", &channel); 636 if (result >= 1) 637 + result = uwb_radio_force_channel(rc, channel); 638 639 return result < 0 ? result : size; 640 }
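uwb_beca_purge() above expires cache entries whose last beacon is older than beacon_timeout_ms, now walking the per-radio-controller list under its own mutex. A simplified user-space model of that expiry policy is sketched below; the fixed array, the plain millisecond counter and the timeout value are assumptions made to keep the example self-contained (the driver uses a linked list and jiffies).

#include <stdio.h>

#define BEACON_TIMEOUT_MS 500

struct cache_entry {
	unsigned int dev_addr;
	unsigned long ts_ms;	/* when the last beacon was received */
	int present;
};

static void cache_purge(struct cache_entry *cache, int n, unsigned long now_ms)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!cache[i].present)
			continue;
		if (now_ms - cache[i].ts_ms > BEACON_TIMEOUT_MS) {
			printf("device %#06x went off the air\n", cache[i].dev_addr);
			cache[i].present = 0;
		}
	}
}

int main(void)
{
	struct cache_entry cache[] = {
		{ 0x0012, 100, 1 },	/* stale: purged */
		{ 0x0034, 900, 1 },	/* still beaconing: kept */
	};

	cache_purge(cache, 2, 1000);
	return 0;
}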
+1 -3
drivers/uwb/driver.c
··· 53 #include <linux/err.h> 54 #include <linux/kdev_t.h> 55 #include <linux/random.h> 56 - #include <linux/uwb/debug.h> 57 #include "uwb-internal.h" 58 59 ··· 118 result = class_register(&uwb_rc_class); 119 if (result < 0) 120 goto error_uwb_rc_class_register; 121 - uwbd_start(); 122 uwb_dbg_init(); 123 return 0; 124 ··· 131 static void __exit uwb_subsys_exit(void) 132 { 133 uwb_dbg_exit(); 134 - uwbd_stop(); 135 class_unregister(&uwb_rc_class); 136 uwb_est_destroy(); 137 return;
··· 53 #include <linux/err.h> 54 #include <linux/kdev_t.h> 55 #include <linux/random.h> 56 + 57 #include "uwb-internal.h" 58 59 ··· 118 result = class_register(&uwb_rc_class); 119 if (result < 0) 120 goto error_uwb_rc_class_register; 121 uwb_dbg_init(); 122 return 0; 123 ··· 132 static void __exit uwb_subsys_exit(void) 133 { 134 uwb_dbg_exit(); 135 class_unregister(&uwb_rc_class); 136 uwb_est_destroy(); 137 return;
+3 -1
drivers/uwb/drp-avail.c
··· 58 * 59 * avail = global & local & pending 60 */ 61 - static void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail) 62 { 63 bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS); 64 bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS); ··· 105 bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS); 106 bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS); 107 rc->drp_avail.ie_valid = false; 108 } 109 110 /** ··· 281 mutex_lock(&rc->rsvs_mutex); 282 bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS); 283 rc->drp_avail.ie_valid = false; 284 mutex_unlock(&rc->rsvs_mutex); 285 286 uwb_rsv_sched_update(rc);
··· 58 * 59 * avail = global & local & pending 60 */ 61 + void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail) 62 { 63 bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS); 64 bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS); ··· 105 bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS); 106 bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS); 107 rc->drp_avail.ie_valid = false; 108 + uwb_rsv_handle_drp_avail_change(rc); 109 } 110 111 /** ··· 280 mutex_lock(&rc->rsvs_mutex); 281 bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS); 282 rc->drp_avail.ie_valid = false; 283 + uwb_rsv_handle_drp_avail_change(rc); 284 mutex_unlock(&rc->rsvs_mutex); 285 286 uwb_rsv_sched_update(rc);
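The availability rule stays avail = global & local & pending, computed with bitmap_and() over the 256 MAS. The user-space sketch below restates that as plain word-wise ANDs over fixed-size arrays; the 32-bit word layout is an assumption of the example, not the kernel bitmap representation.

#include <stdio.h>

#define NUM_MAS   256
#define MAS_WORDS (NUM_MAS / 32)

static void mas_and(unsigned int *dst, const unsigned int *a,
		    const unsigned int *b)
{
	int i;

	for (i = 0; i < MAS_WORDS; i++)
		dst[i] = a[i] & b[i];
}

int main(void)
{
	unsigned int global[MAS_WORDS], local[MAS_WORDS];
	unsigned int pending[MAS_WORDS], avail[MAS_WORDS];
	int i;

	for (i = 0; i < MAS_WORDS; i++) {
		global[i] = 0xffffffff;	/* everything free globally */
		local[i] = 0xffff0000;	/* lower half reserved locally */
		pending[i] = 0xffffffff;
	}

	mas_and(avail, global, local);	/* avail = global & local */
	mas_and(avail, avail, pending);	/* avail &= pending */

	printf("first word of avail: %#x\n", avail[0]);
	return 0;
}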
+123 -38
drivers/uwb/drp-ie.c
··· 16 * You should have received a copy of the GNU General Public License 17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 - #include <linux/version.h> 20 #include <linux/kernel.h> 21 #include <linux/random.h> 22 #include <linux/uwb.h> 23 24 #include "uwb-internal.h" 25 26 /* 27 * Allocate a DRP IE. ··· 123 static struct uwb_ie_drp *uwb_drp_ie_alloc(void) 124 { 125 struct uwb_ie_drp *drp_ie; 126 - unsigned tiebreaker; 127 128 drp_ie = kzalloc(sizeof(struct uwb_ie_drp) + 129 UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc), 130 GFP_KERNEL); 131 if (drp_ie) { 132 drp_ie->hdr.element_id = UWB_IE_DRP; 133 - 134 - get_random_bytes(&tiebreaker, sizeof(unsigned)); 135 - uwb_ie_drp_set_tiebreaker(drp_ie, tiebreaker & 1); 136 } 137 return drp_ie; 138 } ··· 189 */ 190 int uwb_drp_ie_update(struct uwb_rsv *rsv) 191 { 192 - struct device *dev = &rsv->rc->uwb_dev.dev; 193 struct uwb_ie_drp *drp_ie; 194 - int reason_code, status; 195 196 - switch (rsv->state) { 197 - case UWB_RSV_STATE_NONE: 198 kfree(rsv->drp_ie); 199 rsv->drp_ie = NULL; 200 return 0; 201 - case UWB_RSV_STATE_O_INITIATED: 202 - reason_code = UWB_DRP_REASON_ACCEPTED; 203 - status = 0; 204 - break; 205 - case UWB_RSV_STATE_O_PENDING: 206 - reason_code = UWB_DRP_REASON_ACCEPTED; 207 - status = 0; 208 - break; 209 - case UWB_RSV_STATE_O_MODIFIED: 210 - reason_code = UWB_DRP_REASON_MODIFIED; 211 - status = 1; 212 - break; 213 - case UWB_RSV_STATE_O_ESTABLISHED: 214 - reason_code = UWB_DRP_REASON_ACCEPTED; 215 - status = 1; 216 - break; 217 - case UWB_RSV_STATE_T_ACCEPTED: 218 - reason_code = UWB_DRP_REASON_ACCEPTED; 219 - status = 1; 220 - break; 221 - case UWB_RSV_STATE_T_DENIED: 222 - reason_code = UWB_DRP_REASON_DENIED; 223 - status = 0; 224 - break; 225 - default: 226 - dev_dbg(dev, "rsv with unhandled state (%d)\n", rsv->state); 227 - return -EINVAL; 228 } 229 230 if (rsv->drp_ie == NULL) { 231 rsv->drp_ie = uwb_drp_ie_alloc(); ··· 208 } 209 drp_ie = rsv->drp_ie; 210 211 uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv)); 212 - uwb_ie_drp_set_status(drp_ie, status); 213 - uwb_ie_drp_set_reason_code(drp_ie, reason_code); 214 uwb_ie_drp_set_stream_index(drp_ie, rsv->stream); 215 uwb_ie_drp_set_type(drp_ie, rsv->type); 216 ··· 229 drp_ie->dev_addr = rsv->owner->dev_addr; 230 231 uwb_drp_ie_from_bm(drp_ie, &rsv->mas); 232 233 rsv->ie_valid = true; 234 return 0; ··· 301 u8 zone; 302 u16 zone_mask; 303 304 for (cnt = 0; cnt < numallocs; cnt++) { 305 alloc = &drp_ie->allocs[cnt]; 306 zone_bm = le16_to_cpu(alloc->zone_bm); ··· 314 } 315 } 316 }
··· 16 * You should have received a copy of the GNU General Public License 17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 */ 19 #include <linux/kernel.h> 20 #include <linux/random.h> 21 #include <linux/uwb.h> 22 23 #include "uwb-internal.h" 24 + 25 + 26 + /* 27 + * Return the reason code for a reservations's DRP IE. 28 + */ 29 + int uwb_rsv_reason_code(struct uwb_rsv *rsv) 30 + { 31 + static const int reason_codes[] = { 32 + [UWB_RSV_STATE_O_INITIATED] = UWB_DRP_REASON_ACCEPTED, 33 + [UWB_RSV_STATE_O_PENDING] = UWB_DRP_REASON_ACCEPTED, 34 + [UWB_RSV_STATE_O_MODIFIED] = UWB_DRP_REASON_MODIFIED, 35 + [UWB_RSV_STATE_O_ESTABLISHED] = UWB_DRP_REASON_ACCEPTED, 36 + [UWB_RSV_STATE_O_TO_BE_MOVED] = UWB_DRP_REASON_ACCEPTED, 37 + [UWB_RSV_STATE_O_MOVE_COMBINING] = UWB_DRP_REASON_MODIFIED, 38 + [UWB_RSV_STATE_O_MOVE_REDUCING] = UWB_DRP_REASON_MODIFIED, 39 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, 40 + [UWB_RSV_STATE_T_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, 41 + [UWB_RSV_STATE_T_CONFLICT] = UWB_DRP_REASON_CONFLICT, 42 + [UWB_RSV_STATE_T_PENDING] = UWB_DRP_REASON_PENDING, 43 + [UWB_RSV_STATE_T_DENIED] = UWB_DRP_REASON_DENIED, 44 + [UWB_RSV_STATE_T_RESIZED] = UWB_DRP_REASON_ACCEPTED, 45 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, 46 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, 47 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, 48 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, 49 + }; 50 + 51 + return reason_codes[rsv->state]; 52 + } 53 + 54 + /* 55 + * Return the reason code for a reservations's companion DRP IE . 56 + */ 57 + int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv) 58 + { 59 + static const int companion_reason_codes[] = { 60 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, 61 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, 62 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, 63 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, 64 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, 65 + }; 66 + 67 + return companion_reason_codes[rsv->state]; 68 + } 69 + 70 + /* 71 + * Return the status bit for a reservations's DRP IE. 72 + */ 73 + int uwb_rsv_status(struct uwb_rsv *rsv) 74 + { 75 + static const int statuses[] = { 76 + [UWB_RSV_STATE_O_INITIATED] = 0, 77 + [UWB_RSV_STATE_O_PENDING] = 0, 78 + [UWB_RSV_STATE_O_MODIFIED] = 1, 79 + [UWB_RSV_STATE_O_ESTABLISHED] = 1, 80 + [UWB_RSV_STATE_O_TO_BE_MOVED] = 0, 81 + [UWB_RSV_STATE_O_MOVE_COMBINING] = 1, 82 + [UWB_RSV_STATE_O_MOVE_REDUCING] = 1, 83 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = 1, 84 + [UWB_RSV_STATE_T_ACCEPTED] = 1, 85 + [UWB_RSV_STATE_T_CONFLICT] = 0, 86 + [UWB_RSV_STATE_T_PENDING] = 0, 87 + [UWB_RSV_STATE_T_DENIED] = 0, 88 + [UWB_RSV_STATE_T_RESIZED] = 1, 89 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, 90 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1, 91 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = 1, 92 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = 1, 93 + 94 + }; 95 + 96 + return statuses[rsv->state]; 97 + } 98 + 99 + /* 100 + * Return the status bit for a reservations's companion DRP IE . 
101 + */ 102 + int uwb_rsv_companion_status(struct uwb_rsv *rsv) 103 + { 104 + static const int companion_statuses[] = { 105 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = 0, 106 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, 107 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0, 108 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = 0, 109 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = 0, 110 + }; 111 + 112 + return companion_statuses[rsv->state]; 113 + } 114 115 /* 116 * Allocate a DRP IE. ··· 34 static struct uwb_ie_drp *uwb_drp_ie_alloc(void) 35 { 36 struct uwb_ie_drp *drp_ie; 37 38 drp_ie = kzalloc(sizeof(struct uwb_ie_drp) + 39 UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc), 40 GFP_KERNEL); 41 if (drp_ie) { 42 drp_ie->hdr.element_id = UWB_IE_DRP; 43 } 44 return drp_ie; 45 } ··· 104 */ 105 int uwb_drp_ie_update(struct uwb_rsv *rsv) 106 { 107 struct uwb_ie_drp *drp_ie; 108 + struct uwb_rsv_move *mv; 109 + int unsafe; 110 111 + if (rsv->state == UWB_RSV_STATE_NONE) { 112 kfree(rsv->drp_ie); 113 rsv->drp_ie = NULL; 114 return 0; 115 } 116 + 117 + unsafe = rsv->mas.unsafe ? 1 : 0; 118 119 if (rsv->drp_ie == NULL) { 120 rsv->drp_ie = uwb_drp_ie_alloc(); ··· 149 } 150 drp_ie = rsv->drp_ie; 151 152 + uwb_ie_drp_set_unsafe(drp_ie, unsafe); 153 + uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker); 154 uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv)); 155 + uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv)); 156 + uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv)); 157 uwb_ie_drp_set_stream_index(drp_ie, rsv->stream); 158 uwb_ie_drp_set_type(drp_ie, rsv->type); 159 ··· 168 drp_ie->dev_addr = rsv->owner->dev_addr; 169 170 uwb_drp_ie_from_bm(drp_ie, &rsv->mas); 171 + 172 + if (uwb_rsv_has_two_drp_ies(rsv)) { 173 + mv = &rsv->mv; 174 + if (mv->companion_drp_ie == NULL) { 175 + mv->companion_drp_ie = uwb_drp_ie_alloc(); 176 + if (mv->companion_drp_ie == NULL) 177 + return -ENOMEM; 178 + } 179 + drp_ie = mv->companion_drp_ie; 180 + 181 + /* keep all the same configuration of the main drp_ie */ 182 + memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp)); 183 + 184 + 185 + /* FIXME: handle properly the unsafe bit */ 186 + uwb_ie_drp_set_unsafe(drp_ie, 1); 187 + uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv)); 188 + uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv)); 189 + 190 + uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas); 191 + } 192 193 rsv->ie_valid = true; 194 return 0; ··· 219 u8 zone; 220 u16 zone_mask; 221 222 + bitmap_zero(bm->bm, UWB_NUM_MAS); 223 + 224 for (cnt = 0; cnt < numallocs; cnt++) { 225 alloc = &drp_ie->allocs[cnt]; 226 zone_bm = le16_to_cpu(alloc->zone_bm); ··· 230 } 231 } 232 } 233 +
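Note: the old per-state switch is replaced here by designated-initializer tables indexed on the reservation state, with a second pair of helpers for the companion DRP IE used while a reservation is being moved. A condensed sketch of how the four helpers are consumed when both IEs are filled in; only the status and reason-code fields are shown, everything else is as in uwb_drp_ie_update() above:

/*
 * Sketch only: apply the table-driven status/reason-code helpers to a
 * main DRP IE and, when the reservation is expanding, to its companion.
 */
static void example_fill_status_fields(struct uwb_rsv *rsv,
                                       struct uwb_ie_drp *drp_ie,
                                       struct uwb_ie_drp *companion_ie)
{
        uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv));
        uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv));

        if (companion_ie != NULL) {
                uwb_ie_drp_set_status(companion_ie,
                                      uwb_rsv_companion_status(rsv));
                uwb_ie_drp_set_reason_code(companion_ie,
                                           uwb_rsv_companion_reason_code(rsv));
        }
}

Keeping the mapping in data rather than in a switch means a new reservation state only needs a table entry, not another case at every call site.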
+530 -177
drivers/uwb/drp.c
··· 23 #include <linux/delay.h> 24 #include "uwb-internal.h" 25 26 /** 27 * Construct and send the SET DRP IE 28 * ··· 90 * 91 * A DRP Availability IE is appended. 92 * 93 - * rc->uwb_dev.mutex is held 94 * 95 * FIXME We currently ignore the returned value indicating the remaining space 96 * in beacon. This could be used to deny reservation requests earlier if 97 * determined that they would cause the beacon space to be exceeded. 98 */ 99 - static 100 - int uwb_rc_gen_send_drp_ie(struct uwb_rc *rc) 101 { 102 int result; 103 - struct device *dev = &rc->uwb_dev.dev; 104 struct uwb_rc_cmd_set_drp_ie *cmd; 105 - struct uwb_rc_evt_set_drp_ie reply; 106 struct uwb_rsv *rsv; 107 int num_bytes = 0; 108 u8 *IEDataptr; 109 110 result = -ENOMEM; 111 /* First traverse all reservations to determine memory needed. */ 112 list_for_each_entry(rsv, &rc->reservations, rc_node) { 113 - if (rsv->drp_ie != NULL) 114 num_bytes += rsv->drp_ie->hdr.length + 2; 115 } 116 num_bytes += sizeof(rc->drp_avail.ie); 117 cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL); ··· 126 cmd->wIELength = num_bytes; 127 IEDataptr = (u8 *)&cmd->IEData[0]; 128 129 /* Next traverse all reservations to place IEs in allocated memory. */ 130 list_for_each_entry(rsv, &rc->reservations, rc_node) { 131 if (rsv->drp_ie != NULL) { 132 memcpy(IEDataptr, rsv->drp_ie, 133 rsv->drp_ie->hdr.length + 2); 134 IEDataptr += rsv->drp_ie->hdr.length + 2; 135 } 136 } 137 - memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); 138 139 - reply.rceb.bEventType = UWB_RC_CET_GENERAL; 140 - reply.rceb.wEvent = UWB_RC_CMD_SET_DRP_IE; 141 - result = uwb_rc_cmd(rc, "SET-DRP-IE", &cmd->rccb, 142 - sizeof(*cmd) + num_bytes, &reply.rceb, 143 - sizeof(reply)); 144 - if (result < 0) 145 - goto error_cmd; 146 - result = le16_to_cpu(reply.wRemainingSpace); 147 - if (reply.bResultCode != UWB_RC_RES_SUCCESS) { 148 - dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: command execution " 149 - "failed: %s (%d). RemainingSpace in beacon " 150 - "= %d\n", uwb_rc_strerror(reply.bResultCode), 151 - reply.bResultCode, result); 152 - result = -EIO; 153 - } else { 154 - dev_dbg(dev, "SET-DRP-IE sent. RemainingSpace in beacon " 155 - "= %d.\n", result); 156 - result = 0; 157 - } 158 - error_cmd: 159 kfree(cmd); 160 error: 161 return result; 162 - 163 } 164 - /** 165 - * Send all DRP IEs associated with this host 166 * 167 - * @returns: >= 0 number of bytes still available in the beacon 168 - * < 0 errno code on error. 169 - * 170 - * As per the protocol we obtain the host controller device lock to access 171 - * bandwidth structures. 172 */ 173 - int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) 174 { 175 - int result; 176 177 - mutex_lock(&rc->uwb_dev.mutex); 178 - result = uwb_rc_gen_send_drp_ie(rc); 179 - mutex_unlock(&rc->uwb_dev.mutex); 180 - return result; 181 - } 182 - 183 - void uwb_drp_handle_timeout(struct uwb_rsv *rsv) 184 - { 185 - struct device *dev = &rsv->rc->uwb_dev.dev; 186 - 187 - dev_dbg(dev, "reservation timeout in state %s (%d)\n", 188 - uwb_rsv_state_str(rsv->state), rsv->state); 189 - 190 - switch (rsv->state) { 191 - case UWB_RSV_STATE_O_INITIATED: 192 - if (rsv->is_multicast) { 193 - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 194 - return; 195 - } 196 - break; 197 - case UWB_RSV_STATE_O_ESTABLISHED: 198 - if (rsv->is_multicast) 199 - return; 200 - break; 201 - default: 202 - break; 203 } 204 - uwb_rsv_remove(rsv); 205 } 206 207 /* 208 * Based on the DRP IE, transition a target reservation to a new 209 * state. 
210 */ 211 static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, 212 - struct uwb_ie_drp *drp_ie) 213 { 214 struct device *dev = &rc->uwb_dev.dev; 215 int status; 216 enum uwb_drp_reason reason_code; 217 - 218 status = uwb_ie_drp_status(drp_ie); 219 reason_code = uwb_ie_drp_reason_code(drp_ie); 220 221 - if (status) { 222 - switch (reason_code) { 223 - case UWB_DRP_REASON_ACCEPTED: 224 uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); 225 break; 226 - case UWB_DRP_REASON_MODIFIED: 227 - dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", 228 - reason_code, status); 229 - break; 230 - default: 231 - dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", 232 - reason_code, status); 233 } 234 - } else { 235 - switch (reason_code) { 236 - case UWB_DRP_REASON_ACCEPTED: 237 - /* New reservations are handled in uwb_rsv_find(). */ 238 - break; 239 - case UWB_DRP_REASON_DENIED: 240 - uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 241 - break; 242 - case UWB_DRP_REASON_CONFLICT: 243 - case UWB_DRP_REASON_MODIFIED: 244 - dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", 245 - reason_code, status); 246 - break; 247 - default: 248 - dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", 249 - reason_code, status); 250 } 251 } 252 } 253 ··· 450 * state. 451 */ 452 static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, 453 - struct uwb_ie_drp *drp_ie) 454 { 455 struct device *dev = &rc->uwb_dev.dev; 456 int status; 457 enum uwb_drp_reason reason_code; 458 459 status = uwb_ie_drp_status(drp_ie); 460 reason_code = uwb_ie_drp_reason_code(drp_ie); 461 462 if (status) { 463 switch (reason_code) { 464 case UWB_DRP_REASON_ACCEPTED: 465 - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 466 - break; 467 - case UWB_DRP_REASON_MODIFIED: 468 - dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", 469 - reason_code, status); 470 break; 471 default: 472 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", ··· 518 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 519 break; 520 case UWB_DRP_REASON_CONFLICT: 521 - case UWB_DRP_REASON_MODIFIED: 522 - dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", 523 - reason_code, status); 524 break; 525 default: 526 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", ··· 530 } 531 } 532 533 /* 534 - * Process a received DRP IE, it's either for a reservation owned by 535 - * the RC or targeted at it (or it's for a WUSB cluster reservation). 536 */ 537 - static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src, 538 - struct uwb_ie_drp *drp_ie) 539 { 540 struct uwb_rsv *rsv; 541 ··· 646 */ 647 return; 648 } 649 - 650 /* 651 * Do nothing with DRP IEs for reservations that have been 652 * terminated. ··· 655 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 656 return; 657 } 658 - 659 if (uwb_ie_drp_owner(drp_ie)) 660 - uwb_drp_process_target(rc, rsv, drp_ie); 661 else 662 - uwb_drp_process_owner(rc, rsv, drp_ie); 663 } 664 665 666 /* 667 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE) ··· 713 714 switch (ie_hdr->element_id) { 715 case UWB_IE_DRP_AVAILABILITY: 716 - /* FIXME: does something need to be done with this? */ 717 break; 718 case UWB_IE_DRP: 719 - uwb_drp_process(rc, src_dev, (struct uwb_ie_drp *)ie_hdr); 720 break; 721 default: 722 dev_warn(dev, "unexpected IE in DRP notification\n"); ··· 728 dev_warn(dev, "%d octets remaining in DRP notification\n", 729 (int)ielen); 730 } 731 - 732 - 733 - /* 734 - * Go through all the DRP IEs and find the ones that conflict with our 735 - * reservations. 
736 - * 737 - * FIXME: must resolve the conflict according the the rules in 738 - * [ECMA-368]. 739 - */ 740 - static 741 - void uwb_drp_process_conflict_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, 742 - size_t ielen, struct uwb_dev *src_dev) 743 - { 744 - struct device *dev = &rc->uwb_dev.dev; 745 - struct uwb_ie_hdr *ie_hdr; 746 - struct uwb_ie_drp *drp_ie; 747 - void *ptr; 748 - 749 - ptr = drp_evt->ie_data; 750 - for (;;) { 751 - ie_hdr = uwb_ie_next(&ptr, &ielen); 752 - if (!ie_hdr) 753 - break; 754 - 755 - drp_ie = container_of(ie_hdr, struct uwb_ie_drp, hdr); 756 - 757 - /* FIXME: check if this DRP IE conflicts. */ 758 - } 759 - 760 - if (ielen > 0) 761 - dev_warn(dev, "%d octets remaining in DRP notification\n", 762 - (int)ielen); 763 - } 764 - 765 - 766 - /* 767 - * Terminate all reservations owned by, or targeted at, 'uwb_dev'. 768 - */ 769 - static void uwb_drp_terminate_all(struct uwb_rc *rc, struct uwb_dev *uwb_dev) 770 - { 771 - struct uwb_rsv *rsv; 772 - 773 - list_for_each_entry(rsv, &rc->reservations, rc_node) { 774 - if (rsv->owner == uwb_dev 775 - || (rsv->target.type == UWB_RSV_TARGET_DEV && rsv->target.dev == uwb_dev)) 776 - uwb_rsv_remove(rsv); 777 - } 778 - } 779 - 780 781 /** 782 * uwbd_evt_handle_rc_drp - handle a DRP_IE event ··· 769 size_t ielength, bytes_left; 770 struct uwb_dev_addr src_addr; 771 struct uwb_dev *src_dev; 772 - int reason; 773 774 /* Is there enough data to decode the event (and any IEs in 775 its payload)? */ ··· 804 805 mutex_lock(&rc->rsvs_mutex); 806 807 - reason = uwb_rc_evt_drp_reason(drp_evt); 808 - 809 - switch (reason) { 810 - case UWB_DRP_NOTIF_DRP_IE_RCVD: 811 - uwb_drp_process_all(rc, drp_evt, ielength, src_dev); 812 - break; 813 - case UWB_DRP_NOTIF_CONFLICT: 814 - uwb_drp_process_conflict_all(rc, drp_evt, ielength, src_dev); 815 - break; 816 - case UWB_DRP_NOTIF_TERMINATE: 817 - uwb_drp_terminate_all(rc, src_dev); 818 - break; 819 - default: 820 - dev_warn(dev, "ignored DRP event with reason code: %d\n", reason); 821 - break; 822 - } 823 824 mutex_unlock(&rc->rsvs_mutex); 825
··· 23 #include <linux/delay.h> 24 #include "uwb-internal.h" 25 26 + 27 + /* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */ 28 + enum uwb_drp_conflict_action { 29 + /* Reservation is mantained, no action needed */ 30 + UWB_DRP_CONFLICT_MANTAIN = 0, 31 + 32 + /* the device shall not transmit frames in conflicting MASs in 33 + * the following superframe. If the device is the reservation 34 + * target, it shall also set the Reason Code in its DRP IE to 35 + * Conflict in its beacon in the following superframe. 36 + */ 37 + UWB_DRP_CONFLICT_ACT1, 38 + 39 + /* the device shall not set the Reservation Status bit to ONE 40 + * and shall not transmit frames in conflicting MASs. If the 41 + * device is the reservation target, it shall also set the 42 + * Reason Code in its DRP IE to Conflict. 43 + */ 44 + UWB_DRP_CONFLICT_ACT2, 45 + 46 + /* the device shall not transmit frames in conflicting MASs in 47 + * the following superframe. It shall remove the conflicting 48 + * MASs from the reservation or set the Reservation Status to 49 + * ZERO in its beacon in the following superframe. If the 50 + * device is the reservation target, it shall also set the 51 + * Reason Code in its DRP IE to Conflict. 52 + */ 53 + UWB_DRP_CONFLICT_ACT3, 54 + }; 55 + 56 + 57 + static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg, 58 + struct uwb_rceb *reply, ssize_t reply_size) 59 + { 60 + struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply; 61 + 62 + if (r != NULL) { 63 + if (r->bResultCode != UWB_RC_RES_SUCCESS) 64 + dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n", 65 + uwb_rc_strerror(r->bResultCode), r->bResultCode); 66 + } else 67 + dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n"); 68 + 69 + spin_lock(&rc->rsvs_lock); 70 + if (rc->set_drp_ie_pending > 1) { 71 + rc->set_drp_ie_pending = 0; 72 + uwb_rsv_queue_update(rc); 73 + } else { 74 + rc->set_drp_ie_pending = 0; 75 + } 76 + spin_unlock(&rc->rsvs_lock); 77 + } 78 + 79 /** 80 * Construct and send the SET DRP IE 81 * ··· 37 * 38 * A DRP Availability IE is appended. 39 * 40 + * rc->rsvs_mutex is held 41 * 42 * FIXME We currently ignore the returned value indicating the remaining space 43 * in beacon. This could be used to deny reservation requests earlier if 44 * determined that they would cause the beacon space to be exceeded. 45 */ 46 + int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) 47 { 48 int result; 49 struct uwb_rc_cmd_set_drp_ie *cmd; 50 struct uwb_rsv *rsv; 51 + struct uwb_rsv_move *mv; 52 int num_bytes = 0; 53 u8 *IEDataptr; 54 55 result = -ENOMEM; 56 /* First traverse all reservations to determine memory needed. */ 57 list_for_each_entry(rsv, &rc->reservations, rc_node) { 58 + if (rsv->drp_ie != NULL) { 59 num_bytes += rsv->drp_ie->hdr.length + 2; 60 + if (uwb_rsv_has_two_drp_ies(rsv) && 61 + (rsv->mv.companion_drp_ie != NULL)) { 62 + mv = &rsv->mv; 63 + num_bytes += mv->companion_drp_ie->hdr.length + 2; 64 + } 65 + } 66 } 67 num_bytes += sizeof(rc->drp_avail.ie); 68 cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL); ··· 69 cmd->wIELength = num_bytes; 70 IEDataptr = (u8 *)&cmd->IEData[0]; 71 72 + /* FIXME: DRV avail IE is not always needed */ 73 + /* put DRP avail IE first */ 74 + memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); 75 + IEDataptr += sizeof(struct uwb_ie_drp_avail); 76 + 77 /* Next traverse all reservations to place IEs in allocated memory. 
*/ 78 list_for_each_entry(rsv, &rc->reservations, rc_node) { 79 if (rsv->drp_ie != NULL) { 80 memcpy(IEDataptr, rsv->drp_ie, 81 rsv->drp_ie->hdr.length + 2); 82 IEDataptr += rsv->drp_ie->hdr.length + 2; 83 + 84 + if (uwb_rsv_has_two_drp_ies(rsv) && 85 + (rsv->mv.companion_drp_ie != NULL)) { 86 + mv = &rsv->mv; 87 + memcpy(IEDataptr, mv->companion_drp_ie, 88 + mv->companion_drp_ie->hdr.length + 2); 89 + IEDataptr += mv->companion_drp_ie->hdr.length + 2; 90 + } 91 } 92 } 93 94 + result = uwb_rc_cmd_async(rc, "SET-DRP-IE", &cmd->rccb, sizeof(*cmd) + num_bytes, 95 + UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE, 96 + uwb_rc_set_drp_cmd_done, NULL); 97 + 98 + rc->set_drp_ie_pending = 1; 99 + 100 kfree(cmd); 101 error: 102 return result; 103 } 104 + 105 + /* 106 + * Evaluate the action to perform using conflict resolution rules 107 * 108 + * Return a uwb_drp_conflict_action. 109 */ 110 + static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot, 111 + struct uwb_rsv *rsv, int our_status) 112 { 113 + int our_tie_breaker = rsv->tiebreaker; 114 + int our_type = rsv->type; 115 + int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot; 116 117 + int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie); 118 + int ext_status = uwb_ie_drp_status(ext_drp_ie); 119 + int ext_type = uwb_ie_drp_type(ext_drp_ie); 120 + 121 + 122 + /* [ECMA-368 2nd Edition] 17.4.6 */ 123 + if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) { 124 + return UWB_DRP_CONFLICT_MANTAIN; 125 } 126 + 127 + /* [ECMA-368 2nd Edition] 17.4.6-1 */ 128 + if (our_type == UWB_DRP_TYPE_ALIEN_BP) { 129 + return UWB_DRP_CONFLICT_MANTAIN; 130 + } 131 + 132 + /* [ECMA-368 2nd Edition] 17.4.6-2 */ 133 + if (ext_type == UWB_DRP_TYPE_ALIEN_BP) { 134 + /* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */ 135 + return UWB_DRP_CONFLICT_ACT1; 136 + } 137 + 138 + /* [ECMA-368 2nd Edition] 17.4.6-3 */ 139 + if (our_status == 0 && ext_status == 1) { 140 + return UWB_DRP_CONFLICT_ACT2; 141 + } 142 + 143 + /* [ECMA-368 2nd Edition] 17.4.6-4 */ 144 + if (our_status == 1 && ext_status == 0) { 145 + return UWB_DRP_CONFLICT_MANTAIN; 146 + } 147 + 148 + /* [ECMA-368 2nd Edition] 17.4.6-5a */ 149 + if (our_tie_breaker == ext_tie_breaker && 150 + our_beacon_slot < ext_beacon_slot) { 151 + return UWB_DRP_CONFLICT_MANTAIN; 152 + } 153 + 154 + /* [ECMA-368 2nd Edition] 17.4.6-5b */ 155 + if (our_tie_breaker != ext_tie_breaker && 156 + our_beacon_slot > ext_beacon_slot) { 157 + return UWB_DRP_CONFLICT_MANTAIN; 158 + } 159 + 160 + if (our_status == 0) { 161 + if (our_tie_breaker == ext_tie_breaker) { 162 + /* [ECMA-368 2nd Edition] 17.4.6-6a */ 163 + if (our_beacon_slot > ext_beacon_slot) { 164 + return UWB_DRP_CONFLICT_ACT2; 165 + } 166 + } else { 167 + /* [ECMA-368 2nd Edition] 17.4.6-6b */ 168 + if (our_beacon_slot < ext_beacon_slot) { 169 + return UWB_DRP_CONFLICT_ACT2; 170 + } 171 + } 172 + } else { 173 + if (our_tie_breaker == ext_tie_breaker) { 174 + /* [ECMA-368 2nd Edition] 17.4.6-7a */ 175 + if (our_beacon_slot > ext_beacon_slot) { 176 + return UWB_DRP_CONFLICT_ACT3; 177 + } 178 + } else { 179 + /* [ECMA-368 2nd Edition] 17.4.6-7b */ 180 + if (our_beacon_slot < ext_beacon_slot) { 181 + return UWB_DRP_CONFLICT_ACT3; 182 + } 183 + } 184 + } 185 + return UWB_DRP_CONFLICT_MANTAIN; 186 } 187 188 + static void handle_conflict_normal(struct uwb_ie_drp *drp_ie, 189 + int ext_beacon_slot, 190 + struct uwb_rsv *rsv, 191 + struct uwb_mas_bm *conflicting_mas) 192 + { 193 + struct uwb_rc *rc = rsv->rc; 194 + struct uwb_rsv_move *mv = 
&rsv->mv; 195 + struct uwb_drp_backoff_win *bow = &rc->bow; 196 + int action; 197 + 198 + action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv)); 199 + 200 + if (uwb_rsv_is_owner(rsv)) { 201 + switch(action) { 202 + case UWB_DRP_CONFLICT_ACT2: 203 + /* try move */ 204 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED); 205 + if (bow->can_reserve_extra_mases == false) 206 + uwb_rsv_backoff_win_increment(rc); 207 + 208 + break; 209 + case UWB_DRP_CONFLICT_ACT3: 210 + uwb_rsv_backoff_win_increment(rc); 211 + /* drop some mases with reason modified */ 212 + /* put in the companion the mases to be dropped */ 213 + bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); 214 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); 215 + default: 216 + break; 217 + } 218 + } else { 219 + switch(action) { 220 + case UWB_DRP_CONFLICT_ACT2: 221 + case UWB_DRP_CONFLICT_ACT3: 222 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); 223 + default: 224 + break; 225 + } 226 + 227 + } 228 + 229 + } 230 + 231 + static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot, 232 + struct uwb_rsv *rsv, bool companion_only, 233 + struct uwb_mas_bm *conflicting_mas) 234 + { 235 + struct uwb_rc *rc = rsv->rc; 236 + struct uwb_drp_backoff_win *bow = &rc->bow; 237 + struct uwb_rsv_move *mv = &rsv->mv; 238 + int action; 239 + 240 + if (companion_only) { 241 + /* status of companion is 0 at this point */ 242 + action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0); 243 + if (uwb_rsv_is_owner(rsv)) { 244 + switch(action) { 245 + case UWB_DRP_CONFLICT_ACT2: 246 + case UWB_DRP_CONFLICT_ACT3: 247 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 248 + rsv->needs_release_companion_mas = false; 249 + if (bow->can_reserve_extra_mases == false) 250 + uwb_rsv_backoff_win_increment(rc); 251 + uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); 252 + } 253 + } else { /* rsv is target */ 254 + switch(action) { 255 + case UWB_DRP_CONFLICT_ACT2: 256 + case UWB_DRP_CONFLICT_ACT3: 257 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_CONFLICT); 258 + /* send_drp_avail_ie = true; */ 259 + } 260 + } 261 + } else { /* also base part of the reservation is conflicting */ 262 + if (uwb_rsv_is_owner(rsv)) { 263 + uwb_rsv_backoff_win_increment(rc); 264 + /* remove companion part */ 265 + uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); 266 + 267 + /* drop some mases with reason modified */ 268 + 269 + /* put in the companion the mases to be dropped */ 270 + bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); 271 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); 272 + } else { /* it is a target rsv */ 273 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); 274 + /* send_drp_avail_ie = true; */ 275 + } 276 + } 277 + } 278 + 279 + static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv, 280 + struct uwb_rc_evt_drp *drp_evt, 281 + struct uwb_ie_drp *drp_ie, 282 + struct uwb_mas_bm *conflicting_mas) 283 + { 284 + struct uwb_rsv_move *mv; 285 + 286 + /* check if the conflicting reservation has two drp_ies */ 287 + if (uwb_rsv_has_two_drp_ies(rsv)) { 288 + mv = &rsv->mv; 289 + if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { 290 + handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number, 291 + rsv, false, conflicting_mas); 292 + } else { 293 + if (bitmap_intersects(mv->companion_mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { 294 + handle_conflict_expanding(drp_ie, 
drp_evt->beacon_slot_number, 295 + rsv, true, conflicting_mas); 296 + } 297 + } 298 + } else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { 299 + handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, rsv, conflicting_mas); 300 + } 301 + } 302 + 303 + static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc, 304 + struct uwb_rc_evt_drp *drp_evt, 305 + struct uwb_ie_drp *drp_ie, 306 + struct uwb_mas_bm *conflicting_mas) 307 + { 308 + struct uwb_rsv *rsv; 309 + 310 + list_for_each_entry(rsv, &rc->reservations, rc_node) { 311 + uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, conflicting_mas); 312 + } 313 + } 314 + 315 /* 316 * Based on the DRP IE, transition a target reservation to a new 317 * state. 318 */ 319 static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, 320 + struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt) 321 { 322 struct device *dev = &rc->uwb_dev.dev; 323 + struct uwb_rsv_move *mv = &rsv->mv; 324 int status; 325 enum uwb_drp_reason reason_code; 326 + struct uwb_mas_bm mas; 327 + 328 status = uwb_ie_drp_status(drp_ie); 329 reason_code = uwb_ie_drp_reason_code(drp_ie); 330 + uwb_drp_ie_to_bm(&mas, drp_ie); 331 332 + switch (reason_code) { 333 + case UWB_DRP_REASON_ACCEPTED: 334 + 335 + if (rsv->state == UWB_RSV_STATE_T_CONFLICT) { 336 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); 337 + break; 338 + } 339 + 340 + if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) { 341 + /* drp_ie is companion */ 342 + if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) 343 + /* stroke companion */ 344 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); 345 + } else { 346 + if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { 347 + if (uwb_drp_avail_reserve_pending(rc, &mas) == -EBUSY) { 348 + /* FIXME: there is a conflict, find 349 + * the conflicting reservations and 350 + * take a sensible action. Consider 351 + * that in drp_ie there is the 352 + * "neighbour" */ 353 + uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); 354 + } else { 355 + /* accept the extra reservation */ 356 + bitmap_copy(mv->companion_mas.bm, mas.bm, UWB_NUM_MAS); 357 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); 358 + } 359 + } else { 360 + if (status) { 361 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); 362 + } 363 + } 364 + 365 + } 366 + break; 367 + 368 + case UWB_DRP_REASON_MODIFIED: 369 + /* check to see if we have already modified the reservation */ 370 + if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { 371 uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); 372 break; 373 } 374 + 375 + /* find if the owner wants to expand or reduce */ 376 + if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { 377 + /* owner is reducing */ 378 + bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, UWB_NUM_MAS); 379 + uwb_drp_avail_release(rsv->rc, &mv->companion_mas); 380 } 381 + 382 + bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); 383 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED); 384 + break; 385 + default: 386 + dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", 387 + reason_code, status); 388 } 389 } 390 ··· 199 * state. 
200 */ 201 static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, 202 + struct uwb_dev *src, struct uwb_ie_drp *drp_ie, 203 + struct uwb_rc_evt_drp *drp_evt) 204 { 205 struct device *dev = &rc->uwb_dev.dev; 206 + struct uwb_rsv_move *mv = &rsv->mv; 207 int status; 208 enum uwb_drp_reason reason_code; 209 + struct uwb_mas_bm mas; 210 211 status = uwb_ie_drp_status(drp_ie); 212 reason_code = uwb_ie_drp_reason_code(drp_ie); 213 + uwb_drp_ie_to_bm(&mas, drp_ie); 214 215 if (status) { 216 switch (reason_code) { 217 case UWB_DRP_REASON_ACCEPTED: 218 + switch (rsv->state) { 219 + case UWB_RSV_STATE_O_PENDING: 220 + case UWB_RSV_STATE_O_INITIATED: 221 + case UWB_RSV_STATE_O_ESTABLISHED: 222 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 223 + break; 224 + case UWB_RSV_STATE_O_MODIFIED: 225 + if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { 226 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 227 + } else { 228 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); 229 + } 230 + break; 231 + 232 + case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn' t be a problem */ 233 + if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { 234 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 235 + } else { 236 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); 237 + } 238 + break; 239 + case UWB_RSV_STATE_O_MOVE_EXPANDING: 240 + if (bitmap_equal(mas.bm, mv->companion_mas.bm, UWB_NUM_MAS)) { 241 + /* Companion reservation accepted */ 242 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); 243 + } else { 244 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); 245 + } 246 + break; 247 + case UWB_RSV_STATE_O_MOVE_COMBINING: 248 + if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) 249 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); 250 + else 251 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); 252 + break; 253 + default: 254 + break; 255 + } 256 break; 257 default: 258 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", ··· 230 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 231 break; 232 case UWB_DRP_REASON_CONFLICT: 233 + /* resolve the conflict */ 234 + bitmap_complement(mas.bm, src->last_availability_bm, 235 + UWB_NUM_MAS); 236 + uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas); 237 break; 238 default: 239 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", ··· 241 } 242 } 243 244 + static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt) 245 + { 246 + unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US; 247 + mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us)); 248 + } 249 + 250 + static void uwb_cnflt_update_work(struct work_struct *work) 251 + { 252 + struct uwb_cnflt_alien *cnflt = container_of(work, 253 + struct uwb_cnflt_alien, 254 + cnflt_update_work); 255 + struct uwb_cnflt_alien *c; 256 + struct uwb_rc *rc = cnflt->rc; 257 + 258 + unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; 259 + 260 + mutex_lock(&rc->rsvs_mutex); 261 + 262 + list_del(&cnflt->rc_node); 263 + 264 + /* update rc global conflicting alien bitmap */ 265 + bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); 266 + 267 + list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) { 268 + bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, c->mas.bm, UWB_NUM_MAS); 269 + } 270 + 271 + queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); 272 + 273 + kfree(cnflt); 274 + mutex_unlock(&rc->rsvs_mutex); 275 + } 276 + 277 + static void 
uwb_cnflt_timer(unsigned long arg) 278 + { 279 + struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg; 280 + 281 + queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work); 282 + } 283 + 284 /* 285 + * We have received an DRP_IE of type Alien BP and we need to make 286 + * sure we do not transmit in conflicting MASs. 287 */ 288 + static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) 289 + { 290 + struct device *dev = &rc->uwb_dev.dev; 291 + struct uwb_mas_bm mas; 292 + struct uwb_cnflt_alien *cnflt; 293 + char buf[72]; 294 + unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; 295 + 296 + uwb_drp_ie_to_bm(&mas, drp_ie); 297 + bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS); 298 + 299 + list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) { 300 + if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) { 301 + /* Existing alien BP reservation conflicting 302 + * bitmap, just reset the timer */ 303 + uwb_cnflt_alien_stroke_timer(cnflt); 304 + return; 305 + } 306 + } 307 + 308 + /* New alien BP reservation conflicting bitmap */ 309 + 310 + /* alloc and initialize new uwb_cnflt_alien */ 311 + cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL); 312 + if (!cnflt) 313 + dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n"); 314 + INIT_LIST_HEAD(&cnflt->rc_node); 315 + init_timer(&cnflt->timer); 316 + cnflt->timer.function = uwb_cnflt_timer; 317 + cnflt->timer.data = (unsigned long)cnflt; 318 + 319 + cnflt->rc = rc; 320 + INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work); 321 + 322 + bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS); 323 + 324 + list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list); 325 + 326 + /* update rc global conflicting alien bitmap */ 327 + bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS); 328 + 329 + queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); 330 + 331 + /* start the timer */ 332 + uwb_cnflt_alien_stroke_timer(cnflt); 333 + } 334 + 335 + static void uwb_drp_process_not_involved(struct uwb_rc *rc, 336 + struct uwb_rc_evt_drp *drp_evt, 337 + struct uwb_ie_drp *drp_ie) 338 + { 339 + struct uwb_mas_bm mas; 340 + 341 + uwb_drp_ie_to_bm(&mas, drp_ie); 342 + uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); 343 + } 344 + 345 + static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src, 346 + struct uwb_rc_evt_drp *drp_evt, 347 + struct uwb_ie_drp *drp_ie) 348 { 349 struct uwb_rsv *rsv; 350 ··· 259 */ 260 return; 261 } 262 + 263 /* 264 * Do nothing with DRP IEs for reservations that have been 265 * terminated. ··· 268 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 269 return; 270 } 271 + 272 if (uwb_ie_drp_owner(drp_ie)) 273 + uwb_drp_process_target(rc, rsv, drp_ie, drp_evt); 274 else 275 + uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt); 276 + 277 } 278 279 + 280 + static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) 281 + { 282 + return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0; 283 + } 284 + 285 + /* 286 + * Process a received DRP IE. 
287 + */ 288 + static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, 289 + struct uwb_dev *src, struct uwb_ie_drp *drp_ie) 290 + { 291 + if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP) 292 + uwb_drp_handle_alien_drp(rc, drp_ie); 293 + else if (uwb_drp_involves_us(rc, drp_ie)) 294 + uwb_drp_process_involved(rc, src, drp_evt, drp_ie); 295 + else 296 + uwb_drp_process_not_involved(rc, drp_evt, drp_ie); 297 + } 298 + 299 + /* 300 + * Process a received DRP Availability IE 301 + */ 302 + static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src, 303 + struct uwb_ie_drp_avail *drp_availability_ie) 304 + { 305 + bitmap_copy(src->last_availability_bm, 306 + drp_availability_ie->bmp, UWB_NUM_MAS); 307 + } 308 309 /* 310 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE) ··· 296 297 switch (ie_hdr->element_id) { 298 case UWB_IE_DRP_AVAILABILITY: 299 + uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr); 300 break; 301 case UWB_IE_DRP: 302 + uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr); 303 break; 304 default: 305 dev_warn(dev, "unexpected IE in DRP notification\n"); ··· 311 dev_warn(dev, "%d octets remaining in DRP notification\n", 312 (int)ielen); 313 } 314 315 /** 316 * uwbd_evt_handle_rc_drp - handle a DRP_IE event ··· 401 size_t ielength, bytes_left; 402 struct uwb_dev_addr src_addr; 403 struct uwb_dev *src_dev; 404 405 /* Is there enough data to decode the event (and any IEs in 406 its payload)? */ ··· 437 438 mutex_lock(&rc->rsvs_mutex); 439 440 + /* We do not distinguish from the reason */ 441 + uwb_drp_process_all(rc, drp_evt, ielength, src_dev); 442 443 mutex_unlock(&rc->rsvs_mutex); 444
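Note: most of the new conflict handling funnels into evaluate_conflict_action(), whose rule numbering follows [ECMA-368 2nd Edition] 17.4.6. The hardest part to read is the tie-breaker comparison, so here is a condensed sketch of just the established-versus-established sub-case (rules 5a/5b and 7a/7b), assuming the PCA, Alien BP and status-mismatch rules have already been excluded:

/*
 * Sketch only: tie-breaker resolution between two established
 * reservations, mirroring rules 17.4.6-5a/5b/7a/7b as coded in
 * evaluate_conflict_action() above.
 */
static int example_established_conflict(int our_tie_breaker, int our_slot,
                                        int ext_tie_breaker, int ext_slot)
{
        if (our_tie_breaker == ext_tie_breaker) {
                /* equal tie-breakers: the lower beacon slot keeps its MASs */
                if (our_slot > ext_slot)
                        return UWB_DRP_CONFLICT_ACT3;
        } else {
                /* different tie-breakers: the comparison is inverted */
                if (our_slot < ext_slot)
                        return UWB_DRP_CONFLICT_ACT3;
        }
        return UWB_DRP_CONFLICT_MANTAIN;
}

Inverting the comparison when the tie-breaker bits differ keeps either beacon slot position from always winning a conflict; the random tie-breaker bit carried in each DRP IE decides which ordering applies to a given pair of reservations.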
+1 -13
drivers/uwb/est.c
··· 40 * uwb_est_get_size() 41 */ 42 #include <linux/spinlock.h> 43 - #define D_LOCAL 0 44 - #include <linux/uwb/debug.h> 45 - #include "uwb-internal.h" 46 47 48 struct uwb_est { 49 u16 type_event_high; ··· 49 u8 entries; 50 const struct uwb_est_entry *entry; 51 }; 52 - 53 54 static struct uwb_est *uwb_est; 55 static u8 uwb_est_size; ··· 437 u8 *ptr = (u8 *) rceb; 438 439 read_lock_irqsave(&uwb_est_lock, flags); 440 - d_printf(2, dev, "Size query for event 0x%02x/%04x/%02x," 441 - " buffer size %ld\n", 442 - (unsigned) rceb->bEventType, 443 - (unsigned) le16_to_cpu(rceb->wEvent), 444 - (unsigned) rceb->bEventContext, 445 - (long) rceb_size); 446 size = -ENOSPC; 447 if (rceb_size < sizeof(*rceb)) 448 goto out; 449 event = le16_to_cpu(rceb->wEvent); 450 type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8; 451 for (itr = 0; itr < uwb_est_used; itr++) { 452 - d_printf(3, dev, "Checking EST 0x%04x/%04x/%04x\n", 453 - uwb_est[itr].type_event_high, uwb_est[itr].vendor, 454 - uwb_est[itr].product); 455 if (uwb_est[itr].type_event_high != type_event_high) 456 continue; 457 size = uwb_est_get_size(rc, &uwb_est[itr],
··· 40 * uwb_est_get_size() 41 */ 42 #include <linux/spinlock.h> 43 44 + #include "uwb-internal.h" 45 46 struct uwb_est { 47 u16 type_event_high; ··· 51 u8 entries; 52 const struct uwb_est_entry *entry; 53 }; 54 55 static struct uwb_est *uwb_est; 56 static u8 uwb_est_size; ··· 440 u8 *ptr = (u8 *) rceb; 441 442 read_lock_irqsave(&uwb_est_lock, flags); 443 size = -ENOSPC; 444 if (rceb_size < sizeof(*rceb)) 445 goto out; 446 event = le16_to_cpu(rceb->wEvent); 447 type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8; 448 for (itr = 0; itr < uwb_est_used; itr++) { 449 if (uwb_est[itr].type_event_high != type_event_high) 450 continue; 451 size = uwb_est_get_size(rc, &uwb_est[itr],
+29 -24
drivers/uwb/hwa-rc.c
··· 51 * 52 * 53 */ 54 - #include <linux/version.h> 55 #include <linux/init.h> 56 #include <linux/module.h> 57 #include <linux/usb.h> 58 #include <linux/usb/wusb.h> 59 #include <linux/usb/wusb-wa.h> 60 #include <linux/uwb.h> 61 #include "uwb-internal.h" 62 - #define D_LOCAL 1 63 - #include <linux/uwb/debug.h> 64 65 /* The device uses commands and events from the WHCI specification, although 66 * reporting itself as WUSB compliant. */ ··· 629 630 switch (result = urb->status) { 631 case 0: 632 - d_printf(3, dev, "NEEP: receive stat %d, %zu bytes\n", 633 - urb->status, (size_t)urb->actual_length); 634 uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer, 635 urb->actual_length); 636 break; 637 case -ECONNRESET: /* Not an error, but a controlled situation; */ 638 case -ENOENT: /* (we killed the URB)...so, no broadcast */ 639 - d_printf(2, dev, "NEEP: URB reset/noent %d\n", urb->status); 640 goto out; 641 case -ESHUTDOWN: /* going away! */ 642 - d_printf(2, dev, "NEEP: URB down %d\n", urb->status); 643 goto out; 644 default: /* On general errors, retry unless it gets ugly */ 645 if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS, ··· 644 dev_err(dev, "NEEP: URB error %d\n", urb->status); 645 } 646 result = usb_submit_urb(urb, GFP_ATOMIC); 647 - d_printf(3, dev, "NEEP: submit %d\n", result); 648 if (result < 0) { 649 dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n", 650 result); ··· 752 itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); 753 while (itr_size >= sizeof(*hdr)) { 754 hdr = (struct usb_descriptor_header *) itr; 755 - d_printf(3, dev, "Extra device descriptor: " 756 - "type %02x/%u bytes @ %zu (%zu left)\n", 757 - hdr->bDescriptorType, hdr->bLength, 758 - (itr - usb_dev->rawdescriptors[actconfig_idx]), 759 - itr_size); 760 if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL) 761 goto found; 762 itr += hdr->bLength; ··· 788 goto error; 789 } 790 rc->version = version; 791 - d_printf(3, dev, "Device supports WUSB protocol version 0x%04x \n", 792 - rc->version); 793 result = 0; 794 error: 795 return result; ··· 869 uwb_rc_rm(uwb_rc); 870 usb_put_intf(hwarc->usb_iface); 871 usb_put_dev(hwarc->usb_dev); 872 - d_printf(1, &hwarc->usb_iface->dev, "freed hwarc %p\n", hwarc); 873 kfree(hwarc); 874 uwb_rc_put(uwb_rc); /* when creating the device, refcount = 1 */ 875 } 876 877 /** USB device ID's that we handle */ ··· 907 908 static struct usb_driver hwarc_driver = { 909 .name = "hwa-rc", 910 .probe = hwarc_probe, 911 .disconnect = hwarc_disconnect, 912 - .id_table = hwarc_id_table, 913 }; 914 915 static int __init hwarc_driver_init(void) 916 { 917 - int result; 918 - result = usb_register(&hwarc_driver); 919 - if (result < 0) 920 - printk(KERN_ERR "HWA-RC: Cannot register USB driver: %d\n", 921 - result); 922 - return result; 923 - 924 } 925 module_init(hwarc_driver_init); 926
··· 51 * 52 * 53 */ 54 #include <linux/init.h> 55 #include <linux/module.h> 56 #include <linux/usb.h> 57 #include <linux/usb/wusb.h> 58 #include <linux/usb/wusb-wa.h> 59 #include <linux/uwb.h> 60 + 61 #include "uwb-internal.h" 62 63 /* The device uses commands and events from the WHCI specification, although 64 * reporting itself as WUSB compliant. */ ··· 631 632 switch (result = urb->status) { 633 case 0: 634 uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer, 635 urb->actual_length); 636 break; 637 case -ECONNRESET: /* Not an error, but a controlled situation; */ 638 case -ENOENT: /* (we killed the URB)...so, no broadcast */ 639 goto out; 640 case -ESHUTDOWN: /* going away! */ 641 goto out; 642 default: /* On general errors, retry unless it gets ugly */ 643 if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS, ··· 650 dev_err(dev, "NEEP: URB error %d\n", urb->status); 651 } 652 result = usb_submit_urb(urb, GFP_ATOMIC); 653 if (result < 0) { 654 dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n", 655 result); ··· 759 itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); 760 while (itr_size >= sizeof(*hdr)) { 761 hdr = (struct usb_descriptor_header *) itr; 762 + dev_dbg(dev, "Extra device descriptor: " 763 + "type %02x/%u bytes @ %zu (%zu left)\n", 764 + hdr->bDescriptorType, hdr->bLength, 765 + (itr - usb_dev->rawdescriptors[actconfig_idx]), 766 + itr_size); 767 if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL) 768 goto found; 769 itr += hdr->bLength; ··· 795 goto error; 796 } 797 rc->version = version; 798 + dev_dbg(dev, "Device supports WUSB protocol version 0x%04x \n", rc->version); 799 result = 0; 800 error: 801 return result; ··· 877 uwb_rc_rm(uwb_rc); 878 usb_put_intf(hwarc->usb_iface); 879 usb_put_dev(hwarc->usb_dev); 880 kfree(hwarc); 881 uwb_rc_put(uwb_rc); /* when creating the device, refcount = 1 */ 882 + } 883 + 884 + static int hwarc_pre_reset(struct usb_interface *iface) 885 + { 886 + struct hwarc *hwarc = usb_get_intfdata(iface); 887 + struct uwb_rc *uwb_rc = hwarc->uwb_rc; 888 + 889 + uwb_rc_pre_reset(uwb_rc); 890 + return 0; 891 + } 892 + 893 + static int hwarc_post_reset(struct usb_interface *iface) 894 + { 895 + struct hwarc *hwarc = usb_get_intfdata(iface); 896 + struct uwb_rc *uwb_rc = hwarc->uwb_rc; 897 + 898 + uwb_rc_post_reset(uwb_rc); 899 + return 0; 900 } 901 902 /** USB device ID's that we handle */ ··· 898 899 static struct usb_driver hwarc_driver = { 900 .name = "hwa-rc", 901 + .id_table = hwarc_id_table, 902 .probe = hwarc_probe, 903 .disconnect = hwarc_disconnect, 904 + .pre_reset = hwarc_pre_reset, 905 + .post_reset = hwarc_post_reset, 906 }; 907 908 static int __init hwarc_driver_init(void) 909 { 910 + return usb_register(&hwarc_driver); 911 } 912 module_init(hwarc_driver_init); 913
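Note: the driver now exposes pre_reset/post_reset hooks so the UWB stack can survive a USB reset without being unbound (the uwb_rc_pre_reset()/uwb_rc_post_reset() pair above). A sketch of how such a reset would be initiated from process context; the USB core calls ->pre_reset() on each bound interface, resets and reconfigures the device, then calls ->post_reset():

/*
 * Sketch only: a reset that exercises hwarc_pre_reset() and
 * hwarc_post_reset() above.  usb_reset_device() requires the caller to
 * hold the device lock (drivers normally obtain it via
 * usb_lock_device_for_reset()).
 */
static int example_reset_hwarc(struct hwarc *hwarc)
{
        /* assumes the usb_device lock is already held by the caller */
        return usb_reset_device(hwarc->usb_dev);
}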
+2 -8
drivers/uwb/i1480/dfu/dfu.c
··· 34 #include <linux/uwb.h> 35 #include <linux/random.h> 36 37 - #define D_LOCAL 0 38 - #include <linux/uwb/debug.h> 39 - 40 - /** 41 * i1480_rceb_check - Check RCEB for expected field values 42 * @i1480: pointer to device for which RCEB is being checked 43 * @rceb: RCEB being checked ··· 80 EXPORT_SYMBOL_GPL(i1480_rceb_check); 81 82 83 - /** 84 * Execute a Radio Control Command 85 * 86 * Command data has to be in i1480->cmd_buf. ··· 98 u8 expected_type = reply->bEventType; 99 u8 context; 100 101 - d_fnstart(3, i1480->dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size); 102 init_completion(&i1480->evt_complete); 103 i1480->evt_result = -EINPROGRESS; 104 do { ··· 146 result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, 147 expected_type, expected_event); 148 error: 149 - d_fnend(3, i1480->dev, "(%p, %s, %zu) = %zd\n", 150 - i1480, cmd_name, cmd_size, result); 151 return result; 152 } 153 EXPORT_SYMBOL_GPL(i1480_cmd);
··· 34 #include <linux/uwb.h> 35 #include <linux/random.h> 36 37 + /* 38 * i1480_rceb_check - Check RCEB for expected field values 39 * @i1480: pointer to device for which RCEB is being checked 40 * @rceb: RCEB being checked ··· 83 EXPORT_SYMBOL_GPL(i1480_rceb_check); 84 85 86 + /* 87 * Execute a Radio Control Command 88 * 89 * Command data has to be in i1480->cmd_buf. ··· 101 u8 expected_type = reply->bEventType; 102 u8 context; 103 104 init_completion(&i1480->evt_complete); 105 i1480->evt_result = -EINPROGRESS; 106 do { ··· 150 result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, 151 expected_type, expected_event); 152 error: 153 return result; 154 } 155 EXPORT_SYMBOL_GPL(i1480_cmd);
-18
drivers/uwb/i1480/dfu/mac.c
··· 31 #include <linux/uwb.h> 32 #include "i1480-dfu.h" 33 34 - #define D_LOCAL 0 35 - #include <linux/uwb/debug.h> 36 - 37 /* 38 * Descriptor for a continuous segment of MAC fw data 39 */ ··· 181 } 182 if (memcmp(i1480->cmd_buf, bin + src_itr, result)) { 183 u8 *buf = i1480->cmd_buf; 184 - d_printf(2, i1480->dev, 185 - "original data @ %p + %u, %zu bytes\n", 186 - bin, src_itr, result); 187 - d_dump(4, i1480->dev, bin + src_itr, result); 188 for (cnt = 0; cnt < result; cnt++) 189 if (bin[src_itr + cnt] != buf[cnt]) { 190 dev_err(i1480->dev, "byte failed at " ··· 217 struct fw_hdr *hdr_itr; 218 int verif_retry_count; 219 220 - d_fnstart(3, dev, "(%p, %p)\n", i1480, hdr); 221 /* Now, header by header, push them to the hw */ 222 for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) { 223 verif_retry_count = 0; ··· 256 break; 257 } 258 } 259 - d_fnend(3, dev, "(%zd)\n", result); 260 return result; 261 } 262 ··· 328 const struct firmware *fw; 329 struct fw_hdr *fw_hdrs; 330 331 - d_fnstart(3, i1480->dev, "(%p, %s, %s)\n", i1480, fw_name, fw_tag); 332 result = request_firmware(&fw, fw_name, i1480->dev); 333 if (result < 0) /* Up to caller to complain on -ENOENT */ 334 goto out; 335 - d_printf(3, i1480->dev, "%s fw '%s': uploading\n", fw_tag, fw_name); 336 result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size); 337 if (result < 0) { 338 dev_err(i1480->dev, "%s fw '%s': failed to parse firmware " ··· 352 out_release: 353 release_firmware(fw); 354 out: 355 - d_fnend(3, i1480->dev, "(%p, %s, %s) = %d\n", i1480, fw_name, fw_tag, 356 - result); 357 return result; 358 } 359 ··· 420 int result; 421 u32 *val = (u32 *) i1480->cmd_buf; 422 423 - d_fnstart(3, i1480->dev, "(i1480 %p)\n", i1480); 424 for (cnt = 0; cnt < 10; cnt++) { 425 msleep(100); 426 result = i1480->read(i1480, 0x80080000, 4); ··· 433 dev_err(i1480->dev, "Timed out waiting for fw to start\n"); 434 result = -ETIMEDOUT; 435 out: 436 - d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result); 437 return result; 438 439 } ··· 452 int result = 0, deprecated_name = 0; 453 struct i1480_rceb *rcebe = (void *) i1480->evt_buf; 454 455 - d_fnstart(3, i1480->dev, "(%p)\n", i1480); 456 result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC"); 457 if (result == -ENOENT) { 458 result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate, ··· 485 dev_err(i1480->dev, "MAC fw '%s': initialization event returns " 486 "wrong size (%zu bytes vs %zu needed)\n", 487 i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe)); 488 - dump_bytes(i1480->dev, rcebe, min(i1480->evt_result, (ssize_t)32)); 489 goto error_size; 490 } 491 result = -EIO; ··· 505 error_init_timeout: 506 error_size: 507 error_setup: 508 - d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result); 509 return result; 510 }
··· 31 #include <linux/uwb.h> 32 #include "i1480-dfu.h" 33 34 /* 35 * Descriptor for a continuous segment of MAC fw data 36 */ ··· 184 } 185 if (memcmp(i1480->cmd_buf, bin + src_itr, result)) { 186 u8 *buf = i1480->cmd_buf; 187 for (cnt = 0; cnt < result; cnt++) 188 if (bin[src_itr + cnt] != buf[cnt]) { 189 dev_err(i1480->dev, "byte failed at " ··· 224 struct fw_hdr *hdr_itr; 225 int verif_retry_count; 226 227 /* Now, header by header, push them to the hw */ 228 for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) { 229 verif_retry_count = 0; ··· 264 break; 265 } 266 } 267 return result; 268 } 269 ··· 337 const struct firmware *fw; 338 struct fw_hdr *fw_hdrs; 339 340 result = request_firmware(&fw, fw_name, i1480->dev); 341 if (result < 0) /* Up to caller to complain on -ENOENT */ 342 goto out; 343 result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size); 344 if (result < 0) { 345 dev_err(i1480->dev, "%s fw '%s': failed to parse firmware " ··· 363 out_release: 364 release_firmware(fw); 365 out: 366 return result; 367 } 368 ··· 433 int result; 434 u32 *val = (u32 *) i1480->cmd_buf; 435 436 for (cnt = 0; cnt < 10; cnt++) { 437 msleep(100); 438 result = i1480->read(i1480, 0x80080000, 4); ··· 447 dev_err(i1480->dev, "Timed out waiting for fw to start\n"); 448 result = -ETIMEDOUT; 449 out: 450 return result; 451 452 } ··· 467 int result = 0, deprecated_name = 0; 468 struct i1480_rceb *rcebe = (void *) i1480->evt_buf; 469 470 result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC"); 471 if (result == -ENOENT) { 472 result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate, ··· 501 dev_err(i1480->dev, "MAC fw '%s': initialization event returns " 502 "wrong size (%zu bytes vs %zu needed)\n", 503 i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe)); 504 goto error_size; 505 } 506 result = -EIO; ··· 522 error_init_timeout: 523 error_size: 524 error_setup: 525 return result; 526 }
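Note: with the d_* tracing gone, __mac_fw_upload() above reduces to the standard firmware-loading pattern. A minimal sketch of that pattern on its own, with the i1480-specific header parsing and upload collapsed into a placeholder:

#include <linux/device.h>
#include <linux/firmware.h>

/*
 * Sketch only: request a firmware image, hand it to a hardware-specific
 * loader, then release it.  -ENOENT is passed back so the caller can
 * retry with a fallback (deprecated) firmware name, as the MAC upload
 * path above does.
 */
static int example_fw_upload(struct device *dev, const char *fw_name)
{
        const struct firmware *fw;
        int result;

        result = request_firmware(&fw, fw_name, dev);
        if (result < 0)
                return result;

        /* ... parse fw->data / fw->size and push it to the device ... */

        release_firmware(fw);
        return 0;
}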
-27
drivers/uwb/i1480/dfu/usb.c
··· 35 * the functions are i1480_usb_NAME(). 36 */ 37 #include <linux/module.h> 38 - #include <linux/version.h> 39 #include <linux/usb.h> 40 #include <linux/interrupt.h> 41 #include <linux/delay.h> ··· 42 #include <linux/usb/wusb.h> 43 #include <linux/usb/wusb-wa.h> 44 #include "i1480-dfu.h" 45 - 46 - #define D_LOCAL 0 47 - #include <linux/uwb/debug.h> 48 - 49 50 struct i1480_usb { 51 struct i1480 i1480; ··· 113 struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); 114 size_t buffer_size, itr = 0; 115 116 - d_fnstart(3, i1480->dev, "(%p, 0x%08x, %p, %zu)\n", 117 - i1480, memory_address, buffer, size); 118 BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ 119 while (size > 0) { 120 buffer_size = size < i1480->buf_size ? size : i1480->buf_size; ··· 125 i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */); 126 if (result < 0) 127 break; 128 - d_printf(3, i1480->dev, 129 - "wrote @ 0x%08x %u bytes (of %zu bytes requested)\n", 130 - memory_address, result, buffer_size); 131 - d_dump(4, i1480->dev, i1480->cmd_buf, result); 132 itr += result; 133 memory_address += result; 134 size -= result; 135 } 136 - d_fnend(3, i1480->dev, "(%p, 0x%08x, %p, %zu) = %d\n", 137 - i1480, memory_address, buffer, size, result); 138 return result; 139 } 140 ··· 153 size_t itr, read_size = i1480->buf_size; 154 struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); 155 156 - d_fnstart(3, i1480->dev, "(%p, 0x%08x, %zu)\n", 157 - i1480, addr, size); 158 BUG_ON(size > i1480->buf_size); 159 BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ 160 BUG_ON(read_size > 512); ··· 186 } 187 result = bytes; 188 out: 189 - d_fnend(3, i1480->dev, "(%p, 0x%08x, %zu) = %zd\n", 190 - i1480, addr, size, result); 191 - if (result > 0) 192 - d_dump(4, i1480->dev, i1480->cmd_buf, result); 193 return result; 194 } 195 ··· 241 struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); 242 struct usb_endpoint_descriptor *epd; 243 244 - d_fnstart(3, dev, "(%p)\n", i1480); 245 init_completion(&i1480->evt_complete); 246 i1480->evt_result = -EINPROGRESS; 247 epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; ··· 262 goto error_wait; 263 } 264 usb_kill_urb(i1480_usb->neep_urb); 265 - d_fnend(3, dev, "(%p) = 0\n", i1480); 266 return 0; 267 268 error_wait: 269 usb_kill_urb(i1480_usb->neep_urb); 270 error_submit: 271 i1480->evt_result = result; 272 - d_fnend(3, dev, "(%p) = %d\n", i1480, result); 273 return result; 274 } 275 ··· 298 struct uwb_rccb *cmd = i1480->cmd_buf; 299 u8 iface_no; 300 301 - d_fnstart(3, dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size); 302 /* Post a read on the notification & event endpoint */ 303 iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber; 304 epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; ··· 325 cmd_name, result); 326 goto error_submit_ep0; 327 } 328 - d_fnend(3, dev, "(%p, %s, %zu) = %d\n", 329 - i1480, cmd_name, cmd_size, result); 330 return result; 331 332 error_submit_ep0: 333 usb_kill_urb(i1480_usb->neep_urb); 334 error_submit_ep1: 335 - d_fnend(3, dev, "(%p, %s, %zu) = %d\n", 336 - i1480, cmd_name, cmd_size, result); 337 return result; 338 } 339
··· 35 * the functions are i1480_usb_NAME(). 36 */ 37 #include <linux/module.h> 38 #include <linux/usb.h> 39 #include <linux/interrupt.h> 40 #include <linux/delay.h> ··· 43 #include <linux/usb/wusb.h> 44 #include <linux/usb/wusb-wa.h> 45 #include "i1480-dfu.h" 46 47 struct i1480_usb { 48 struct i1480 i1480; ··· 118 struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); 119 size_t buffer_size, itr = 0; 120 121 BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ 122 while (size > 0) { 123 buffer_size = size < i1480->buf_size ? size : i1480->buf_size; ··· 132 i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */); 133 if (result < 0) 134 break; 135 itr += result; 136 memory_address += result; 137 size -= result; 138 } 139 return result; 140 } 141 ··· 166 size_t itr, read_size = i1480->buf_size; 167 struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); 168 169 BUG_ON(size > i1480->buf_size); 170 BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ 171 BUG_ON(read_size > 512); ··· 201 } 202 result = bytes; 203 out: 204 return result; 205 } 206 ··· 260 struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); 261 struct usb_endpoint_descriptor *epd; 262 263 init_completion(&i1480->evt_complete); 264 i1480->evt_result = -EINPROGRESS; 265 epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; ··· 282 goto error_wait; 283 } 284 usb_kill_urb(i1480_usb->neep_urb); 285 return 0; 286 287 error_wait: 288 usb_kill_urb(i1480_usb->neep_urb); 289 error_submit: 290 i1480->evt_result = result; 291 return result; 292 } 293 ··· 320 struct uwb_rccb *cmd = i1480->cmd_buf; 321 u8 iface_no; 322 323 /* Post a read on the notification & event endpoint */ 324 iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber; 325 epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; ··· 348 cmd_name, result); 349 goto error_submit_ep0; 350 } 351 return result; 352 353 error_submit_ep0: 354 usb_kill_urb(i1480_usb->neep_urb); 355 error_submit_ep1: 356 return result; 357 } 358
+2 -3
drivers/uwb/i1480/i1480u-wlp/lc.c
··· 55 * is being removed. 56 * i1480u_rm() 57 */ 58 - #include <linux/version.h> 59 #include <linux/if_arp.h> 60 #include <linux/etherdevice.h> 61 - #include <linux/uwb/debug.h> 62 #include "i1480u-wlp.h" 63 64 ··· 206 wlp->fill_device_info = i1480u_fill_device_info; 207 wlp->stop_queue = i1480u_stop_queue; 208 wlp->start_queue = i1480u_start_queue; 209 - result = wlp_setup(wlp, rc); 210 if (result < 0) { 211 dev_err(&iface->dev, "Cannot setup WLP\n"); 212 goto error_wlp_setup;
··· 55 * is being removed. 56 * i1480u_rm() 57 */ 58 #include <linux/if_arp.h> 59 #include <linux/etherdevice.h> 60 + 61 #include "i1480u-wlp.h" 62 63 ··· 207 wlp->fill_device_info = i1480u_fill_device_info; 208 wlp->stop_queue = i1480u_stop_queue; 209 wlp->start_queue = i1480u_start_queue; 210 + result = wlp_setup(wlp, rc, net_dev); 211 if (result < 0) { 212 dev_err(&iface->dev, "Cannot setup WLP\n"); 213 goto error_wlp_setup;
+12 -41
drivers/uwb/i1480/i1480u-wlp/netdev.c
··· 41 42 #include <linux/if_arp.h> 43 #include <linux/etherdevice.h> 44 - #include <linux/uwb/debug.h> 45 #include "i1480u-wlp.h" 46 47 struct i1480u_cmd_set_ip_mas { ··· 207 result = i1480u_rx_setup(i1480u); /* Alloc RX stuff */ 208 if (result < 0) 209 goto error_rx_setup; 210 netif_wake_queue(net_dev); 211 #ifdef i1480u_FLOW_CONTROL 212 result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);; ··· 220 goto error_notif_urb_submit; 221 } 222 #endif 223 - i1480u->uwb_notifs_handler.cb = i1480u_uwb_notifs_cb; 224 - i1480u->uwb_notifs_handler.data = i1480u; 225 - if (uwb_bg_joined(rc)) 226 - netif_carrier_on(net_dev); 227 - else 228 - netif_carrier_off(net_dev); 229 - uwb_notifs_register(rc, &i1480u->uwb_notifs_handler); 230 /* Interface is up with an address, now we can create WSS */ 231 result = wlp_wss_setup(net_dev, &wlp->wss); 232 if (result < 0) { 233 dev_err(dev, "Can't create WSS: %d. \n", result); 234 - goto error_notif_deregister; 235 } 236 return 0; 237 - error_notif_deregister: 238 - uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler); 239 #ifdef i1480u_FLOW_CONTROL 240 error_notif_urb_submit: 241 #endif 242 netif_stop_queue(net_dev); 243 i1480u_rx_release(i1480u); 244 error_rx_setup: ··· 248 { 249 struct i1480u *i1480u = netdev_priv(net_dev); 250 struct wlp *wlp = &i1480u->wlp; 251 - struct uwb_rc *rc = wlp->rc; 252 253 BUG_ON(wlp->rc == NULL); 254 wlp_wss_remove(&wlp->wss); 255 - uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler); 256 netif_carrier_off(net_dev); 257 #ifdef i1480u_FLOW_CONTROL 258 usb_kill_urb(i1480u->notif_urb); 259 #endif 260 netif_stop_queue(net_dev); 261 i1480u_rx_release(i1480u); 262 i1480u_tx_release(i1480u); 263 return 0; ··· 300 return -ERANGE; 301 net_dev->mtu = mtu; 302 return 0; 303 - } 304 - 305 - 306 - /** 307 - * Callback function to handle events from UWB 308 - * When we see other devices we know the carrier is ok, 309 - * if we are the only device in the beacon group we set the carrier 310 - * state to off. 311 - * */ 312 - void i1480u_uwb_notifs_cb(void *data, struct uwb_dev *uwb_dev, 313 - enum uwb_notifs event) 314 - { 315 - struct i1480u *i1480u = data; 316 - struct net_device *net_dev = i1480u->net_dev; 317 - struct device *dev = &i1480u->usb_iface->dev; 318 - switch (event) { 319 - case UWB_NOTIF_BG_JOIN: 320 - netif_carrier_on(net_dev); 321 - dev_info(dev, "Link is up\n"); 322 - break; 323 - case UWB_NOTIF_BG_LEAVE: 324 - netif_carrier_off(net_dev); 325 - dev_info(dev, "Link is down\n"); 326 - break; 327 - default: 328 - dev_err(dev, "don't know how to handle event %d from uwb\n", 329 - event); 330 - } 331 } 332 333 /**
··· 41 42 #include <linux/if_arp.h> 43 #include <linux/etherdevice.h> 44 + 45 #include "i1480u-wlp.h" 46 47 struct i1480u_cmd_set_ip_mas { ··· 207 result = i1480u_rx_setup(i1480u); /* Alloc RX stuff */ 208 if (result < 0) 209 goto error_rx_setup; 210 + 211 + result = uwb_radio_start(&wlp->pal); 212 + if (result < 0) 213 + goto error_radio_start; 214 + 215 netif_wake_queue(net_dev); 216 #ifdef i1480u_FLOW_CONTROL 217 result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);; ··· 215 goto error_notif_urb_submit; 216 } 217 #endif 218 /* Interface is up with an address, now we can create WSS */ 219 result = wlp_wss_setup(net_dev, &wlp->wss); 220 if (result < 0) { 221 dev_err(dev, "Can't create WSS: %d. \n", result); 222 + goto error_wss_setup; 223 } 224 return 0; 225 + error_wss_setup: 226 #ifdef i1480u_FLOW_CONTROL 227 + usb_kill_urb(i1480u->notif_urb); 228 error_notif_urb_submit: 229 #endif 230 + uwb_radio_stop(&wlp->pal); 231 + error_radio_start: 232 netif_stop_queue(net_dev); 233 i1480u_rx_release(i1480u); 234 error_rx_setup: ··· 248 { 249 struct i1480u *i1480u = netdev_priv(net_dev); 250 struct wlp *wlp = &i1480u->wlp; 251 252 BUG_ON(wlp->rc == NULL); 253 wlp_wss_remove(&wlp->wss); 254 netif_carrier_off(net_dev); 255 #ifdef i1480u_FLOW_CONTROL 256 usb_kill_urb(i1480u->notif_urb); 257 #endif 258 netif_stop_queue(net_dev); 259 + uwb_radio_stop(&wlp->pal); 260 i1480u_rx_release(i1480u); 261 i1480u_tx_release(i1480u); 262 return 0; ··· 301 return -ERANGE; 302 net_dev->mtu = mtu; 303 return 0; 304 } 305 306 /**
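With the beacon-group join/leave notifications gone, the interface's open and stop paths now drive the radio directly through the new radio manager (radio.c, below). A condensed sketch of the resulting open path (function and field names follow the diff; the optional i1480u_FLOW_CONTROL notification URB is omitted and the function name is a placeholder):

#include <linux/netdevice.h>
#include "i1480u-wlp.h"

/* Open: bring up RX, ask the radio manager to start the radio on behalf
 * of this PAL, then create the WSS.  Unwind in reverse order on error. */
static int i1480u_open_sketch(struct net_device *net_dev)
{
        struct i1480u *i1480u = netdev_priv(net_dev);
        struct wlp *wlp = &i1480u->wlp;
        int result;

        result = i1480u_rx_setup(i1480u);
        if (result < 0)
                return result;
        result = uwb_radio_start(&wlp->pal);    /* radio on, channel chosen */
        if (result < 0)
                goto error_radio_start;
        netif_wake_queue(net_dev);
        result = wlp_wss_setup(net_dev, &wlp->wss);
        if (result < 0)
                goto error_wss_setup;
        return 0;

error_wss_setup:
        uwb_radio_stop(&wlp->pal);
error_radio_start:
        netif_stop_queue(net_dev);
        i1480u_rx_release(i1480u);
        return result;
}

The stop path mirrors this: wlp_wss_remove(), netif_stop_queue(), uwb_radio_stop() and the RX/TX release calls, as in the diff above.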
+7 -18
drivers/uwb/i1480/i1480u-wlp/rx.c
··· 68 #include <linux/etherdevice.h> 69 #include "i1480u-wlp.h" 70 71 - #define D_LOCAL 0 72 - #include <linux/uwb/debug.h> 73 - 74 - 75 - /** 76 * Setup the RX context 77 * 78 * Each URB is provided with a transfer_buffer that is the data field ··· 125 } 126 127 128 - /** Release resources associated to the rx context */ 129 void i1480u_rx_release(struct i1480u *i1480u) 130 { 131 int cnt; ··· 151 } 152 } 153 154 - /** Fix an out-of-sequence packet */ 155 #define i1480u_fix(i1480u, msg...) \ 156 do { \ 157 if (printk_ratelimit()) \ ··· 162 } while (0) 163 164 165 - /** Drop an out-of-sequence packet */ 166 #define i1480u_drop(i1480u, msg...) \ 167 do { \ 168 if (printk_ratelimit()) \ ··· 173 174 175 176 - /** Finalizes setting up the SKB and delivers it 177 * 178 * We first pass the incoming frame to WLP substack for verification. It 179 * may also be a WLP association frame in which case WLP will take over the ··· 188 struct net_device *net_dev = i1480u->net_dev; 189 struct device *dev = &i1480u->usb_iface->dev; 190 191 - d_printf(6, dev, "RX delivered pre skb(%p), %u bytes\n", 192 - i1480u->rx_skb, i1480u->rx_skb->len); 193 - d_dump(7, dev, i1480u->rx_skb->data, i1480u->rx_skb->len); 194 should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb, 195 &i1480u->rx_srcaddr); 196 if (!should_parse) 197 goto out; 198 i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev); 199 - d_printf(5, dev, "RX delivered skb(%p), %u bytes\n", 200 - i1480u->rx_skb, i1480u->rx_skb->len); 201 - d_dump(7, dev, i1480u->rx_skb->data, 202 - i1480u->rx_skb->len > 72 ? 72 : i1480u->rx_skb->len); 203 i1480u->stats.rx_packets++; 204 i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size; 205 net_dev->last_rx = jiffies; ··· 205 } 206 207 208 - /** 209 * Process a buffer of data received from the USB RX endpoint 210 * 211 * First fragment arrives with next or last fragment. All other fragments ··· 393 } 394 395 396 - /** 397 * Called when an RX URB has finished receiving or has found some kind 398 * of error condition. 399 *
··· 68 #include <linux/etherdevice.h> 69 #include "i1480u-wlp.h" 70 71 + /* 72 * Setup the RX context 73 * 74 * Each URB is provided with a transfer_buffer that is the data field ··· 129 } 130 131 132 + /* Release resources associated to the rx context */ 133 void i1480u_rx_release(struct i1480u *i1480u) 134 { 135 int cnt; ··· 155 } 156 } 157 158 + /* Fix an out-of-sequence packet */ 159 #define i1480u_fix(i1480u, msg...) \ 160 do { \ 161 if (printk_ratelimit()) \ ··· 166 } while (0) 167 168 169 + /* Drop an out-of-sequence packet */ 170 #define i1480u_drop(i1480u, msg...) \ 171 do { \ 172 if (printk_ratelimit()) \ ··· 177 178 179 180 + /* Finalizes setting up the SKB and delivers it 181 * 182 * We first pass the incoming frame to WLP substack for verification. It 183 * may also be a WLP association frame in which case WLP will take over the ··· 192 struct net_device *net_dev = i1480u->net_dev; 193 struct device *dev = &i1480u->usb_iface->dev; 194 195 should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb, 196 &i1480u->rx_srcaddr); 197 if (!should_parse) 198 goto out; 199 i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev); 200 i1480u->stats.rx_packets++; 201 i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size; 202 net_dev->last_rx = jiffies; ··· 216 } 217 218 219 + /* 220 * Process a buffer of data received from the USB RX endpoint 221 * 222 * First fragment arrives with next or last fragment. All other fragments ··· 404 } 405 406 407 + /* 408 * Called when an RX URB has finished receiving or has found some kind 409 * of error condition. 410 *
+1 -2
drivers/uwb/i1480/i1480u-wlp/sysfs.c
··· 25 26 #include <linux/netdevice.h> 27 #include <linux/etherdevice.h> 28 - #include <linux/uwb/debug.h> 29 #include <linux/device.h> 30 #include "i1480u-wlp.h" 31 32 ··· 226 * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a 227 * class_device_attr_NAME or device_attr_NAME (for group registration). 228 */ 229 - #include <linux/version.h> 230 231 #define i1480u_SHOW(name, fn, param) \ 232 static ssize_t i1480u_show_##name(struct device *dev, \
··· 25 26 #include <linux/netdevice.h> 27 #include <linux/etherdevice.h> 28 #include <linux/device.h> 29 + 30 #include "i1480u-wlp.h" 31 32 ··· 226 * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a 227 * class_device_attr_NAME or device_attr_NAME (for group registration). 228 */ 229 230 #define i1480u_SHOW(name, fn, param) \ 231 static ssize_t i1480u_show_##name(struct device *dev, \
+8 -58
drivers/uwb/i1480/i1480u-wlp/tx.c
··· 55 */ 56 57 #include "i1480u-wlp.h" 58 - #define D_LOCAL 5 59 - #include <linux/uwb/debug.h> 60 61 enum { 62 /* This is only for Next and Last TX packets */ ··· 62 - sizeof(struct untd_hdr_rst), 63 }; 64 65 - /** Free resources allocated to a i1480u tx context. */ 66 static 67 void i1480u_tx_free(struct i1480u_tx *wtx) 68 { ··· 97 } 98 99 100 - /** 101 * Callback for a completed tx USB URB. 102 * 103 * TODO: ··· 147 <= i1480u->tx_inflight.threshold 148 && netif_queue_stopped(net_dev) 149 && i1480u->tx_inflight.threshold != 0) { 150 - if (d_test(2) && printk_ratelimit()) 151 - d_printf(2, dev, "Restart queue. \n"); 152 netif_start_queue(net_dev); 153 atomic_inc(&i1480u->tx_inflight.restart_count); 154 } ··· 154 } 155 156 157 - /** 158 * Given a buffer that doesn't fit in a single fragment, create an 159 * scatter/gather structure for delivery to the USB pipe. 160 * ··· 249 /* Now do each remaining fragment */ 250 result = -EINVAL; 251 while (pl_size_left > 0) { 252 - d_printf(5, NULL, "ITR HDR: pl_size_left %zu buf_itr %zu\n", 253 - pl_size_left, buf_itr - wtx->buf); 254 if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf 255 > wtx->buf_size) { 256 printk(KERN_ERR "BUG: no space for header\n"); 257 goto error_bug; 258 } 259 - d_printf(5, NULL, "ITR HDR 2: pl_size_left %zu buf_itr %zu\n", 260 - pl_size_left, buf_itr - wtx->buf); 261 untd_hdr_rst = buf_itr; 262 buf_itr += sizeof(*untd_hdr_rst); 263 if (pl_size_left > i1480u_MAX_PL_SIZE) { ··· 263 frg_pl_size = pl_size_left; 264 untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST); 265 } 266 - d_printf(5, NULL, 267 - "ITR PL: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n", 268 - pl_size_left, buf_itr - wtx->buf, frg_pl_size); 269 untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0); 270 untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size); 271 untd_hdr_rst->padding = 0; ··· 275 buf_itr += frg_pl_size; 276 pl_itr += frg_pl_size; 277 pl_size_left -= frg_pl_size; 278 - d_printf(5, NULL, 279 - "ITR PL 2: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n", 280 - pl_size_left, buf_itr - wtx->buf, frg_pl_size); 281 } 282 dev_kfree_skb_irq(skb); 283 return 0; ··· 294 } 295 296 297 - /** 298 * Given a buffer that fits in a single fragment, fill out a @wtx 299 * struct for transmitting it down the USB pipe. 300 * ··· 332 } 333 334 335 - /** 336 * Given a skb to transmit, massage it to become palatable for the TX pipe 337 * 338 * This will break the buffer in chunks smaller than ··· 411 return NULL; 412 } 413 414 - /** 415 * Actual fragmentation and transmission of frame 416 * 417 * @wlp: WLP substack data structure ··· 433 struct i1480u_tx *wtx; 434 struct wlp_tx_hdr *wlp_tx_hdr; 435 static unsigned char dev_bcast[2] = { 0xff, 0xff }; 436 - #if 0 437 - int lockup = 50; 438 - #endif 439 440 - d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len, 441 - net_dev); 442 BUG_ON(i1480u->wlp.rc == NULL); 443 if ((net_dev->flags & IFF_UP) == 0) 444 goto out; 445 result = -EBUSY; 446 if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) { 447 - if (d_test(2) && printk_ratelimit()) 448 - d_printf(2, dev, "Max frames in flight " 449 - "stopping queue.\n"); 450 netif_stop_queue(net_dev); 451 goto error_max_inflight; 452 } ··· 467 wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority); 468 } 469 470 - #if 0 471 - dev_info(dev, "TX delivering skb -> USB, %zu bytes\n", skb->len); 472 - dump_bytes(dev, skb->data, skb->len > 72 ? 
72 : skb->len); 473 - #endif 474 - #if 0 475 - /* simulates a device lockup after every lockup# packets */ 476 - if (lockup && ((i1480u->stats.tx_packets + 1) % lockup) == 0) { 477 - /* Simulate a dropped transmit interrupt */ 478 - net_dev->trans_start = jiffies; 479 - netif_stop_queue(net_dev); 480 - dev_err(dev, "Simulate lockup at %ld\n", jiffies); 481 - return result; 482 - } 483 - #endif 484 - 485 result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */ 486 if (result < 0) { 487 dev_err(dev, "TX: cannot submit URB: %d\n", result); ··· 476 } 477 atomic_inc(&i1480u->tx_inflight.count); 478 net_dev->trans_start = jiffies; 479 - d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, 480 - net_dev, result); 481 return result; 482 483 error_tx_urb_submit: ··· 483 error_wtx_alloc: 484 error_max_inflight: 485 out: 486 - d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, 487 - net_dev, result); 488 return result; 489 } 490 491 492 - /** 493 * Transmit an skb Called when an skbuf has to be transmitted 494 * 495 * The skb is first passed to WLP substack to ensure this is a valid ··· 510 struct device *dev = &i1480u->usb_iface->dev; 511 struct uwb_dev_addr dst; 512 513 - d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len, 514 - net_dev); 515 - BUG_ON(i1480u->wlp.rc == NULL); 516 if ((net_dev->flags & IFF_UP) == 0) 517 goto error; 518 result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst); ··· 518 "Dropping packet.\n", result); 519 goto error; 520 } else if (result == 1) { 521 - d_printf(6, dev, "WLP will transmit frame. \n"); 522 /* trans_start time will be set when WLP actually transmits 523 * the frame */ 524 goto out; 525 } 526 - d_printf(6, dev, "Transmitting frame. \n"); 527 result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst); 528 if (result < 0) { 529 dev_err(dev, "Frame TX failed (%d).\n", result); 530 goto error; 531 } 532 - d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, 533 - net_dev, result); 534 return NETDEV_TX_OK; 535 error: 536 dev_kfree_skb_any(skb); 537 i1480u->stats.tx_dropped++; 538 out: 539 - d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, 540 - net_dev, result); 541 return NETDEV_TX_OK; 542 } 543 544 545 - /** 546 * Called when a pkt transmission doesn't complete in a reasonable period 547 * Device reset may sleep - do it outside of interrupt context (delayed) 548 */
··· 55 */ 56 57 #include "i1480u-wlp.h" 58 59 enum { 60 /* This is only for Next and Last TX packets */ ··· 64 - sizeof(struct untd_hdr_rst), 65 }; 66 67 + /* Free resources allocated to a i1480u tx context. */ 68 static 69 void i1480u_tx_free(struct i1480u_tx *wtx) 70 { ··· 99 } 100 101 102 + /* 103 * Callback for a completed tx USB URB. 104 * 105 * TODO: ··· 149 <= i1480u->tx_inflight.threshold 150 && netif_queue_stopped(net_dev) 151 && i1480u->tx_inflight.threshold != 0) { 152 netif_start_queue(net_dev); 153 atomic_inc(&i1480u->tx_inflight.restart_count); 154 } ··· 158 } 159 160 161 + /* 162 * Given a buffer that doesn't fit in a single fragment, create an 163 * scatter/gather structure for delivery to the USB pipe. 164 * ··· 253 /* Now do each remaining fragment */ 254 result = -EINVAL; 255 while (pl_size_left > 0) { 256 if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf 257 > wtx->buf_size) { 258 printk(KERN_ERR "BUG: no space for header\n"); 259 goto error_bug; 260 } 261 untd_hdr_rst = buf_itr; 262 buf_itr += sizeof(*untd_hdr_rst); 263 if (pl_size_left > i1480u_MAX_PL_SIZE) { ··· 271 frg_pl_size = pl_size_left; 272 untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST); 273 } 274 untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0); 275 untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size); 276 untd_hdr_rst->padding = 0; ··· 286 buf_itr += frg_pl_size; 287 pl_itr += frg_pl_size; 288 pl_size_left -= frg_pl_size; 289 } 290 dev_kfree_skb_irq(skb); 291 return 0; ··· 308 } 309 310 311 + /* 312 * Given a buffer that fits in a single fragment, fill out a @wtx 313 * struct for transmitting it down the USB pipe. 314 * ··· 346 } 347 348 349 + /* 350 * Given a skb to transmit, massage it to become palatable for the TX pipe 351 * 352 * This will break the buffer in chunks smaller than ··· 425 return NULL; 426 } 427 428 + /* 429 * Actual fragmentation and transmission of frame 430 * 431 * @wlp: WLP substack data structure ··· 447 struct i1480u_tx *wtx; 448 struct wlp_tx_hdr *wlp_tx_hdr; 449 static unsigned char dev_bcast[2] = { 0xff, 0xff }; 450 451 BUG_ON(i1480u->wlp.rc == NULL); 452 if ((net_dev->flags & IFF_UP) == 0) 453 goto out; 454 result = -EBUSY; 455 if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) { 456 netif_stop_queue(net_dev); 457 goto error_max_inflight; 458 } ··· 489 wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority); 490 } 491 492 result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */ 493 if (result < 0) { 494 dev_err(dev, "TX: cannot submit URB: %d\n", result); ··· 513 } 514 atomic_inc(&i1480u->tx_inflight.count); 515 net_dev->trans_start = jiffies; 516 return result; 517 518 error_tx_urb_submit: ··· 522 error_wtx_alloc: 523 error_max_inflight: 524 out: 525 return result; 526 } 527 528 529 + /* 530 * Transmit an skb Called when an skbuf has to be transmitted 531 * 532 * The skb is first passed to WLP substack to ensure this is a valid ··· 551 struct device *dev = &i1480u->usb_iface->dev; 552 struct uwb_dev_addr dst; 553 554 if ((net_dev->flags & IFF_UP) == 0) 555 goto error; 556 result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst); ··· 562 "Dropping packet.\n", result); 563 goto error; 564 } else if (result == 1) { 565 /* trans_start time will be set when WLP actually transmits 566 * the frame */ 567 goto out; 568 } 569 result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst); 570 if (result < 0) { 571 dev_err(dev, "Frame TX failed (%d).\n", result); 572 goto error; 573 } 574 return NETDEV_TX_OK; 575 error: 576 dev_kfree_skb_any(skb); 577 
i1480u->stats.tx_dropped++; 578 out: 579 return NETDEV_TX_OK; 580 } 581 582 583 + /* 584 * Called when a pkt transmission doesn't complete in a reasonable period 585 * Device reset may sleep - do it outside of interrupt context (delayed) 586 */
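Beyond the tracing and dead test-code removal, the transmit path keeps its in-flight throttling: the xmit hook stops the queue once too many URBs are queued, and the URB completion callback restarts it when the backlog drains below a threshold. A stripped-down sketch of that pair (the atomic_dec_return() at the top of the second test is reconstructed from elided context; locking and the rest of the error handling are omitted):

        /* In the transmit path: refuse new frames once the in-flight
         * count reaches the configured maximum. */
        if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) {
                netif_stop_queue(net_dev);
                goto error_max_inflight;
        }
        ...
        atomic_inc(&i1480u->tx_inflight.count);

        /* In the URB completion callback: wake the queue once enough
         * URBs have completed (a threshold of 0 disables the restart). */
        if (atomic_dec_return(&i1480u->tx_inflight.count)
                        <= i1480u->tx_inflight.threshold
            && netif_queue_stopped(net_dev)
            && i1480u->tx_inflight.threshold != 0) {
                netif_start_queue(net_dev);
                atomic_inc(&i1480u->tx_inflight.restart_count);
        }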
+55
drivers/uwb/ie-rcv.c
···
··· 1 + /* 2 + * Ultra Wide Band 3 + * IE Received notification handling. 4 + * 5 + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License version 9 + * 2 as published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 + */ 19 + 20 + #include <linux/errno.h> 21 + #include <linux/module.h> 22 + #include <linux/device.h> 23 + #include <linux/bitmap.h> 24 + #include "uwb-internal.h" 25 + 26 + /* 27 + * Process an incoming IE Received notification. 28 + */ 29 + int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *evt) 30 + { 31 + int result = -EINVAL; 32 + struct device *dev = &evt->rc->uwb_dev.dev; 33 + struct uwb_rc_evt_ie_rcv *iercv; 34 + size_t iesize; 35 + 36 + /* Is there enough data to decode it? */ 37 + if (evt->notif.size < sizeof(*iercv)) { 38 + dev_err(dev, "IE Received notification: Not enough data to " 39 + "decode (%zu vs %zu bytes needed)\n", 40 + evt->notif.size, sizeof(*iercv)); 41 + goto error; 42 + } 43 + iercv = container_of(evt->notif.rceb, struct uwb_rc_evt_ie_rcv, rceb); 44 + iesize = le16_to_cpu(iercv->wIELength); 45 + 46 + dev_dbg(dev, "IE received, element ID=%d\n", iercv->IEData[0]); 47 + 48 + if (iercv->IEData[0] == UWB_RELINQUISH_REQUEST_IE) { 49 + dev_warn(dev, "unhandled Relinquish Request IE\n"); 50 + } 51 + 52 + return 0; 53 + error: 54 + return result; 55 + }
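ie-rcv.c is new: it decodes the IE Received notification and, for now, only warns when a Relinquish Request IE arrives. Note that only the fixed-size part of the event is length-checked before IEData[0] and wIELength are used; a hedged sketch of the extra bounds check one might add for the variable-length payload (hypothetical, not part of the commit), placed after iesize is read:

        if (iesize == 0 || evt->notif.size < sizeof(*iercv) + iesize) {
                dev_err(dev, "IE Received notification: IE data truncated "
                        "(%zu bytes available vs %zu declared)\n",
                        evt->notif.size - sizeof(*iercv), iesize);
                goto error;
        }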
+147 -310
drivers/uwb/ie.c
··· 25 */ 26 27 #include "uwb-internal.h" 28 - #define D_LOCAL 0 29 - #include <linux/uwb/debug.h> 30 31 /** 32 * uwb_ie_next - get the next IE in a buffer ··· 59 EXPORT_SYMBOL_GPL(uwb_ie_next); 60 61 /** 62 * Get the IEs that a radio controller is sending in its beacon 63 * 64 * @uwb_rc: UWB Radio Controller ··· 104 * anything. Once done with the iedata buffer, call 105 * uwb_rc_ie_release(iedata). Don't call kfree on it. 106 */ 107 ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie) 108 { 109 ssize_t result; ··· 113 struct uwb_rceb *reply = NULL; 114 struct uwb_rc_evt_get_ie *get_ie; 115 116 - d_fnstart(3, dev, "(%p, %p)\n", uwb_rc, pget_ie); 117 - result = -ENOMEM; 118 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 119 if (cmd == NULL) 120 - goto error_kzalloc; 121 cmd->bCommandType = UWB_RC_CET_GENERAL; 122 cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE); 123 result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd), 124 UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE, 125 &reply); 126 if (result < 0) 127 - goto error_cmd; 128 get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb); 129 if (result < sizeof(*get_ie)) { 130 dev_err(dev, "not enough data returned for decoding GET IE " 131 "(%zu bytes received vs %zu needed)\n", 132 result, sizeof(*get_ie)); 133 - result = -EINVAL; 134 } else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) { 135 dev_err(dev, "not enough data returned for decoding GET IE " 136 "payload (%zu bytes received vs %zu needed)\n", result, 137 sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)); 138 - result = -EINVAL; 139 - } else 140 - *pget_ie = get_ie; 141 - error_cmd: 142 - kfree(cmd); 143 - error_kzalloc: 144 - d_fnend(3, dev, "(%p, %p) = %d\n", uwb_rc, pget_ie, (int)result); 145 - return result; 146 - } 147 - EXPORT_SYMBOL_GPL(uwb_rc_get_ie); 148 - 149 - 150 - /* 151 - * Given a pointer to an IE, print it in ASCII/hex followed by a new line 152 - * 153 - * @ie_hdr: pointer to the IE header. Length is in there, and it is 154 - * guaranteed that the ie_hdr->length bytes following it are 155 - * safely accesible. 156 - * 157 - * @_data: context data passed from uwb_ie_for_each(), an struct output_ctx 158 - */ 159 - int uwb_ie_dump_hex(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr, 160 - size_t offset, void *_ctx) 161 - { 162 - struct uwb_buf_ctx *ctx = _ctx; 163 - const u8 *pl = (void *)(ie_hdr + 1); 164 - u8 pl_itr; 165 - 166 - ctx->bytes += scnprintf(ctx->buf + ctx->bytes, ctx->size - ctx->bytes, 167 - "%02x %02x ", (unsigned) ie_hdr->element_id, 168 - (unsigned) ie_hdr->length); 169 - pl_itr = 0; 170 - while (pl_itr < ie_hdr->length && ctx->bytes < ctx->size) 171 - ctx->bytes += scnprintf(ctx->buf + ctx->bytes, 172 - ctx->size - ctx->bytes, 173 - "%02x ", (unsigned) pl[pl_itr++]); 174 - if (ctx->bytes < ctx->size) 175 - ctx->buf[ctx->bytes++] = '\n'; 176 - return 0; 177 - } 178 - EXPORT_SYMBOL_GPL(uwb_ie_dump_hex); 179 - 180 - 181 - /** 182 - * Verify that a pointer in a buffer points to valid IE 183 - * 184 - * @start: pointer to start of buffer in which IE appears 185 - * @itr: pointer to IE inside buffer that will be verified 186 - * @top: pointer to end of buffer 187 - * 188 - * @returns: 0 if IE is valid, <0 otherwise 189 - * 190 - * Verification involves checking that the buffer can contain a 191 - * header and the amount of data reported in the IE header can be found in 192 - * the buffer. 
193 - */ 194 - static 195 - int uwb_rc_ie_verify(struct uwb_dev *uwb_dev, const void *start, 196 - const void *itr, const void *top) 197 - { 198 - struct device *dev = &uwb_dev->dev; 199 - const struct uwb_ie_hdr *ie_hdr; 200 - 201 - if (top - itr < sizeof(*ie_hdr)) { 202 - dev_err(dev, "Bad IE: no data to decode header " 203 - "(%zu bytes left vs %zu needed) at offset %zu\n", 204 - top - itr, sizeof(*ie_hdr), itr - start); 205 return -EINVAL; 206 } 207 - ie_hdr = itr; 208 - itr += sizeof(*ie_hdr); 209 - if (top - itr < ie_hdr->length) { 210 - dev_err(dev, "Bad IE: not enough data for payload " 211 - "(%zu bytes left vs %zu needed) at offset %zu\n", 212 - top - itr, (size_t)ie_hdr->length, 213 - (void *)ie_hdr - start); 214 - return -EINVAL; 215 - } 216 - return 0; 217 - } 218 219 - 220 - /** 221 - * Walk a buffer filled with consecutive IE's a buffer 222 - * 223 - * @uwb_dev: UWB device this IEs belong to (for err messages mainly) 224 - * 225 - * @fn: function to call with each IE; if it returns 0, we keep 226 - * traversing the buffer. If it returns !0, we'll stop and return 227 - * that value. 228 - * 229 - * @data: pointer passed to @fn 230 - * 231 - * @buf: buffer where the consecutive IEs are located 232 - * 233 - * @size: size of @buf 234 - * 235 - * Each IE is checked for basic correctness (there is space left for 236 - * the header and the payload). If that test is failed, we stop 237 - * processing. For every good IE, @fn is called. 238 - */ 239 - ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data, 240 - const void *buf, size_t size) 241 - { 242 - ssize_t result = 0; 243 - const struct uwb_ie_hdr *ie_hdr; 244 - const void *itr = buf, *top = itr + size; 245 - 246 - while (itr < top) { 247 - if (uwb_rc_ie_verify(uwb_dev, buf, itr, top) != 0) 248 - break; 249 - ie_hdr = itr; 250 - itr += sizeof(*ie_hdr) + ie_hdr->length; 251 - result = fn(uwb_dev, ie_hdr, itr - buf, data); 252 - if (result != 0) 253 - break; 254 - } 255 return result; 256 } 257 - EXPORT_SYMBOL_GPL(uwb_ie_for_each); 258 259 260 /** ··· 178 return result; 179 } 180 181 - /** 182 - * Determine by IE id if IE is host settable 183 - * WUSB 1.0 [8.6.2.8 Table 8.85] 184 - * 185 - * EXCEPTION: 186 - * All but UWB_IE_WLP appears in Table 8.85 from WUSB 1.0. Setting this IE 187 - * is required for the WLP substack to perform association with its WSS so 188 - * we hope that the WUSB spec will be changed to reflect this. 189 - */ 190 - static 191 - int uwb_rc_ie_is_host_settable(enum uwb_ie element_id) 192 - { 193 - if (element_id == UWB_PCA_AVAILABILITY || 194 - element_id == UWB_BP_SWITCH_IE || 195 - element_id == UWB_MAC_CAPABILITIES_IE || 196 - element_id == UWB_PHY_CAPABILITIES_IE || 197 - element_id == UWB_APP_SPEC_PROBE_IE || 198 - element_id == UWB_IDENTIFICATION_IE || 199 - element_id == UWB_MASTER_KEY_ID_IE || 200 - element_id == UWB_IE_WLP || 201 - element_id == UWB_APP_SPEC_IE) 202 - return 1; 203 - return 0; 204 - } 205 - 206 - 207 - /** 208 - * Extract Host Settable IEs from IE 209 - * 210 - * @ie_data: pointer to buffer containing all IEs 211 - * @size: size of buffer 212 - * 213 - * @returns: length of buffer that only includes host settable IEs 214 - * 215 - * Given a buffer of IEs we move all Host Settable IEs to front of buffer 216 - * by overwriting the IEs that are not Host Settable. 217 - * Buffer length is adjusted accordingly. 
218 - */ 219 - static 220 - ssize_t uwb_rc_parse_host_settable_ie(struct uwb_dev *uwb_dev, 221 - void *ie_data, size_t size) 222 - { 223 - size_t new_len = size; 224 - struct uwb_ie_hdr *ie_hdr; 225 - size_t ie_length; 226 - void *itr = ie_data, *top = itr + size; 227 - 228 - while (itr < top) { 229 - if (uwb_rc_ie_verify(uwb_dev, ie_data, itr, top) != 0) 230 - break; 231 - ie_hdr = itr; 232 - ie_length = sizeof(*ie_hdr) + ie_hdr->length; 233 - if (uwb_rc_ie_is_host_settable(ie_hdr->element_id)) { 234 - itr += ie_length; 235 - } else { 236 - memmove(itr, itr + ie_length, top - (itr + ie_length)); 237 - new_len -= ie_length; 238 - top -= ie_length; 239 - } 240 - } 241 - return new_len; 242 - } 243 - 244 - 245 /* Cleanup the whole IE management subsystem */ 246 void uwb_rc_ie_init(struct uwb_rc *uwb_rc) 247 { ··· 186 187 188 /** 189 - * Set up cache for host settable IEs currently being transmitted 190 * 191 - * First we just call GET-IE to get the current IEs being transmitted 192 - * (or we workaround and pretend we did) and (because the format is 193 - * the same) reuse that as the IE cache (with the command prefix, as 194 - * explained in 'struct uwb_rc'). 195 * 196 - * @returns: size of cache created 197 */ 198 - ssize_t uwb_rc_ie_setup(struct uwb_rc *uwb_rc) 199 { 200 - struct device *dev = &uwb_rc->uwb_dev.dev; 201 - ssize_t result; 202 - size_t capacity; 203 - struct uwb_rc_evt_get_ie *ie_info; 204 205 - d_fnstart(3, dev, "(%p)\n", uwb_rc); 206 mutex_lock(&uwb_rc->ies_mutex); 207 - result = uwb_rc_get_ie(uwb_rc, &ie_info); 208 - if (result < 0) 209 - goto error_get_ie; 210 - capacity = result; 211 - d_printf(5, dev, "Got IEs %zu bytes (%zu long at %p)\n", result, 212 - (size_t)le16_to_cpu(ie_info->wIELength), ie_info); 213 214 - /* Remove IEs that host should not set. */ 215 - result = uwb_rc_parse_host_settable_ie(&uwb_rc->uwb_dev, 216 - ie_info->IEData, le16_to_cpu(ie_info->wIELength)); 217 - if (result < 0) 218 - goto error_parse; 219 - d_printf(5, dev, "purged non-settable IEs to %zu bytes\n", result); 220 - uwb_rc->ies = (void *) ie_info; 221 uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL; 222 uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE); 223 uwb_rc->ies_capacity = capacity; 224 - d_printf(5, dev, "IE cache at %p %zu bytes, %zu capacity\n", 225 - ie_info, result, capacity); 226 - result = 0; 227 - error_parse: 228 - error_get_ie: 229 mutex_unlock(&uwb_rc->ies_mutex); 230 - d_fnend(3, dev, "(%p) = %zu\n", uwb_rc, result); 231 - return result; 232 } 233 234 ··· 226 } 227 228 229 - static 230 - int __acc_size(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr, 231 - size_t offset, void *_ctx) 232 { 233 - size_t *acc_size = _ctx; 234 - *acc_size += sizeof(*ie_hdr) + ie_hdr->length; 235 - d_printf(6, &uwb_dev->dev, "new acc size %zu\n", *acc_size); 236 return 0; 237 } 238 239 - 240 /** 241 - * Add a new IE to IEs currently being transmitted by device 242 - * 243 * @ies: the buffer containing the new IE or IEs to be added to 244 - * the device's beacon. The buffer will be verified for 245 - * consistence (meaning the headers should be right) and 246 - * consistent with the buffer size. 247 - * @size: size of @ies (in bytes, total buffer size) 248 - * @returns: 0 if ok, <0 errno code on error 249 * 250 * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB 251 * after the device sent the first beacon that includes the IEs specified ··· 275 * we start beaconing. 276 * 277 * Setting an IE on the device will overwrite all current IEs in device. 
So 278 - * we take the current IEs being transmitted by the device, append the 279 * new one, and call SET IE with all the IEs needed. 280 * 281 - * The local IE cache will only be updated with the new IE if SET IE 282 - * completed successfully. 283 */ 284 int uwb_rc_ie_add(struct uwb_rc *uwb_rc, 285 const struct uwb_ie_hdr *ies, size_t size) 286 { 287 int result = 0; 288 - struct device *dev = &uwb_rc->uwb_dev.dev; 289 - struct uwb_rc_cmd_set_ie *new_ies; 290 - size_t ies_size, total_size, acc_size = 0; 291 292 - if (uwb_rc->ies == NULL) 293 - return -ESHUTDOWN; 294 - uwb_ie_for_each(&uwb_rc->uwb_dev, __acc_size, &acc_size, ies, size); 295 - if (acc_size != size) { 296 - dev_err(dev, "BUG: bad IEs, misconstructed headers " 297 - "[%zu bytes reported vs %zu calculated]\n", 298 - size, acc_size); 299 - WARN_ON(1); 300 - return -EINVAL; 301 - } 302 mutex_lock(&uwb_rc->ies_mutex); 303 - ies_size = le16_to_cpu(uwb_rc->ies->wIELength); 304 - total_size = sizeof(*uwb_rc->ies) + ies_size; 305 - if (total_size + size > uwb_rc->ies_capacity) { 306 - d_printf(4, dev, "Reallocating IE cache from %p capacity %zu " 307 - "to capacity %zu\n", uwb_rc->ies, uwb_rc->ies_capacity, 308 - total_size + size); 309 - new_ies = kzalloc(total_size + size, GFP_KERNEL); 310 - if (new_ies == NULL) { 311 - dev_err(dev, "No memory for adding new IE\n"); 312 - result = -ENOMEM; 313 - goto error_alloc; 314 - } 315 - memcpy(new_ies, uwb_rc->ies, total_size); 316 - uwb_rc->ies_capacity = total_size + size; 317 - kfree(uwb_rc->ies); 318 - uwb_rc->ies = new_ies; 319 - d_printf(4, dev, "New IE cache at %p capacity %zu\n", 320 - uwb_rc->ies, uwb_rc->ies_capacity); 321 } 322 - memcpy((void *)uwb_rc->ies + total_size, ies, size); 323 - uwb_rc->ies->wIELength = cpu_to_le16(ies_size + size); 324 - if (uwb_rc->beaconing != -1) { 325 - result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); 326 - if (result < 0) { 327 - dev_err(dev, "Cannot set new IE on device: %d\n", 328 - result); 329 - uwb_rc->ies->wIELength = cpu_to_le16(ies_size); 330 } else 331 - result = 0; 332 } 333 - d_printf(4, dev, "IEs now occupy %hu bytes of %zu capacity at %p\n", 334 - le16_to_cpu(uwb_rc->ies->wIELength), uwb_rc->ies_capacity, 335 - uwb_rc->ies); 336 - error_alloc: 337 mutex_unlock(&uwb_rc->ies_mutex); 338 return result; 339 } 340 EXPORT_SYMBOL_GPL(uwb_rc_ie_add); ··· 327 * beacon. We don't reallocate, we just mark the size smaller. 
328 */ 329 static 330 - int uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove) 331 { 332 - struct uwb_ie_hdr *ie_hdr; 333 - size_t new_len = le16_to_cpu(uwb_rc->ies->wIELength); 334 - void *itr = uwb_rc->ies->IEData; 335 - void *top = itr + new_len; 336 337 - while (itr < top) { 338 - ie_hdr = itr; 339 - if (ie_hdr->element_id != to_remove) { 340 - itr += sizeof(*ie_hdr) + ie_hdr->length; 341 - } else { 342 - int ie_length; 343 - ie_length = sizeof(*ie_hdr) + ie_hdr->length; 344 - if (top - itr != ie_length) 345 - memmove(itr, itr + ie_length, top - itr + ie_length); 346 - top -= ie_length; 347 - new_len -= ie_length; 348 } 349 } 350 - uwb_rc->ies->wIELength = cpu_to_le16(new_len); 351 - return 0; 352 } 353 354 355 /** 356 - * Remove an IE currently being transmitted by device 357 * 358 - * @element_id: id of IE to be removed from device's beacon 359 */ 360 int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id) 361 { 362 - struct device *dev = &uwb_rc->uwb_dev.dev; 363 - int result; 364 365 - if (uwb_rc->ies == NULL) 366 - return -ESHUTDOWN; 367 mutex_lock(&uwb_rc->ies_mutex); 368 - result = uwb_rc_ie_cache_rm(uwb_rc, element_id); 369 - if (result < 0) 370 - dev_err(dev, "Cannot remove IE from cache.\n"); 371 - if (uwb_rc->beaconing != -1) { 372 result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); 373 - if (result < 0) 374 - dev_err(dev, "Cannot set new IE on device.\n"); 375 - } 376 mutex_unlock(&uwb_rc->ies_mutex); 377 return result; 378 } 379 EXPORT_SYMBOL_GPL(uwb_rc_ie_rm);
··· 25 */ 26 27 #include "uwb-internal.h" 28 29 /** 30 * uwb_ie_next - get the next IE in a buffer ··· 61 EXPORT_SYMBOL_GPL(uwb_ie_next); 62 63 /** 64 + * uwb_ie_dump_hex - print IEs to a character buffer 65 + * @ies: the IEs to print. 66 + * @len: length of all the IEs. 67 + * @buf: the destination buffer. 68 + * @size: size of @buf. 69 + * 70 + * Returns the number of characters written. 71 + */ 72 + int uwb_ie_dump_hex(const struct uwb_ie_hdr *ies, size_t len, 73 + char *buf, size_t size) 74 + { 75 + void *ptr; 76 + const struct uwb_ie_hdr *ie; 77 + int r = 0; 78 + u8 *d; 79 + 80 + ptr = (void *)ies; 81 + for (;;) { 82 + ie = uwb_ie_next(&ptr, &len); 83 + if (!ie) 84 + break; 85 + 86 + r += scnprintf(buf + r, size - r, "%02x %02x", 87 + (unsigned)ie->element_id, 88 + (unsigned)ie->length); 89 + d = (uint8_t *)ie + sizeof(struct uwb_ie_hdr); 90 + while (d != ptr && r < size) 91 + r += scnprintf(buf + r, size - r, " %02x", (unsigned)*d++); 92 + if (r < size) 93 + buf[r++] = '\n'; 94 + }; 95 + 96 + return r; 97 + } 98 + 99 + /** 100 * Get the IEs that a radio controller is sending in its beacon 101 * 102 * @uwb_rc: UWB Radio Controller ··· 70 * anything. Once done with the iedata buffer, call 71 * uwb_rc_ie_release(iedata). Don't call kfree on it. 72 */ 73 + static 74 ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie) 75 { 76 ssize_t result; ··· 78 struct uwb_rceb *reply = NULL; 79 struct uwb_rc_evt_get_ie *get_ie; 80 81 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 82 if (cmd == NULL) 83 + return -ENOMEM; 84 + 85 cmd->bCommandType = UWB_RC_CET_GENERAL; 86 cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE); 87 result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd), 88 UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE, 89 &reply); 90 + kfree(cmd); 91 if (result < 0) 92 + return result; 93 + 94 get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb); 95 if (result < sizeof(*get_ie)) { 96 dev_err(dev, "not enough data returned for decoding GET IE " 97 "(%zu bytes received vs %zu needed)\n", 98 result, sizeof(*get_ie)); 99 + return -EINVAL; 100 } else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) { 101 dev_err(dev, "not enough data returned for decoding GET IE " 102 "payload (%zu bytes received vs %zu needed)\n", result, 103 sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)); 104 return -EINVAL; 105 } 106 107 + *pget_ie = get_ie; 108 return result; 109 } 110 111 112 /** ··· 256 return result; 257 } 258 259 /* Cleanup the whole IE management subsystem */ 260 void uwb_rc_ie_init(struct uwb_rc *uwb_rc) 261 { ··· 328 329 330 /** 331 + * uwb_rc_ie_setup - setup a radio controller's IE manager 332 + * @uwb_rc: the radio controller. 333 * 334 + * The current set of IEs are obtained from the hardware with a GET-IE 335 + * command (since the radio controller is not yet beaconing this will 336 + * be just the hardware's MAC and PHY Capability IEs). 337 * 338 + * Returns 0 on success; -ve on an error. 
339 */ 340 + int uwb_rc_ie_setup(struct uwb_rc *uwb_rc) 341 { 342 + struct uwb_rc_evt_get_ie *ie_info = NULL; 343 + int capacity; 344 345 + capacity = uwb_rc_get_ie(uwb_rc, &ie_info); 346 + if (capacity < 0) 347 + return capacity; 348 + 349 mutex_lock(&uwb_rc->ies_mutex); 350 351 + uwb_rc->ies = (struct uwb_rc_cmd_set_ie *)ie_info; 352 uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL; 353 uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE); 354 uwb_rc->ies_capacity = capacity; 355 + 356 mutex_unlock(&uwb_rc->ies_mutex); 357 + 358 + return 0; 359 } 360 361 ··· 383 } 384 385 386 + static int uwb_rc_ie_add_one(struct uwb_rc *rc, const struct uwb_ie_hdr *new_ie) 387 { 388 + struct uwb_rc_cmd_set_ie *new_ies; 389 + void *ptr, *prev_ie; 390 + struct uwb_ie_hdr *ie; 391 + size_t length, new_ie_len, new_capacity, size, prev_size; 392 + 393 + length = le16_to_cpu(rc->ies->wIELength); 394 + new_ie_len = sizeof(struct uwb_ie_hdr) + new_ie->length; 395 + new_capacity = sizeof(struct uwb_rc_cmd_set_ie) + length + new_ie_len; 396 + 397 + if (new_capacity > rc->ies_capacity) { 398 + new_ies = krealloc(rc->ies, new_capacity, GFP_KERNEL); 399 + if (!new_ies) 400 + return -ENOMEM; 401 + rc->ies = new_ies; 402 + } 403 + 404 + ptr = rc->ies->IEData; 405 + size = length; 406 + for (;;) { 407 + prev_ie = ptr; 408 + prev_size = size; 409 + ie = uwb_ie_next(&ptr, &size); 410 + if (!ie || ie->element_id > new_ie->element_id) 411 + break; 412 + } 413 + 414 + memmove(prev_ie + new_ie_len, prev_ie, prev_size); 415 + memcpy(prev_ie, new_ie, new_ie_len); 416 + rc->ies->wIELength = cpu_to_le16(length + new_ie_len); 417 + 418 return 0; 419 } 420 421 /** 422 + * uwb_rc_ie_add - add new IEs to the radio controller's beacon 423 + * @uwb_rc: the radio controller. 424 * @ies: the buffer containing the new IE or IEs to be added to 425 + * the device's beacon. 426 + * @size: length of all the IEs. 427 * 428 * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB 429 * after the device sent the first beacon that includes the IEs specified ··· 411 * we start beaconing. 412 * 413 * Setting an IE on the device will overwrite all current IEs in device. So 414 + * we take the current IEs being transmitted by the device, insert the 415 * new one, and call SET IE with all the IEs needed. 416 * 417 + * Returns 0 on success; or -ENOMEM. 418 */ 419 int uwb_rc_ie_add(struct uwb_rc *uwb_rc, 420 const struct uwb_ie_hdr *ies, size_t size) 421 { 422 int result = 0; 423 + void *ptr; 424 + const struct uwb_ie_hdr *ie; 425 426 mutex_lock(&uwb_rc->ies_mutex); 427 + 428 + ptr = (void *)ies; 429 + for (;;) { 430 + ie = uwb_ie_next(&ptr, &size); 431 + if (!ie) 432 + break; 433 + 434 + result = uwb_rc_ie_add_one(uwb_rc, ie); 435 + if (result < 0) 436 + break; 437 } 438 + if (result >= 0) { 439 + if (size == 0) { 440 + if (uwb_rc->beaconing != -1) 441 + result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); 442 } else 443 + result = -EINVAL; 444 } 445 + 446 mutex_unlock(&uwb_rc->ies_mutex); 447 + 448 return result; 449 } 450 EXPORT_SYMBOL_GPL(uwb_rc_ie_add); ··· 489 * beacon. We don't reallocate, we just mark the size smaller. 
490 */ 491 static 492 + void uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove) 493 { 494 + struct uwb_ie_hdr *ie; 495 + size_t len = le16_to_cpu(uwb_rc->ies->wIELength); 496 + void *ptr; 497 + size_t size; 498 499 + ptr = uwb_rc->ies->IEData; 500 + size = len; 501 + for (;;) { 502 + ie = uwb_ie_next(&ptr, &size); 503 + if (!ie) 504 + break; 505 + if (ie->element_id == to_remove) { 506 + len -= sizeof(struct uwb_ie_hdr) + ie->length; 507 + memmove(ie, ptr, size); 508 + ptr = ie; 509 } 510 } 511 + uwb_rc->ies->wIELength = cpu_to_le16(len); 512 } 513 514 515 /** 516 + * uwb_rc_ie_rm - remove an IE from the radio controller's beacon 517 + * @uwb_rc: the radio controller. 518 + * @element_id: the element ID of the IE to remove. 519 * 520 + * Only IEs previously added with uwb_rc_ie_add() may be removed. 521 + * 522 + * Returns 0 on success; or -ve the SET-IE command to the radio 523 + * controller failed. 524 */ 525 int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id) 526 { 527 + int result = 0; 528 529 mutex_lock(&uwb_rc->ies_mutex); 530 + 531 + uwb_rc_ie_cache_rm(uwb_rc, element_id); 532 + 533 + if (uwb_rc->beaconing != -1) 534 result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); 535 + 536 mutex_unlock(&uwb_rc->ies_mutex); 537 + 538 return result; 539 } 540 EXPORT_SYMBOL_GPL(uwb_rc_ie_rm);
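For callers, the reworked ie.c keeps two entry points: uwb_rc_ie_add() takes a buffer of one or more complete IEs and uwb_rc_ie_rm() takes an element ID; the IEs are merged into the cached SET-IE command and pushed to the hardware only if the radio controller is already beaconing. A minimal usage sketch for a PAL adding and later removing one IE (the helper, its payload, and the use of UWB_APP_SPEC_IE as the element ID are illustrative):

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/uwb.h>

/* Build a single IE (header + payload) in a temporary buffer; uwb_rc_ie_add()
 * copies it into the radio controller's IE cache, so the buffer can be freed
 * immediately afterwards. */
static int example_add_ie(struct uwb_rc *rc, const void *payload, u8 len)
{
        struct uwb_ie_hdr *ie;
        int ret;

        ie = kzalloc(sizeof(*ie) + len, GFP_KERNEL);
        if (ie == NULL)
                return -ENOMEM;
        ie->element_id = UWB_APP_SPEC_IE;       /* example element ID */
        ie->length = len;
        memcpy(ie + 1, payload, len);

        ret = uwb_rc_ie_add(rc, ie, sizeof(*ie) + len);
        kfree(ie);
        return ret;
}

/* ...and on shutdown, drop it from the beacon again: */
/*      uwb_rc_ie_rm(rc, UWB_APP_SPEC_IE);      */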
+4 -19
drivers/uwb/lc-dev.c
··· 22 * 23 * FIXME: docs 24 */ 25 - 26 #include <linux/kernel.h> 27 #include <linux/device.h> 28 #include <linux/err.h> 29 #include <linux/kdev_t.h> 30 #include <linux/random.h> 31 #include "uwb-internal.h" 32 - 33 - #define D_LOCAL 1 34 - #include <linux/uwb/debug.h> 35 - 36 37 /* We initialize addresses to 0xff (invalid, as it is bcast) */ 38 static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr) ··· 99 { 100 struct uwb_dev *uwb_dev = to_uwb_dev(dev); 101 102 - d_fnstart(4, NULL, "(dev %p uwb_dev %p)\n", dev, uwb_dev); 103 uwb_bce_put(uwb_dev->bce); 104 - d_printf(0, &uwb_dev->dev, "uwb_dev %p freed\n", uwb_dev); 105 memset(uwb_dev, 0x69, sizeof(*uwb_dev)); 106 kfree(uwb_dev); 107 - d_fnend(4, NULL, "(dev %p uwb_dev %p) = void\n", dev, uwb_dev); 108 } 109 110 /* ··· 267 */ 268 static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev) 269 { 270 - int result; 271 struct device *dev; 272 - 273 - d_fnstart(4, NULL, "(uwb_dev %p parent_dev %p)\n", uwb_dev, parent_dev); 274 - BUG_ON(parent_dev == NULL); 275 276 dev = &uwb_dev->dev; 277 /* Device sysfs files are only useful for neighbor devices not ··· 277 dev->parent = parent_dev; 278 dev_set_drvdata(dev, uwb_dev); 279 280 - result = device_add(dev); 281 - d_fnend(4, NULL, "(uwb_dev %p parent_dev %p) = %d\n", uwb_dev, parent_dev, result); 282 - return result; 283 } 284 285 286 static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev) 287 { 288 - d_fnstart(4, NULL, "(uwb_dev %p)\n", uwb_dev); 289 dev_set_drvdata(&uwb_dev->dev, NULL); 290 device_del(&uwb_dev->dev); 291 - d_fnend(4, NULL, "(uwb_dev %p) = void\n", uwb_dev); 292 } 293 294 ··· 368 struct device *dev = &uwb_dev->dev; 369 char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; 370 371 - d_fnstart(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p)\n", dev, uwb_dev, rc); 372 uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr); 373 uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr); 374 dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n", ··· 375 rc ? rc->uwb_dev.dev.parent->bus->name : "n/a", 376 rc ? dev_name(rc->uwb_dev.dev.parent) : ""); 377 uwb_dev_rm(uwb_dev); 378 uwb_dev_put(uwb_dev); /* for the creation in _onair() */ 379 - d_fnend(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p) = 0\n", dev, uwb_dev, rc); 380 return 0; 381 } 382
··· 22 * 23 * FIXME: docs 24 */ 25 #include <linux/kernel.h> 26 #include <linux/device.h> 27 #include <linux/err.h> 28 #include <linux/kdev_t.h> 29 #include <linux/random.h> 30 #include "uwb-internal.h" 31 32 /* We initialize addresses to 0xff (invalid, as it is bcast) */ 33 static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr) ··· 104 { 105 struct uwb_dev *uwb_dev = to_uwb_dev(dev); 106 107 uwb_bce_put(uwb_dev->bce); 108 memset(uwb_dev, 0x69, sizeof(*uwb_dev)); 109 kfree(uwb_dev); 110 } 111 112 /* ··· 275 */ 276 static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev) 277 { 278 struct device *dev; 279 280 dev = &uwb_dev->dev; 281 /* Device sysfs files are only useful for neighbor devices not ··· 289 dev->parent = parent_dev; 290 dev_set_drvdata(dev, uwb_dev); 291 292 + return device_add(dev); 293 } 294 295 296 static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev) 297 { 298 dev_set_drvdata(&uwb_dev->dev, NULL); 299 device_del(&uwb_dev->dev); 300 } 301 302 ··· 384 struct device *dev = &uwb_dev->dev; 385 char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; 386 387 uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr); 388 uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr); 389 dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n", ··· 392 rc ? rc->uwb_dev.dev.parent->bus->name : "n/a", 393 rc ? dev_name(rc->uwb_dev.dev.parent) : ""); 394 uwb_dev_rm(uwb_dev); 395 + list_del(&uwb_dev->bce->node); 396 + uwb_bce_put(uwb_dev->bce); 397 uwb_dev_put(uwb_dev); /* for the creation in _onair() */ 398 + 399 return 0; 400 } 401
+20 -42
drivers/uwb/lc-rc.c
··· 36 #include <linux/etherdevice.h> 37 #include <linux/usb.h> 38 39 - #define D_LOCAL 1 40 - #include <linux/uwb/debug.h> 41 #include "uwb-internal.h" 42 43 static int uwb_rc_index_match(struct device *dev, void *data) ··· 79 struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev); 80 struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev); 81 82 - uwb_rc_neh_destroy(rc); 83 uwb_rc_ie_release(rc); 84 - d_printf(1, dev, "freed uwb_rc %p\n", rc); 85 kfree(rc); 86 } 87 ··· 96 rc->scan_type = UWB_SCAN_DISABLED; 97 INIT_LIST_HEAD(&rc->notifs_chain.list); 98 mutex_init(&rc->notifs_chain.mutex); 99 uwb_drp_avail_init(rc); 100 uwb_rc_ie_init(rc); 101 uwb_rsv_init(rc); ··· 189 int result; 190 struct device *dev = &rc->uwb_dev.dev; 191 192 - result = uwb_rc_reset(rc); 193 if (result < 0) { 194 - dev_err(dev, "cannot reset UWB radio: %d\n", result); 195 goto error; 196 } 197 result = uwb_rc_mac_addr_setup(rc); ··· 248 249 rc->priv = priv; 250 251 result = rc->start(rc); 252 if (result < 0) 253 goto error_rc_start; ··· 288 error_dev_add: 289 error_rc_setup: 290 rc->stop(rc); 291 - uwbd_flush(rc); 292 error_rc_start: 293 return result; 294 } ··· 310 rc->ready = 0; 311 312 uwb_dbg_del_rc(rc); 313 - uwb_rsv_cleanup(rc); 314 - uwb_rc_ie_rm(rc, UWB_IDENTIFICATION_IE); 315 - if (rc->beaconing >= 0) 316 - uwb_rc_beacon(rc, -1, 0); 317 - if (rc->scan_type != UWB_SCAN_DISABLED) 318 - uwb_rc_scan(rc, rc->scanning, UWB_SCAN_DISABLED, 0); 319 - uwb_rc_reset(rc); 320 321 rc->stop(rc); 322 - uwbd_flush(rc); 323 324 uwb_dev_lock(&rc->uwb_dev); 325 rc->priv = NULL; 326 rc->cmd = NULL; 327 uwb_dev_unlock(&rc->uwb_dev); 328 - mutex_lock(&uwb_beca.mutex); 329 uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL); 330 __uwb_rc_sys_rm(rc); 331 - mutex_unlock(&uwb_beca.mutex); 332 uwb_dev_rm(&rc->uwb_dev); 333 } 334 EXPORT_SYMBOL_GPL(uwb_rc_rm); ··· 471 __uwb_rc_put(rc); 472 } 473 EXPORT_SYMBOL_GPL(uwb_rc_put); 474 - 475 - /* 476 - * 477 - * 478 - */ 479 - ssize_t uwb_rc_print_IEs(struct uwb_rc *uwb_rc, char *buf, size_t size) 480 - { 481 - ssize_t result; 482 - struct uwb_rc_evt_get_ie *ie_info; 483 - struct uwb_buf_ctx ctx; 484 - 485 - result = uwb_rc_get_ie(uwb_rc, &ie_info); 486 - if (result < 0) 487 - goto error_get_ie; 488 - ctx.buf = buf; 489 - ctx.size = size; 490 - ctx.bytes = 0; 491 - uwb_ie_for_each(&uwb_rc->uwb_dev, uwb_ie_dump_hex, &ctx, 492 - ie_info->IEData, result - sizeof(*ie_info)); 493 - result = ctx.bytes; 494 - kfree(ie_info); 495 - error_get_ie: 496 - return result; 497 - } 498 -
··· 36 #include <linux/etherdevice.h> 37 #include <linux/usb.h> 38 39 #include "uwb-internal.h" 40 41 static int uwb_rc_index_match(struct device *dev, void *data) ··· 81 struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev); 82 struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev); 83 84 uwb_rc_ie_release(rc); 85 kfree(rc); 86 } 87 ··· 100 rc->scan_type = UWB_SCAN_DISABLED; 101 INIT_LIST_HEAD(&rc->notifs_chain.list); 102 mutex_init(&rc->notifs_chain.mutex); 103 + INIT_LIST_HEAD(&rc->uwb_beca.list); 104 + mutex_init(&rc->uwb_beca.mutex); 105 uwb_drp_avail_init(rc); 106 uwb_rc_ie_init(rc); 107 uwb_rsv_init(rc); ··· 191 int result; 192 struct device *dev = &rc->uwb_dev.dev; 193 194 + result = uwb_radio_setup(rc); 195 if (result < 0) { 196 + dev_err(dev, "cannot setup UWB radio: %d\n", result); 197 goto error; 198 } 199 result = uwb_rc_mac_addr_setup(rc); ··· 250 251 rc->priv = priv; 252 253 + init_waitqueue_head(&rc->uwbd.wq); 254 + INIT_LIST_HEAD(&rc->uwbd.event_list); 255 + spin_lock_init(&rc->uwbd.event_list_lock); 256 + 257 + uwbd_start(rc); 258 + 259 result = rc->start(rc); 260 if (result < 0) 261 goto error_rc_start; ··· 284 error_dev_add: 285 error_rc_setup: 286 rc->stop(rc); 287 + uwbd_stop(rc); 288 error_rc_start: 289 return result; 290 } ··· 306 rc->ready = 0; 307 308 uwb_dbg_del_rc(rc); 309 + uwb_rsv_remove_all(rc); 310 + uwb_radio_shutdown(rc); 311 312 rc->stop(rc); 313 + 314 + uwbd_stop(rc); 315 + uwb_rc_neh_destroy(rc); 316 317 uwb_dev_lock(&rc->uwb_dev); 318 rc->priv = NULL; 319 rc->cmd = NULL; 320 uwb_dev_unlock(&rc->uwb_dev); 321 + mutex_lock(&rc->uwb_beca.mutex); 322 uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL); 323 __uwb_rc_sys_rm(rc); 324 + mutex_unlock(&rc->uwb_beca.mutex); 325 + uwb_rsv_cleanup(rc); 326 + uwb_beca_release(rc); 327 uwb_dev_rm(&rc->uwb_dev); 328 } 329 EXPORT_SYMBOL_GPL(uwb_rc_rm); ··· 468 __uwb_rc_put(rc); 469 } 470 EXPORT_SYMBOL_GPL(uwb_rc_put);
+33 -39
drivers/uwb/neh.c
··· 86 #include <linux/err.h> 87 88 #include "uwb-internal.h" 89 - #define D_LOCAL 0 90 - #include <linux/uwb/debug.h> 91 92 /* 93 * UWB Radio Controller Notification/Event Handle ··· 252 253 static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh) 254 { 255 - del_timer(&neh->timer); 256 __uwb_rc_ctx_put(rc, neh); 257 list_del(&neh->list_node); 258 } ··· 272 __uwb_rc_neh_rm(rc, neh); 273 spin_unlock_irqrestore(&rc->neh_lock, flags); 274 275 uwb_rc_neh_put(neh); 276 } 277 ··· 347 } 348 349 350 - /** 351 * Process notifications coming from the radio control interface 352 * 353 * @rc: UWB Radio Control Interface descriptor ··· 399 uwb_evt->notif.size = size; 400 uwb_evt->notif.rceb = rceb; 401 402 - switch (le16_to_cpu(rceb->wEvent)) { 403 - /* Trap some vendor specific events 404 - * 405 - * FIXME: move this to handling in ptc-est, where we 406 - * register a NULL event handler for these two guys 407 - * using the Intel IDs. 408 - */ 409 - case 0x0103: 410 - dev_info(dev, "FIXME: DEVICE ADD\n"); 411 - return; 412 - case 0x0104: 413 - dev_info(dev, "FIXME: DEVICE RM\n"); 414 - return; 415 - default: 416 - break; 417 - } 418 - 419 uwbd_event_queue(uwb_evt); 420 } 421 ··· 419 rceb->bEventContext, size); 420 } else { 421 neh = uwb_rc_neh_lookup(rc, rceb); 422 - if (neh) 423 uwb_rc_neh_cb(neh, rceb, size); 424 - else 425 dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n", 426 rceb->bEventType, le16_to_cpu(rceb->wEvent), 427 rceb->bEventContext, size); ··· 477 size_t size, real_size, event_size; 478 int needtofree; 479 480 - d_fnstart(3, dev, "(rc %p buf %p %zu buf_size)\n", rc, buf, buf_size); 481 - d_printf(2, dev, "groking event block: %zu bytes\n", buf_size); 482 itr = buf; 483 size = buf_size; 484 while (size > 0) { ··· 524 525 itr += real_size; 526 size -= real_size; 527 - d_printf(2, dev, "consumed %zd bytes, %zu left\n", 528 - event_size, size); 529 } 530 - d_fnend(3, dev, "(rc %p buf %p %zu buf_size) = void\n", rc, buf, buf_size); 531 } 532 EXPORT_SYMBOL_GPL(uwb_rc_neh_grok); 533 ··· 539 */ 540 void uwb_rc_neh_error(struct uwb_rc *rc, int error) 541 { 542 - struct uwb_rc_neh *neh, *next; 543 unsigned long flags; 544 545 - BUG_ON(error >= 0); 546 - spin_lock_irqsave(&rc->neh_lock, flags); 547 - list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) { 548 __uwb_rc_neh_rm(rc, neh); 549 uwb_rc_neh_cb(neh, NULL, error); 550 } 551 - spin_unlock_irqrestore(&rc->neh_lock, flags); 552 } 553 EXPORT_SYMBOL_GPL(uwb_rc_neh_error); 554 ··· 566 unsigned long flags; 567 568 spin_lock_irqsave(&rc->neh_lock, flags); 569 - __uwb_rc_neh_rm(rc, neh); 570 spin_unlock_irqrestore(&rc->neh_lock, flags); 571 572 - uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT); 573 } 574 575 /** Initializes the @rc's neh subsystem ··· 592 void uwb_rc_neh_destroy(struct uwb_rc *rc) 593 { 594 unsigned long flags; 595 - struct uwb_rc_neh *neh, *next; 596 597 - spin_lock_irqsave(&rc->neh_lock, flags); 598 - list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) { 599 __uwb_rc_neh_rm(rc, neh); 600 uwb_rc_neh_put(neh); 601 } 602 - spin_unlock_irqrestore(&rc->neh_lock, flags); 603 }
··· 86 #include <linux/err.h> 87 88 #include "uwb-internal.h" 89 90 /* 91 * UWB Radio Controller Notification/Event Handle ··· 254 255 static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh) 256 { 257 __uwb_rc_ctx_put(rc, neh); 258 list_del(&neh->list_node); 259 } ··· 275 __uwb_rc_neh_rm(rc, neh); 276 spin_unlock_irqrestore(&rc->neh_lock, flags); 277 278 + del_timer_sync(&neh->timer); 279 uwb_rc_neh_put(neh); 280 } 281 ··· 349 } 350 351 352 + /* 353 * Process notifications coming from the radio control interface 354 * 355 * @rc: UWB Radio Control Interface descriptor ··· 401 uwb_evt->notif.size = size; 402 uwb_evt->notif.rceb = rceb; 403 404 uwbd_event_queue(uwb_evt); 405 } 406 ··· 438 rceb->bEventContext, size); 439 } else { 440 neh = uwb_rc_neh_lookup(rc, rceb); 441 + if (neh) { 442 + del_timer_sync(&neh->timer); 443 uwb_rc_neh_cb(neh, rceb, size); 444 + } else 445 dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n", 446 rceb->bEventType, le16_to_cpu(rceb->wEvent), 447 rceb->bEventContext, size); ··· 495 size_t size, real_size, event_size; 496 int needtofree; 497 498 itr = buf; 499 size = buf_size; 500 while (size > 0) { ··· 544 545 itr += real_size; 546 size -= real_size; 547 } 548 } 549 EXPORT_SYMBOL_GPL(uwb_rc_neh_grok); 550 ··· 562 */ 563 void uwb_rc_neh_error(struct uwb_rc *rc, int error) 564 { 565 + struct uwb_rc_neh *neh; 566 unsigned long flags; 567 568 + for (;;) { 569 + spin_lock_irqsave(&rc->neh_lock, flags); 570 + if (list_empty(&rc->neh_list)) { 571 + spin_unlock_irqrestore(&rc->neh_lock, flags); 572 + break; 573 + } 574 + neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node); 575 __uwb_rc_neh_rm(rc, neh); 576 + spin_unlock_irqrestore(&rc->neh_lock, flags); 577 + 578 + del_timer_sync(&neh->timer); 579 uwb_rc_neh_cb(neh, NULL, error); 580 } 581 } 582 EXPORT_SYMBOL_GPL(uwb_rc_neh_error); 583 ··· 583 unsigned long flags; 584 585 spin_lock_irqsave(&rc->neh_lock, flags); 586 + if (neh->context) 587 + __uwb_rc_neh_rm(rc, neh); 588 + else 589 + neh = NULL; 590 spin_unlock_irqrestore(&rc->neh_lock, flags); 591 592 + if (neh) 593 + uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT); 594 } 595 596 /** Initializes the @rc's neh subsystem ··· 605 void uwb_rc_neh_destroy(struct uwb_rc *rc) 606 { 607 unsigned long flags; 608 + struct uwb_rc_neh *neh; 609 610 + for (;;) { 611 + spin_lock_irqsave(&rc->neh_lock, flags); 612 + if (list_empty(&rc->neh_list)) { 613 + spin_unlock_irqrestore(&rc->neh_lock, flags); 614 + break; 615 + } 616 + neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node); 617 __uwb_rc_neh_rm(rc, neh); 618 + spin_unlock_irqrestore(&rc->neh_lock, flags); 619 + 620 + del_timer_sync(&neh->timer); 621 uwb_rc_neh_put(neh); 622 } 623 }
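The neh changes all serve one goal: the event path and the timeout must never both run a handle's callback. The event path removes the handle from the list under the lock and then calls del_timer_sync() before invoking the callback; the timeout only acts if, under the same lock, it can still claim the handle (neh.c uses the context field for this; the sketch below uses list membership). A generic sketch of the pattern with hypothetical foo_* names and the 2.6-era timer prototype assumed:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

struct foo;

struct foo_handle {
        struct list_head node;          /* on foo->handles while "owned" */
        struct timer_list timer;
        struct foo *foo;
        void (*callback)(struct foo_handle *h, int error);
};

struct foo {
        spinlock_t lock;
        struct list_head handles;
};

/* Event path: claim the handle under the lock, then make sure the timer
 * is not (and can no longer be) running before using the handle. */
static void foo_handle_event(struct foo *foo, struct foo_handle *h)
{
        unsigned long flags;

        spin_lock_irqsave(&foo->lock, flags);
        list_del_init(&h->node);        /* timeout can no longer claim it */
        spin_unlock_irqrestore(&foo->lock, flags);

        del_timer_sync(&h->timer);      /* wait out a concurrent timeout */
        h->callback(h, 0);
}

/* Timeout path: only act if the event path has not already claimed it. */
static void foo_timeout(unsigned long data)
{
        struct foo_handle *h = (struct foo_handle *)data;
        struct foo *foo = h->foo;
        unsigned long flags;
        int claimed = 0;

        spin_lock_irqsave(&foo->lock, flags);
        if (!list_empty(&h->node)) {
                list_del_init(&h->node);
                claimed = 1;
        }
        spin_unlock_irqrestore(&foo->lock, flags);

        if (claimed)
                h->callback(h, -ETIMEDOUT);
}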
+16 -9
drivers/uwb/pal.c
··· 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 */ 18 #include <linux/kernel.h> 19 #include <linux/uwb.h> 20 21 #include "uwb-internal.h" ··· 33 34 /** 35 * uwb_pal_register - register a UWB PAL 36 - * @rc: the radio controller the PAL will be using 37 * @pal: the PAL 38 * 39 * The PAL must be initialized with uwb_pal_init(). 40 */ 41 - int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal) 42 { 43 int ret; 44 45 if (pal->device) { ··· 55 } 56 } 57 58 - spin_lock(&rc->pal_lock); 59 list_add(&pal->node, &rc->pals); 60 - spin_unlock(&rc->pal_lock); 61 62 return 0; 63 } ··· 67 68 /** 69 * uwb_pal_register - unregister a UWB PAL 70 - * @rc: the radio controller the PAL was using 71 * @pal: the PAL 72 */ 73 - void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal) 74 { 75 - spin_lock(&rc->pal_lock); 76 list_del(&pal->node); 77 - spin_unlock(&rc->pal_lock); 78 79 if (pal->device) { 80 sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name); ··· 94 */ 95 void uwb_rc_pal_init(struct uwb_rc *rc) 96 { 97 - spin_lock_init(&rc->pal_lock); 98 INIT_LIST_HEAD(&rc->pals); 99 }
··· 16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18 #include <linux/kernel.h>
19 + #include <linux/debugfs.h>
20 #include <linux/uwb.h>
21
22 #include "uwb-internal.h"
··· 32
33 /**
34 * uwb_pal_register - register a UWB PAL
35 * @pal: the PAL
36 *
37 * The PAL must be initialized with uwb_pal_init().
38 */
39 + int uwb_pal_register(struct uwb_pal *pal)
40 {
41 + struct uwb_rc *rc = pal->rc;
42 int ret;
43
44 if (pal->device) {
··· 54 }
55 }
56
57 + pal->debugfs_dir = uwb_dbg_create_pal_dir(pal);
58 +
59 + mutex_lock(&rc->uwb_dev.mutex);
60 list_add(&pal->node, &rc->pals);
61 + mutex_unlock(&rc->uwb_dev.mutex);
62
63 return 0;
64 }
··· 64
65 /**
66 * uwb_pal_unregister - unregister a UWB PAL
67 * @pal: the PAL
68 */
69 + void uwb_pal_unregister(struct uwb_pal *pal)
70 {
71 + struct uwb_rc *rc = pal->rc;
72 +
73 + uwb_radio_stop(pal);
74 +
75 + mutex_lock(&rc->uwb_dev.mutex);
76 list_del(&pal->node);
77 + mutex_unlock(&rc->uwb_dev.mutex);
78 +
79 + debugfs_remove(pal->debugfs_dir);
80
81 if (pal->device) {
82 sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name);
··· 86 */
87 void uwb_rc_pal_init(struct uwb_rc *rc)
88 {
89 INIT_LIST_HEAD(&rc->pals);
90 }
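Note: uwb_pal_register()/uwb_pal_unregister() no longer take the radio controller as an argument; a PAL now stores it in pal->rc before registering, as the debug PAL does further down. A minimal sketch of the new calling convention (the wrapper structure and callback are invented for illustration):

/* Illustrative sketch of the new PAL registration convention. */
struct example_pal {
	struct uwb_pal pal;
	/* ... PAL-private state ... */
};

static void example_channel_changed(struct uwb_pal *pal, int channel)
{
	/* channel > 0: radio running on that channel; -1: radio stopped. */
}

static int example_pal_attach(struct example_pal *ep, struct uwb_rc *rc)
{
	uwb_pal_init(&ep->pal);
	ep->pal.rc = rc;			/* rc now lives in the PAL itself */
	ep->pal.name = "example";
	ep->pal.channel_changed = example_channel_changed;

	return uwb_pal_register(&ep->pal);	/* no rc argument any more */
}

static void example_pal_detach(struct example_pal *ep)
{
	/* Also stops the radio on this PAL's behalf, see uwb_radio_stop(). */
	uwb_pal_unregister(&ep->pal);
}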
+202
drivers/uwb/radio.c
···
··· 1 + /*
2 + * UWB radio (channel) management.
3 + *
4 + * Copyright (C) 2008 Cambridge Silicon Radio Ltd.
5 + *
6 + * This program is free software; you can redistribute it and/or
7 + * modify it under the terms of the GNU General Public License version
8 + * 2 as published by the Free Software Foundation.
9 + *
10 + * This program is distributed in the hope that it will be useful,
11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 + * GNU General Public License for more details.
14 + *
15 + * You should have received a copy of the GNU General Public License
16 + * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 + */
18 + #include <linux/kernel.h>
19 + #include <linux/uwb.h>
20 +
21 + #include "uwb-internal.h"
22 +
23 +
24 + static int uwb_radio_select_channel(struct uwb_rc *rc)
25 + {
26 + /*
27 + * Default to channel 9 (BG1, TFC1) unless the user has
28 + * selected a specific channel or there are no active PALs.
29 + */
30 + if (rc->active_pals == 0)
31 + return -1;
32 + if (rc->beaconing_forced)
33 + return rc->beaconing_forced;
34 + return 9;
35 + }
36 +
37 +
38 + /*
39 + * Notify all active PALs that the channel has changed.
40 + */
41 + static void uwb_radio_channel_changed(struct uwb_rc *rc, int channel)
42 + {
43 + struct uwb_pal *pal;
44 +
45 + list_for_each_entry(pal, &rc->pals, node) {
46 + if (pal->channel && channel != pal->channel) {
47 + pal->channel = channel;
48 + if (pal->channel_changed)
49 + pal->channel_changed(pal, pal->channel);
50 + }
51 + }
52 + }
53 +
54 + /*
55 + * Change to a new channel and notify any active PALs of the new
56 + * channel.
57 + *
58 + * When stopping the radio, PALs need to be notified first so they can
59 + * terminate any active reservations.
60 + */
61 + static int uwb_radio_change_channel(struct uwb_rc *rc, int channel)
62 + {
63 + int ret = 0;
64 +
65 + if (channel == -1)
66 + uwb_radio_channel_changed(rc, channel);
67 +
68 + if (channel != rc->beaconing) {
69 + if (rc->beaconing != -1 && channel != -1) {
70 + /*
71 + * FIXME: should signal the channel change
72 + * with a Channel Change IE.
73 + */
74 + ret = uwb_radio_change_channel(rc, -1);
75 + if (ret < 0)
76 + return ret;
77 + }
78 + ret = uwb_rc_beacon(rc, channel, 0);
79 + }
80 +
81 + if (channel != -1)
82 + uwb_radio_channel_changed(rc, rc->beaconing);
83 +
84 + return ret;
85 + }
86 +
87 + /**
88 + * uwb_radio_start - request that the radio be started
89 + * @pal: the PAL making the request.
90 + *
91 + * If the radio is not already active, a suitable channel is selected
92 + * and beacons are started.
93 + */
94 + int uwb_radio_start(struct uwb_pal *pal)
95 + {
96 + struct uwb_rc *rc = pal->rc;
97 + int ret = 0;
98 +
99 + mutex_lock(&rc->uwb_dev.mutex);
100 +
101 + if (!pal->channel) {
102 + pal->channel = -1;
103 + rc->active_pals++;
104 + ret = uwb_radio_change_channel(rc, uwb_radio_select_channel(rc));
105 + }
106 +
107 + mutex_unlock(&rc->uwb_dev.mutex);
108 + return ret;
109 + }
110 + EXPORT_SYMBOL_GPL(uwb_radio_start);
111 +
112 + /**
113 + * uwb_radio_stop - request that the radio be stopped.
114 + * @pal: the PAL making the request.
115 + *
116 + * Stops the radio if no other PAL is making use of it.
117 + */ 118 + void uwb_radio_stop(struct uwb_pal *pal) 119 + { 120 + struct uwb_rc *rc = pal->rc; 121 + 122 + mutex_lock(&rc->uwb_dev.mutex); 123 + 124 + if (pal->channel) { 125 + rc->active_pals--; 126 + uwb_radio_change_channel(rc, uwb_radio_select_channel(rc)); 127 + pal->channel = 0; 128 + } 129 + 130 + mutex_unlock(&rc->uwb_dev.mutex); 131 + } 132 + EXPORT_SYMBOL_GPL(uwb_radio_stop); 133 + 134 + /* 135 + * uwb_radio_force_channel - force a specific channel to be used 136 + * @rc: the radio controller. 137 + * @channel: the channel to use; -1 to force the radio to stop; 0 to 138 + * use the default channel selection algorithm. 139 + */ 140 + int uwb_radio_force_channel(struct uwb_rc *rc, int channel) 141 + { 142 + int ret = 0; 143 + 144 + mutex_lock(&rc->uwb_dev.mutex); 145 + 146 + rc->beaconing_forced = channel; 147 + ret = uwb_radio_change_channel(rc, uwb_radio_select_channel(rc)); 148 + 149 + mutex_unlock(&rc->uwb_dev.mutex); 150 + return ret; 151 + } 152 + 153 + /* 154 + * uwb_radio_setup - setup the radio manager 155 + * @rc: the radio controller. 156 + * 157 + * The radio controller is reset to ensure it's in a known state 158 + * before it's used. 159 + */ 160 + int uwb_radio_setup(struct uwb_rc *rc) 161 + { 162 + return uwb_rc_reset(rc); 163 + } 164 + 165 + /* 166 + * uwb_radio_reset_state - reset any radio manager state 167 + * @rc: the radio controller. 168 + * 169 + * All internal radio manager state is reset to values corresponding 170 + * to a reset radio controller. 171 + */ 172 + void uwb_radio_reset_state(struct uwb_rc *rc) 173 + { 174 + struct uwb_pal *pal; 175 + 176 + mutex_lock(&rc->uwb_dev.mutex); 177 + 178 + list_for_each_entry(pal, &rc->pals, node) { 179 + if (pal->channel) { 180 + pal->channel = -1; 181 + if (pal->channel_changed) 182 + pal->channel_changed(pal, -1); 183 + } 184 + } 185 + 186 + rc->beaconing = -1; 187 + rc->scanning = -1; 188 + 189 + mutex_unlock(&rc->uwb_dev.mutex); 190 + } 191 + 192 + /* 193 + * uwb_radio_shutdown - shutdown the radio manager 194 + * @rc: the radio controller. 195 + * 196 + * The radio controller is reset. 197 + */ 198 + void uwb_radio_shutdown(struct uwb_rc *rc) 199 + { 200 + uwb_radio_reset_state(rc); 201 + uwb_rc_reset(rc); 202 + }
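Note: one of the commits in this merge ('wlp: start/stop radio on network interface up/down') is a direct consumer of this API; a PAL simply brackets its active period with uwb_radio_start() and uwb_radio_stop(). A hedged sketch with an invented netdev-backed PAL:

#include <linux/netdevice.h>
#include <linux/uwb.h>

/* Illustrative only: a PAL that needs the radio while its interface is up. */
struct example_net_pal {
	struct uwb_pal pal;	/* initialized and registered as sketched above */
};

static int example_ndo_open(struct net_device *dev)
{
	struct example_net_pal *np = netdev_priv(dev);

	/* Starts beaconing on a suitable channel unless the radio is
	 * already running on behalf of another PAL. */
	return uwb_radio_start(&np->pal);
}

static int example_ndo_stop(struct net_device *dev)
{
	struct example_net_pal *np = netdev_priv(dev);

	/* The radio keeps running if another PAL still needs it. */
	uwb_radio_stop(&np->pal);
	return 0;
}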
+37 -10
drivers/uwb/reset.c
··· 32 #include <linux/err.h> 33 34 #include "uwb-internal.h" 35 - #define D_LOCAL 0 36 - #include <linux/uwb/debug.h> 37 38 /** 39 * Command result codes (WUSB1.0[T8-69]) ··· 321 struct uwb_rc *rc = evt->rc; 322 int ret; 323 324 - /* Need to prevent the RC hardware module going away while in 325 - the rc->reset() call. */ 326 - if (!try_module_get(rc->owner)) 327 - return 0; 328 - 329 dev_info(&rc->uwb_dev.dev, "resetting radio controller\n"); 330 ret = rc->reset(rc); 331 - if (ret) 332 dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret); 333 - 334 - module_put(rc->owner); 335 return ret; 336 } 337 ··· 357 uwbd_event_queue(evt); 358 } 359 EXPORT_SYMBOL_GPL(uwb_rc_reset_all);
··· 32 #include <linux/err.h> 33 34 #include "uwb-internal.h" 35 36 /** 37 * Command result codes (WUSB1.0[T8-69]) ··· 323 struct uwb_rc *rc = evt->rc; 324 int ret; 325 326 dev_info(&rc->uwb_dev.dev, "resetting radio controller\n"); 327 ret = rc->reset(rc); 328 + if (ret) { 329 dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret); 330 + goto error; 331 + } 332 + return 0; 333 + error: 334 + /* Nothing can be done except try the reset again. */ 335 + uwb_rc_reset_all(rc); 336 return ret; 337 } 338 ··· 360 uwbd_event_queue(evt); 361 } 362 EXPORT_SYMBOL_GPL(uwb_rc_reset_all); 363 + 364 + void uwb_rc_pre_reset(struct uwb_rc *rc) 365 + { 366 + rc->stop(rc); 367 + uwbd_flush(rc); 368 + 369 + uwb_radio_reset_state(rc); 370 + uwb_rsv_remove_all(rc); 371 + } 372 + EXPORT_SYMBOL_GPL(uwb_rc_pre_reset); 373 + 374 + void uwb_rc_post_reset(struct uwb_rc *rc) 375 + { 376 + int ret; 377 + 378 + ret = rc->start(rc); 379 + if (ret) 380 + goto error; 381 + ret = uwb_rc_mac_addr_set(rc, &rc->uwb_dev.mac_addr); 382 + if (ret) 383 + goto error; 384 + ret = uwb_rc_dev_addr_set(rc, &rc->uwb_dev.dev_addr); 385 + if (ret) 386 + goto error; 387 + return; 388 + error: 389 + /* Nothing can be done except try the reset again. */ 390 + uwb_rc_reset_all(rc); 391 + } 392 + EXPORT_SYMBOL_GPL(uwb_rc_post_reset);
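Note: uwb_rc_pre_reset() and uwb_rc_post_reset() are exported so a radio controller driver can stay bound across a bus-level reset (see the umc-bus.c changes below) instead of being unbound and re-probed. The sketch below shows how a controller driver's reset hooks might wrap them; the drvdata accessor is an assumption about how that driver reaches its struct uwb_rc.

/* Illustrative pre/post reset hooks for a UMC radio controller driver. */
static int example_rc_pre_reset(struct umc_dev *umc)
{
	struct uwb_rc *uwb_rc = umc_get_drvdata(umc);	/* assumed accessor */

	/* Stop the hardware, flush queued events, drop reservations. */
	uwb_rc_pre_reset(uwb_rc);
	return 0;
}

static int example_rc_post_reset(struct umc_dev *umc)
{
	struct uwb_rc *uwb_rc = umc_get_drvdata(umc);	/* assumed accessor */

	/* Restart the controller and restore the MAC/device addresses. */
	uwb_rc_post_reset(uwb_rc);
	return 0;
}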
+436 -127
drivers/uwb/rsv.c
··· 15 * You should have received a copy of the GNU General Public License 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 */ 18 - #include <linux/version.h> 19 #include <linux/kernel.h> 20 #include <linux/uwb.h> 21 22 #include "uwb-internal.h" 23 24 static void uwb_rsv_timer(unsigned long arg); 25 26 static const char *rsv_states[] = { 27 - [UWB_RSV_STATE_NONE] = "none", 28 - [UWB_RSV_STATE_O_INITIATED] = "initiated", 29 - [UWB_RSV_STATE_O_PENDING] = "pending", 30 - [UWB_RSV_STATE_O_MODIFIED] = "modified", 31 - [UWB_RSV_STATE_O_ESTABLISHED] = "established", 32 - [UWB_RSV_STATE_T_ACCEPTED] = "accepted", 33 - [UWB_RSV_STATE_T_DENIED] = "denied", 34 - [UWB_RSV_STATE_T_PENDING] = "pending", 35 }; 36 37 static const char *rsv_types[] = { ··· 51 [UWB_DRP_TYPE_PRIVATE] = "private", 52 [UWB_DRP_TYPE_PCA] = "pca", 53 }; 54 55 /** 56 * uwb_rsv_state_str - return a string for a reservation state ··· 101 } 102 EXPORT_SYMBOL_GPL(uwb_rsv_type_str); 103 104 - static void uwb_rsv_dump(struct uwb_rsv *rsv) 105 { 106 struct device *dev = &rsv->rc->uwb_dev.dev; 107 struct uwb_dev_addr devaddr; ··· 117 dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state)); 118 } 119 120 /* 121 * Get a free stream index for a reservation. 122 * ··· 144 static int uwb_rsv_get_stream(struct uwb_rsv *rsv) 145 { 146 struct uwb_rc *rc = rsv->rc; 147 unsigned long *streams_bm; 148 int stream; 149 ··· 166 rsv->stream = stream; 167 set_bit(stream, streams_bm); 168 169 return 0; 170 } 171 172 static void uwb_rsv_put_stream(struct uwb_rsv *rsv) 173 { 174 struct uwb_rc *rc = rsv->rc; 175 unsigned long *streams_bm; 176 177 switch (rsv->target.type) { ··· 189 } 190 191 clear_bit(rsv->stream, streams_bm); 192 } 193 194 - /* 195 - * Generate a MAS allocation with a single row component. 196 - */ 197 - static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas, 198 - int first_mas, int mas_per_zone, 199 - int zs, int ze) 200 { 201 - struct uwb_mas_bm col; 202 - int z; 203 204 - bitmap_zero(mas->bm, UWB_NUM_MAS); 205 - bitmap_zero(col.bm, UWB_NUM_MAS); 206 - bitmap_fill(col.bm, mas_per_zone); 207 - bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS); 208 - 209 - for (z = zs; z <= ze; z++) { 210 - bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS); 211 - bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS); 212 } 213 } 214 215 - /* 216 - * Allocate some MAS for this reservation based on current local 217 - * availability, the reservation parameters (max_mas, min_mas, 218 - * sparsity), and the WiMedia rules for MAS allocations. 219 - * 220 - * Returns -EBUSY is insufficient free MAS are available. 221 - * 222 - * FIXME: to simplify this, only safe reservations with a single row 223 - * component in zones 1 to 15 are tried (zone 0 is skipped to avoid 224 - * problems with the MAS reserved for the BP). 225 - * 226 - * [ECMA-368] section B.2. 227 - */ 228 - static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv) 229 { 230 - static const int safe_mas_in_row[UWB_NUM_ZONES] = { 231 - 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, 232 - }; 233 - int n, r; 234 - struct uwb_mas_bm mas; 235 - bool found = false; 236 237 - /* 238 - * Search all valid safe allocations until either: too few MAS 239 - * are available; or the smallest allocation with sufficient 240 - * MAS is found. 
241 - * 242 - * The top of the zones are preferred, so space for larger 243 - * allocations is available in the bottom of the zone (e.g., a 244 - * 15 MAS allocation should start in row 14 leaving space for 245 - * a 120 MAS allocation at row 0). 246 - */ 247 - for (n = safe_mas_in_row[0]; n >= 1; n--) { 248 - int num_mas; 249 250 - num_mas = n * (UWB_NUM_ZONES - 1); 251 - if (num_mas < rsv->min_mas) 252 - break; 253 - if (found && num_mas < rsv->max_mas) 254 - break; 255 256 - for (r = UWB_MAS_PER_ZONE-1; r >= 0; r--) { 257 - if (safe_mas_in_row[r] < n) 258 - continue; 259 - uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES); 260 - if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) { 261 - found = true; 262 - break; 263 - } 264 - } 265 - } 266 267 - if (!found) 268 - return -EBUSY; 269 270 - bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); 271 - return 0; 272 } 273 274 static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) ··· 247 * received. 248 */ 249 if (rsv->is_multicast) { 250 - if (rsv->state == UWB_RSV_STATE_O_INITIATED) 251 sframes = 1; 252 if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) 253 sframes = 0; 254 } 255 256 - rsv->expired = false; 257 if (sframes > 0) { 258 /* 259 * Add an additional 2 superframes to account for the ··· 278 rsv->state = new_state; 279 rsv->ie_valid = false; 280 281 - uwb_rsv_dump(rsv); 282 283 uwb_rsv_stroke_timer(rsv); 284 uwb_rsv_sched_update(rsv->rc); ··· 292 293 void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) 294 { 295 if (rsv->state == new_state) { 296 switch (rsv->state) { 297 case UWB_RSV_STATE_O_ESTABLISHED: 298 case UWB_RSV_STATE_T_ACCEPTED: 299 case UWB_RSV_STATE_NONE: 300 uwb_rsv_stroke_timer(rsv); 301 break; ··· 314 return; 315 } 316 317 switch (new_state) { 318 case UWB_RSV_STATE_NONE: 319 - uwb_drp_avail_release(rsv->rc, &rsv->mas); 320 - uwb_rsv_put_stream(rsv); 321 uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); 322 uwb_rsv_callback(rsv); 323 break; ··· 327 case UWB_RSV_STATE_O_PENDING: 328 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); 329 break; 330 case UWB_RSV_STATE_O_ESTABLISHED: 331 uwb_drp_avail_reserve(rsv->rc, &rsv->mas); 332 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); 333 uwb_rsv_callback(rsv); 334 break; 335 case UWB_RSV_STATE_T_ACCEPTED: 336 uwb_drp_avail_reserve(rsv->rc, &rsv->mas); 337 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); 338 uwb_rsv_callback(rsv); ··· 373 case UWB_RSV_STATE_T_DENIED: 374 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); 375 break; 376 default: 377 dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", 378 uwb_rsv_state_str(new_state), new_state); 379 } 380 } 381 382 static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) ··· 459 460 INIT_LIST_HEAD(&rsv->rc_node); 461 INIT_LIST_HEAD(&rsv->pal_node); 462 init_timer(&rsv->timer); 463 rsv->timer.function = uwb_rsv_timer; 464 rsv->timer.data = (unsigned long)rsv; 465 466 rsv->rc = rc; 467 468 return rsv; 469 - } 470 - 471 - static void uwb_rsv_free(struct uwb_rsv *rsv) 472 - { 473 - uwb_dev_put(rsv->owner); 474 - if (rsv->target.type == UWB_RSV_TARGET_DEV) 475 - uwb_dev_put(rsv->target.dev); 476 - kfree(rsv); 477 } 478 479 /** ··· 500 501 void uwb_rsv_remove(struct uwb_rsv *rsv) 502 { 503 if (rsv->state != UWB_RSV_STATE_NONE) 504 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 505 del_timer_sync(&rsv->timer); 506 - list_del(&rsv->rc_node); 507 - uwb_rsv_free(rsv); 508 } 509 510 /** 511 * uwb_rsv_destroy - free a UWB reservation structure 512 * @rsv: the reservation to free 513 * 514 - * The 
reservation will be terminated if it is pending or established. 515 */ 516 void uwb_rsv_destroy(struct uwb_rsv *rsv) 517 { 518 - struct uwb_rc *rc = rsv->rc; 519 - 520 - mutex_lock(&rc->rsvs_mutex); 521 - uwb_rsv_remove(rsv); 522 - mutex_unlock(&rc->rsvs_mutex); 523 } 524 EXPORT_SYMBOL_GPL(uwb_rsv_destroy); 525 ··· 538 * @rsv: the reservation 539 * 540 * The PAL should fill in @rsv's owner, target, type, max_mas, 541 - * min_mas, sparsity and is_multicast fields. If the target is a 542 * uwb_dev it must be referenced. 543 * 544 * The reservation's callback will be called when the reservation is ··· 547 int uwb_rsv_establish(struct uwb_rsv *rsv) 548 { 549 struct uwb_rc *rc = rsv->rc; 550 int ret; 551 552 mutex_lock(&rc->rsvs_mutex); 553 - 554 ret = uwb_rsv_get_stream(rsv); 555 if (ret) 556 goto out; 557 558 - ret = uwb_rsv_alloc_mas(rsv); 559 - if (ret) { 560 uwb_rsv_put_stream(rsv); 561 goto out; 562 } 563 564 list_add_tail(&rsv->rc_node, &rc->reservations); 565 rsv->owner = &rc->uwb_dev; 566 uwb_dev_get(rsv->owner); ··· 588 * @rsv: the reservation to modify 589 * @max_mas: new maximum MAS to reserve 590 * @min_mas: new minimum MAS to reserve 591 - * @sparsity: new sparsity to use 592 * 593 * FIXME: implement this once there are PALs that use it. 594 */ 595 - int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity) 596 { 597 return -ENOSYS; 598 } 599 EXPORT_SYMBOL_GPL(uwb_rsv_modify); 600 601 /** 602 * uwb_rsv_terminate - terminate an established reservation ··· 669 670 mutex_lock(&rc->rsvs_mutex); 671 672 - uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 673 674 mutex_unlock(&rc->rsvs_mutex); 675 } ··· 684 * 685 * Reservation requests from peers are denied unless a PAL accepts it 686 * by calling this function. 687 */ 688 void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv) 689 { 690 rsv->callback = cb; 691 rsv->pal_priv = pal_priv; 692 rsv->state = UWB_RSV_STATE_T_ACCEPTED; ··· 742 uwb_dev_get(rsv->owner); 743 rsv->target.type = UWB_RSV_TARGET_DEV; 744 rsv->target.dev = &rc->uwb_dev; 745 rsv->type = uwb_ie_drp_type(drp_ie); 746 rsv->stream = uwb_ie_drp_stream_index(drp_ie); 747 - set_bit(rsv->stream, rsv->owner->streams); 748 uwb_drp_ie_to_bm(&rsv->mas, drp_ie); 749 750 /* ··· 752 * deny the request. 753 */ 754 rsv->state = UWB_RSV_STATE_T_DENIED; 755 - spin_lock(&rc->pal_lock); 756 list_for_each_entry(pal, &rc->pals, node) { 757 if (pal->new_rsv) 758 - pal->new_rsv(rsv); 759 if (rsv->state == UWB_RSV_STATE_T_ACCEPTED) 760 break; 761 } 762 - spin_unlock(&rc->pal_lock); 763 764 list_add_tail(&rsv->rc_node, &rc->reservations); 765 state = rsv->state; 766 rsv->state = UWB_RSV_STATE_NONE; 767 - uwb_rsv_set_state(rsv, state); 768 769 return rsv; 770 } 771 772 /** 773 * uwb_rsv_find - find a reservation for a received DRP IE. 
··· 830 bool ie_updated = false; 831 832 list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { 833 - if (rsv->expired) 834 - uwb_drp_handle_timeout(rsv); 835 if (!rsv->ie_valid) { 836 uwb_drp_ie_update(rsv); 837 ie_updated = true; ··· 839 return ie_updated; 840 } 841 842 void uwb_rsv_sched_update(struct uwb_rc *rc) 843 { 844 - queue_work(rc->rsv_workq, &rc->rsv_update_work); 845 } 846 847 /* ··· 888 */ 889 static void uwb_rsv_update_work(struct work_struct *work) 890 { 891 - struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work); 892 bool ie_updated; 893 894 mutex_lock(&rc->rsvs_mutex); ··· 901 ie_updated = true; 902 } 903 904 - if (ie_updated) 905 uwb_rc_send_all_drp_ie(rc); 906 907 mutex_unlock(&rc->rsvs_mutex); 908 } ··· 928 { 929 struct uwb_rsv *rsv = (struct uwb_rsv *)arg; 930 931 - rsv->expired = true; 932 - uwb_rsv_sched_update(rsv->rc); 933 } 934 935 void uwb_rsv_init(struct uwb_rc *rc) 936 { 937 INIT_LIST_HEAD(&rc->reservations); 938 mutex_init(&rc->rsvs_mutex); 939 - INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work); 940 941 bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); 942 } ··· 984 985 void uwb_rsv_cleanup(struct uwb_rc *rc) 986 { 987 - struct uwb_rsv *rsv, *t; 988 - 989 - mutex_lock(&rc->rsvs_mutex); 990 - list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { 991 - uwb_rsv_remove(rsv); 992 - } 993 - mutex_unlock(&rc->rsvs_mutex); 994 - 995 - cancel_work_sync(&rc->rsv_update_work); 996 destroy_workqueue(rc->rsv_workq); 997 }
··· 15 * You should have received a copy of the GNU General Public License 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 */ 18 #include <linux/kernel.h> 19 #include <linux/uwb.h> 20 + #include <linux/random.h> 21 22 #include "uwb-internal.h" 23 24 static void uwb_rsv_timer(unsigned long arg); 25 26 static const char *rsv_states[] = { 27 + [UWB_RSV_STATE_NONE] = "none ", 28 + [UWB_RSV_STATE_O_INITIATED] = "o initiated ", 29 + [UWB_RSV_STATE_O_PENDING] = "o pending ", 30 + [UWB_RSV_STATE_O_MODIFIED] = "o modified ", 31 + [UWB_RSV_STATE_O_ESTABLISHED] = "o established ", 32 + [UWB_RSV_STATE_O_TO_BE_MOVED] = "o to be moved ", 33 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = "o move expanding", 34 + [UWB_RSV_STATE_O_MOVE_COMBINING] = "o move combining", 35 + [UWB_RSV_STATE_O_MOVE_REDUCING] = "o move reducing ", 36 + [UWB_RSV_STATE_T_ACCEPTED] = "t accepted ", 37 + [UWB_RSV_STATE_T_CONFLICT] = "t conflict ", 38 + [UWB_RSV_STATE_T_PENDING] = "t pending ", 39 + [UWB_RSV_STATE_T_DENIED] = "t denied ", 40 + [UWB_RSV_STATE_T_RESIZED] = "t resized ", 41 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ", 42 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf", 43 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = "t expanding pend", 44 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = "t expanding den ", 45 }; 46 47 static const char *rsv_types[] = { ··· 41 [UWB_DRP_TYPE_PRIVATE] = "private", 42 [UWB_DRP_TYPE_PCA] = "pca", 43 }; 44 + 45 + bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv) 46 + { 47 + static const bool has_two_drp_ies[] = { 48 + [UWB_RSV_STATE_O_INITIATED] = false, 49 + [UWB_RSV_STATE_O_PENDING] = false, 50 + [UWB_RSV_STATE_O_MODIFIED] = false, 51 + [UWB_RSV_STATE_O_ESTABLISHED] = false, 52 + [UWB_RSV_STATE_O_TO_BE_MOVED] = false, 53 + [UWB_RSV_STATE_O_MOVE_COMBINING] = false, 54 + [UWB_RSV_STATE_O_MOVE_REDUCING] = false, 55 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = true, 56 + [UWB_RSV_STATE_T_ACCEPTED] = false, 57 + [UWB_RSV_STATE_T_CONFLICT] = false, 58 + [UWB_RSV_STATE_T_PENDING] = false, 59 + [UWB_RSV_STATE_T_DENIED] = false, 60 + [UWB_RSV_STATE_T_RESIZED] = false, 61 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true, 62 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true, 63 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = true, 64 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = true, 65 + }; 66 + 67 + return has_two_drp_ies[rsv->state]; 68 + } 69 70 /** 71 * uwb_rsv_state_str - return a string for a reservation state ··· 66 } 67 EXPORT_SYMBOL_GPL(uwb_rsv_type_str); 68 69 + void uwb_rsv_dump(char *text, struct uwb_rsv *rsv) 70 { 71 struct device *dev = &rsv->rc->uwb_dev.dev; 72 struct uwb_dev_addr devaddr; ··· 82 dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state)); 83 } 84 85 + static void uwb_rsv_release(struct kref *kref) 86 + { 87 + struct uwb_rsv *rsv = container_of(kref, struct uwb_rsv, kref); 88 + 89 + kfree(rsv); 90 + } 91 + 92 + void uwb_rsv_get(struct uwb_rsv *rsv) 93 + { 94 + kref_get(&rsv->kref); 95 + } 96 + 97 + void uwb_rsv_put(struct uwb_rsv *rsv) 98 + { 99 + kref_put(&rsv->kref, uwb_rsv_release); 100 + } 101 + 102 /* 103 * Get a free stream index for a reservation. 
104 * ··· 92 static int uwb_rsv_get_stream(struct uwb_rsv *rsv) 93 { 94 struct uwb_rc *rc = rsv->rc; 95 + struct device *dev = &rc->uwb_dev.dev; 96 unsigned long *streams_bm; 97 int stream; 98 ··· 113 rsv->stream = stream; 114 set_bit(stream, streams_bm); 115 116 + dev_dbg(dev, "get stream %d\n", rsv->stream); 117 + 118 return 0; 119 } 120 121 static void uwb_rsv_put_stream(struct uwb_rsv *rsv) 122 { 123 struct uwb_rc *rc = rsv->rc; 124 + struct device *dev = &rc->uwb_dev.dev; 125 unsigned long *streams_bm; 126 127 switch (rsv->target.type) { ··· 133 } 134 135 clear_bit(rsv->stream, streams_bm); 136 + 137 + dev_dbg(dev, "put stream %d\n", rsv->stream); 138 } 139 140 + void uwb_rsv_backoff_win_timer(unsigned long arg) 141 { 142 + struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg; 143 + struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow); 144 + struct device *dev = &rc->uwb_dev.dev; 145 146 + bow->can_reserve_extra_mases = true; 147 + if (bow->total_expired <= 4) { 148 + bow->total_expired++; 149 + } else { 150 + /* after 4 backoff window has expired we can exit from 151 + * the backoff procedure */ 152 + bow->total_expired = 0; 153 + bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1; 154 } 155 + dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n: ", bow->total_expired, bow->n); 156 + 157 + /* try to relocate all the "to be moved" relocations */ 158 + uwb_rsv_handle_drp_avail_change(rc); 159 } 160 161 + void uwb_rsv_backoff_win_increment(struct uwb_rc *rc) 162 { 163 + struct uwb_drp_backoff_win *bow = &rc->bow; 164 + struct device *dev = &rc->uwb_dev.dev; 165 + unsigned timeout_us; 166 167 + dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window); 168 169 + bow->can_reserve_extra_mases = false; 170 171 + if((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX) 172 + return; 173 174 + bow->window <<= 1; 175 + bow->n = random32() & (bow->window - 1); 176 + dev_dbg(dev, "new_window=%d, n=%d\n: ", bow->window, bow->n); 177 178 + /* reset the timer associated variables */ 179 + timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US; 180 + bow->total_expired = 0; 181 + mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us)); 182 } 183 184 static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) ··· 225 * received. 
226 */ 227 if (rsv->is_multicast) { 228 + if (rsv->state == UWB_RSV_STATE_O_INITIATED 229 + || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING 230 + || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING 231 + || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) 232 sframes = 1; 233 if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) 234 sframes = 0; 235 + 236 } 237 238 if (sframes > 0) { 239 /* 240 * Add an additional 2 superframes to account for the ··· 253 rsv->state = new_state; 254 rsv->ie_valid = false; 255 256 + uwb_rsv_dump("SU", rsv); 257 258 uwb_rsv_stroke_timer(rsv); 259 uwb_rsv_sched_update(rsv->rc); ··· 267 268 void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) 269 { 270 + struct uwb_rsv_move *mv = &rsv->mv; 271 + 272 if (rsv->state == new_state) { 273 switch (rsv->state) { 274 case UWB_RSV_STATE_O_ESTABLISHED: 275 + case UWB_RSV_STATE_O_MOVE_EXPANDING: 276 + case UWB_RSV_STATE_O_MOVE_COMBINING: 277 + case UWB_RSV_STATE_O_MOVE_REDUCING: 278 case UWB_RSV_STATE_T_ACCEPTED: 279 + case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: 280 + case UWB_RSV_STATE_T_RESIZED: 281 case UWB_RSV_STATE_NONE: 282 uwb_rsv_stroke_timer(rsv); 283 break; ··· 282 return; 283 } 284 285 + uwb_rsv_dump("SC", rsv); 286 + 287 switch (new_state) { 288 case UWB_RSV_STATE_NONE: 289 uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); 290 uwb_rsv_callback(rsv); 291 break; ··· 295 case UWB_RSV_STATE_O_PENDING: 296 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); 297 break; 298 + case UWB_RSV_STATE_O_MODIFIED: 299 + /* in the companion there are the MASes to drop */ 300 + bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); 301 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED); 302 + break; 303 case UWB_RSV_STATE_O_ESTABLISHED: 304 + if (rsv->state == UWB_RSV_STATE_O_MODIFIED 305 + || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) { 306 + uwb_drp_avail_release(rsv->rc, &mv->companion_mas); 307 + rsv->needs_release_companion_mas = false; 308 + } 309 uwb_drp_avail_reserve(rsv->rc, &rsv->mas); 310 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); 311 uwb_rsv_callback(rsv); 312 break; 313 + case UWB_RSV_STATE_O_MOVE_EXPANDING: 314 + rsv->needs_release_companion_mas = true; 315 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); 316 + break; 317 + case UWB_RSV_STATE_O_MOVE_COMBINING: 318 + rsv->needs_release_companion_mas = false; 319 + uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); 320 + bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); 321 + rsv->mas.safe += mv->companion_mas.safe; 322 + rsv->mas.unsafe += mv->companion_mas.unsafe; 323 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); 324 + break; 325 + case UWB_RSV_STATE_O_MOVE_REDUCING: 326 + bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); 327 + rsv->needs_release_companion_mas = true; 328 + rsv->mas.safe = mv->final_mas.safe; 329 + rsv->mas.unsafe = mv->final_mas.unsafe; 330 + bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); 331 + bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS); 332 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); 333 + break; 334 case UWB_RSV_STATE_T_ACCEPTED: 335 + case UWB_RSV_STATE_T_RESIZED: 336 + rsv->needs_release_companion_mas = false; 337 uwb_drp_avail_reserve(rsv->rc, &rsv->mas); 338 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); 339 uwb_rsv_callback(rsv); ··· 308 case UWB_RSV_STATE_T_DENIED: 309 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); 310 break; 311 + case 
UWB_RSV_STATE_T_CONFLICT: 312 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT); 313 + break; 314 + case UWB_RSV_STATE_T_PENDING: 315 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING); 316 + break; 317 + case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: 318 + rsv->needs_release_companion_mas = true; 319 + uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); 320 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); 321 + break; 322 default: 323 dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", 324 uwb_rsv_state_str(new_state), new_state); 325 } 326 + } 327 + 328 + static void uwb_rsv_handle_timeout_work(struct work_struct *work) 329 + { 330 + struct uwb_rsv *rsv = container_of(work, struct uwb_rsv, 331 + handle_timeout_work); 332 + struct uwb_rc *rc = rsv->rc; 333 + 334 + mutex_lock(&rc->rsvs_mutex); 335 + 336 + uwb_rsv_dump("TO", rsv); 337 + 338 + switch (rsv->state) { 339 + case UWB_RSV_STATE_O_INITIATED: 340 + if (rsv->is_multicast) { 341 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 342 + goto unlock; 343 + } 344 + break; 345 + case UWB_RSV_STATE_O_MOVE_EXPANDING: 346 + if (rsv->is_multicast) { 347 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); 348 + goto unlock; 349 + } 350 + break; 351 + case UWB_RSV_STATE_O_MOVE_COMBINING: 352 + if (rsv->is_multicast) { 353 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); 354 + goto unlock; 355 + } 356 + break; 357 + case UWB_RSV_STATE_O_MOVE_REDUCING: 358 + if (rsv->is_multicast) { 359 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 360 + goto unlock; 361 + } 362 + break; 363 + case UWB_RSV_STATE_O_ESTABLISHED: 364 + if (rsv->is_multicast) 365 + goto unlock; 366 + break; 367 + case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: 368 + /* 369 + * The time out could be for the main or of the 370 + * companion DRP, assume it's for the companion and 371 + * drop that first. A further time out is required to 372 + * drop the main. 373 + */ 374 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); 375 + uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); 376 + goto unlock; 377 + default: 378 + break; 379 + } 380 + 381 + uwb_rsv_remove(rsv); 382 + 383 + unlock: 384 + mutex_unlock(&rc->rsvs_mutex); 385 } 386 387 static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) ··· 324 325 INIT_LIST_HEAD(&rsv->rc_node); 326 INIT_LIST_HEAD(&rsv->pal_node); 327 + kref_init(&rsv->kref); 328 init_timer(&rsv->timer); 329 rsv->timer.function = uwb_rsv_timer; 330 rsv->timer.data = (unsigned long)rsv; 331 332 rsv->rc = rc; 333 + INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work); 334 335 return rsv; 336 } 337 338 /** ··· 371 372 void uwb_rsv_remove(struct uwb_rsv *rsv) 373 { 374 + uwb_rsv_dump("RM", rsv); 375 + 376 if (rsv->state != UWB_RSV_STATE_NONE) 377 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 378 + 379 + if (rsv->needs_release_companion_mas) 380 + uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); 381 + uwb_drp_avail_release(rsv->rc, &rsv->mas); 382 + 383 + if (uwb_rsv_is_owner(rsv)) 384 + uwb_rsv_put_stream(rsv); 385 + 386 del_timer_sync(&rsv->timer); 387 + uwb_dev_put(rsv->owner); 388 + if (rsv->target.type == UWB_RSV_TARGET_DEV) 389 + uwb_dev_put(rsv->target.dev); 390 + 391 + list_del_init(&rsv->rc_node); 392 + uwb_rsv_put(rsv); 393 } 394 395 /** 396 * uwb_rsv_destroy - free a UWB reservation structure 397 * @rsv: the reservation to free 398 * 399 + * The reservation must already be terminated. 
400 */ 401 void uwb_rsv_destroy(struct uwb_rsv *rsv) 402 { 403 + uwb_rsv_put(rsv); 404 } 405 EXPORT_SYMBOL_GPL(uwb_rsv_destroy); 406 ··· 399 * @rsv: the reservation 400 * 401 * The PAL should fill in @rsv's owner, target, type, max_mas, 402 + * min_mas, max_interval and is_multicast fields. If the target is a 403 * uwb_dev it must be referenced. 404 * 405 * The reservation's callback will be called when the reservation is ··· 408 int uwb_rsv_establish(struct uwb_rsv *rsv) 409 { 410 struct uwb_rc *rc = rsv->rc; 411 + struct uwb_mas_bm available; 412 int ret; 413 414 mutex_lock(&rc->rsvs_mutex); 415 ret = uwb_rsv_get_stream(rsv); 416 if (ret) 417 goto out; 418 419 + rsv->tiebreaker = random32() & 1; 420 + /* get available mas bitmap */ 421 + uwb_drp_available(rc, &available); 422 + 423 + ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas); 424 + if (ret == UWB_RSV_ALLOC_NOT_FOUND) { 425 + ret = -EBUSY; 426 uwb_rsv_put_stream(rsv); 427 goto out; 428 } 429 430 + ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas); 431 + if (ret != 0) { 432 + uwb_rsv_put_stream(rsv); 433 + goto out; 434 + } 435 + 436 + uwb_rsv_get(rsv); 437 list_add_tail(&rsv->rc_node, &rc->reservations); 438 rsv->owner = &rc->uwb_dev; 439 uwb_dev_get(rsv->owner); ··· 437 * @rsv: the reservation to modify 438 * @max_mas: new maximum MAS to reserve 439 * @min_mas: new minimum MAS to reserve 440 + * @max_interval: new max_interval to use 441 * 442 * FIXME: implement this once there are PALs that use it. 443 */ 444 + int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval) 445 { 446 return -ENOSYS; 447 } 448 EXPORT_SYMBOL_GPL(uwb_rsv_modify); 449 + 450 + /* 451 + * move an already established reservation (rc->rsvs_mutex must to be 452 + * taken when tis function is called) 453 + */ 454 + int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available) 455 + { 456 + struct uwb_rc *rc = rsv->rc; 457 + struct uwb_drp_backoff_win *bow = &rc->bow; 458 + struct device *dev = &rc->uwb_dev.dev; 459 + struct uwb_rsv_move *mv; 460 + int ret = 0; 461 + 462 + if (bow->can_reserve_extra_mases == false) 463 + return -EBUSY; 464 + 465 + mv = &rsv->mv; 466 + 467 + if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) { 468 + 469 + if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) { 470 + /* We want to move the reservation */ 471 + bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS); 472 + uwb_drp_avail_reserve_pending(rc, &mv->companion_mas); 473 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); 474 + } 475 + } else { 476 + dev_dbg(dev, "new allocation not found\n"); 477 + } 478 + 479 + return ret; 480 + } 481 + 482 + /* It will try to move every reservation in state O_ESTABLISHED giving 483 + * to the MAS allocator algorithm an availability that is the real one 484 + * plus the allocation already established from the reservation. 
*/ 485 + void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc) 486 + { 487 + struct uwb_drp_backoff_win *bow = &rc->bow; 488 + struct uwb_rsv *rsv; 489 + struct uwb_mas_bm mas; 490 + 491 + if (bow->can_reserve_extra_mases == false) 492 + return; 493 + 494 + list_for_each_entry(rsv, &rc->reservations, rc_node) { 495 + if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED || 496 + rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) { 497 + uwb_drp_available(rc, &mas); 498 + bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS); 499 + uwb_rsv_try_move(rsv, &mas); 500 + } 501 + } 502 + 503 + } 504 505 /** 506 * uwb_rsv_terminate - terminate an established reservation ··· 463 464 mutex_lock(&rc->rsvs_mutex); 465 466 + if (rsv->state != UWB_RSV_STATE_NONE) 467 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 468 469 mutex_unlock(&rc->rsvs_mutex); 470 } ··· 477 * 478 * Reservation requests from peers are denied unless a PAL accepts it 479 * by calling this function. 480 + * 481 + * The PAL call uwb_rsv_destroy() for all accepted reservations before 482 + * calling uwb_pal_unregister(). 483 */ 484 void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv) 485 { 486 + uwb_rsv_get(rsv); 487 + 488 rsv->callback = cb; 489 rsv->pal_priv = pal_priv; 490 rsv->state = UWB_RSV_STATE_T_ACCEPTED; ··· 530 uwb_dev_get(rsv->owner); 531 rsv->target.type = UWB_RSV_TARGET_DEV; 532 rsv->target.dev = &rc->uwb_dev; 533 + uwb_dev_get(&rc->uwb_dev); 534 rsv->type = uwb_ie_drp_type(drp_ie); 535 rsv->stream = uwb_ie_drp_stream_index(drp_ie); 536 uwb_drp_ie_to_bm(&rsv->mas, drp_ie); 537 538 /* ··· 540 * deny the request. 541 */ 542 rsv->state = UWB_RSV_STATE_T_DENIED; 543 + mutex_lock(&rc->uwb_dev.mutex); 544 list_for_each_entry(pal, &rc->pals, node) { 545 if (pal->new_rsv) 546 + pal->new_rsv(pal, rsv); 547 if (rsv->state == UWB_RSV_STATE_T_ACCEPTED) 548 break; 549 } 550 + mutex_unlock(&rc->uwb_dev.mutex); 551 552 list_add_tail(&rsv->rc_node, &rc->reservations); 553 state = rsv->state; 554 rsv->state = UWB_RSV_STATE_NONE; 555 + 556 + /* FIXME: do something sensible here */ 557 + if (state == UWB_RSV_STATE_T_ACCEPTED 558 + && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) { 559 + /* FIXME: do something sensible here */ 560 + } else { 561 + uwb_rsv_set_state(rsv, state); 562 + } 563 564 return rsv; 565 } 566 + 567 + /** 568 + * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservations 569 + * @rsv: the reservation. 570 + * @mas: returns the available MAS. 571 + * 572 + * The usable MAS of a reservation may be less than the negotiated MAS 573 + * if alien BPs are present. 574 + */ 575 + void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas) 576 + { 577 + bitmap_zero(mas->bm, UWB_NUM_MAS); 578 + bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); 579 + } 580 + EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas); 581 582 /** 583 * uwb_rsv_find - find a reservation for a received DRP IE. ··· 596 bool ie_updated = false; 597 598 list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { 599 if (!rsv->ie_valid) { 600 uwb_drp_ie_update(rsv); 601 ie_updated = true; ··· 607 return ie_updated; 608 } 609 610 + void uwb_rsv_queue_update(struct uwb_rc *rc) 611 + { 612 + unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; 613 + 614 + queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us)); 615 + } 616 + 617 + /** 618 + * uwb_rsv_sched_update - schedule an update of the DRP IEs 619 + * @rc: the radio controller. 
620 + *
621 + * To improve performance and ensure correctness with [ECMA-368] the
622 + * number of SET-DRP-IE commands that are done is limited.
623 + *
624 + * DRP IE updates come from two sources: DRP events from the hardware
625 + * which all occur at the beginning of the superframe ('synchronous'
626 + * events) and reservation establishment/termination requests from
627 + * PALs or timers ('asynchronous' events).
628 + *
629 + * A delayed work ensures that all the synchronous events result in
630 + * one SET-DRP-IE command.
631 + *
632 + * Additional logic (the set_drp_ie_pending and rsv_updated_postponed
633 + * flags) will prevent an asynchronous event starting a SET-DRP-IE
634 + * command if one is currently awaiting a response.
635 + *
636 + * FIXME: this does leave a window where an asynchronous event can delay
637 + * the SET-DRP-IE for a synchronous event by one superframe.
638 + */
639 void uwb_rsv_sched_update(struct uwb_rc *rc)
640 {
641 + spin_lock(&rc->rsvs_lock);
642 + if (!delayed_work_pending(&rc->rsv_update_work)) {
643 + if (rc->set_drp_ie_pending > 0) {
644 + rc->set_drp_ie_pending++;
645 + goto unlock;
646 + }
647 + uwb_rsv_queue_update(rc);
648 + }
649 + unlock:
650 + spin_unlock(&rc->rsvs_lock);
651 }
652
653 /*
··· 618 */
619 static void uwb_rsv_update_work(struct work_struct *work)
620 {
621 + struct uwb_rc *rc = container_of(work, struct uwb_rc,
622 + rsv_update_work.work);
623 bool ie_updated;
624
625 mutex_lock(&rc->rsvs_mutex);
··· 630 ie_updated = true;
631 }
632
633 + if (ie_updated && (rc->set_drp_ie_pending == 0))
634 uwb_rc_send_all_drp_ie(rc);
635 +
636 + mutex_unlock(&rc->rsvs_mutex);
637 + }
638 +
639 + static void uwb_rsv_alien_bp_work(struct work_struct *work)
640 + {
641 + struct uwb_rc *rc = container_of(work, struct uwb_rc,
642 + rsv_alien_bp_work.work);
643 + struct uwb_rsv *rsv;
644 +
645 + mutex_lock(&rc->rsvs_mutex);
646 +
647 + list_for_each_entry(rsv, &rc->reservations, rc_node) {
648 + if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) {
649 + rsv->callback(rsv);
650 + }
651 + }
652
653 mutex_unlock(&rc->rsvs_mutex);
654 }
··· 640 {
641 struct uwb_rsv *rsv = (struct uwb_rsv *)arg;
642
643 + queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
644 + }
645 +
646 + /**
647 + * uwb_rsv_remove_all - remove all reservations
648 + * @rc: the radio controller
649 + *
650 + * A DRP IE update is not done.
651 + */
652 + void uwb_rsv_remove_all(struct uwb_rc *rc)
653 + {
654 + struct uwb_rsv *rsv, *t;
655 +
656 + mutex_lock(&rc->rsvs_mutex);
657 + list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
658 + uwb_rsv_remove(rsv);
659 + }
660 + /* Cancel any postponed update.
*/ 661 + rc->set_drp_ie_pending = 0; 662 + mutex_unlock(&rc->rsvs_mutex); 663 + 664 + cancel_delayed_work_sync(&rc->rsv_update_work); 665 } 666 667 void uwb_rsv_init(struct uwb_rc *rc) 668 { 669 INIT_LIST_HEAD(&rc->reservations); 670 + INIT_LIST_HEAD(&rc->cnflt_alien_list); 671 mutex_init(&rc->rsvs_mutex); 672 + spin_lock_init(&rc->rsvs_lock); 673 + INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work); 674 + INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work); 675 + rc->bow.can_reserve_extra_mases = true; 676 + rc->bow.total_expired = 0; 677 + rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1; 678 + init_timer(&rc->bow.timer); 679 + rc->bow.timer.function = uwb_rsv_backoff_win_timer; 680 + rc->bow.timer.data = (unsigned long)&rc->bow; 681 682 bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); 683 } ··· 667 668 void uwb_rsv_cleanup(struct uwb_rc *rc) 669 { 670 + uwb_rsv_remove_all(rc); 671 destroy_workqueue(rc->rsv_workq); 672 }
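Note: for a PAL the reservation API keeps its shape but now takes max_interval rather than sparsity, and reservations are reference counted (uwb_rsv_get()/uwb_rsv_put(), with uwb_rsv_destroy() dropping the creator's reference). A hedged end-to-end sketch follows; the DRP type, the numbers and the callback body are illustrative only.

/* Illustrative reservation lifecycle as seen from a PAL. */
static void example_rsv_cb(struct uwb_rsv *rsv)
{
	struct uwb_mas_bm mas;

	if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) {
		/* Usable MAS may be fewer than negotiated when an alien
		 * BP is present. */
		uwb_rsv_get_usable_mas(rsv, &mas);
		/* ... program the hardware with 'mas' ... */
	}
}

static int example_reserve(struct uwb_rc *rc, struct uwb_dev *peer)
{
	struct uwb_rsv *rsv;
	int ret;

	rsv = uwb_rsv_create(rc, example_rsv_cb, NULL);
	if (rsv == NULL)
		return -ENOMEM;

	rsv->target.type  = UWB_RSV_TARGET_DEV;
	rsv->target.dev   = peer;		/* must hold a reference on peer */
	rsv->type         = UWB_DRP_TYPE_HARD;	/* illustrative */
	rsv->max_mas      = 64;			/* illustrative numbers */
	rsv->min_mas      = 16;
	rsv->max_interval = 4;

	ret = uwb_rsv_establish(rsv);	/* result is reported via the callback */
	if (ret)
		uwb_rsv_destroy(rsv);
	return ret;
}

/* Later: uwb_rsv_terminate(rsv) gives the MAS back, then
 * uwb_rsv_destroy(rsv) drops the creator's reference. */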
+43 -19
drivers/uwb/umc-bus.c
··· 11 #include <linux/uwb/umc.h> 12 #include <linux/pci.h> 13 14 - static int umc_bus_unbind_helper(struct device *dev, void *data) 15 { 16 - struct device *parent = data; 17 18 - if (dev->parent == parent && dev->driver) 19 - device_release_driver(dev); 20 - return 0; 21 } 22 23 /** 24 * umc_controller_reset - reset the whole UMC controller 25 * @umc: the UMC device for the radio controller. 26 * 27 - * Drivers will be unbound from all UMC devices belonging to the 28 - * controller and then the radio controller will be rebound. The 29 - * radio controller is expected to do a full hardware reset when it is 30 - * probed. 31 * 32 * If this is called while a probe() or remove() is in progress it 33 * will return -EAGAIN and not perform the reset. ··· 60 int umc_controller_reset(struct umc_dev *umc) 61 { 62 struct device *parent = umc->dev.parent; 63 - int ret; 64 65 - if (down_trylock(&parent->sem)) 66 return -EAGAIN; 67 - bus_for_each_dev(&umc_bus_type, NULL, parent, umc_bus_unbind_helper); 68 - ret = device_attach(&umc->dev); 69 - if (ret == 1) 70 - ret = 0; 71 up(&parent->sem); 72 73 return ret; ··· 99 if (!dev->driver) 100 ret = device_attach(dev); 101 102 - return ret < 0 ? ret : 0; 103 } 104 105 - static void umc_bus_rescan(void) 106 { 107 int err; 108 ··· 110 * We can't use bus_rescan_devices() here as it deadlocks when 111 * it tries to retake the dev->parent semaphore. 112 */ 113 - err = bus_for_each_dev(&umc_bus_type, NULL, NULL, umc_bus_rescan_helper); 114 if (err < 0) 115 printk(KERN_WARNING "%s: rescan of bus failed: %d\n", 116 KBUILD_MODNAME, err); ··· 144 if (err) 145 put_device(dev); 146 else 147 - umc_bus_rescan(); 148 149 return err; 150 }
··· 11 #include <linux/uwb/umc.h>
12 #include <linux/pci.h>
13
14 + static int umc_bus_pre_reset_helper(struct device *dev, void *data)
15 {
16 + int ret = 0;
17
18 + if (dev->driver) {
19 + struct umc_dev *umc = to_umc_dev(dev);
20 + struct umc_driver *umc_drv = to_umc_driver(dev->driver);
21 +
22 + if (umc_drv->pre_reset)
23 + ret = umc_drv->pre_reset(umc);
24 + else
25 + device_release_driver(dev);
26 + }
27 + return ret;
28 + }
29 +
30 + static int umc_bus_post_reset_helper(struct device *dev, void *data)
31 + {
32 + int ret = 0;
33 +
34 + if (dev->driver) {
35 + struct umc_dev *umc = to_umc_dev(dev);
36 + struct umc_driver *umc_drv = to_umc_driver(dev->driver);
37 +
38 + if (umc_drv->post_reset)
39 + ret = umc_drv->post_reset(umc);
40 + } else
41 + ret = device_attach(dev);
42 +
43 + return ret;
44 }
45
46 /**
47 * umc_controller_reset - reset the whole UMC controller
48 * @umc: the UMC device for the radio controller.
49 *
50 + * Drivers for all capabilities of the controller will have their
51 + * pre_reset methods called or be unbound from their device. Then all
52 + * post_reset methods will be called or the drivers will be rebound.
53 + *
54 + * Radio controllers must provide pre_reset and post_reset methods and
55 + * reset the hardware in their start method.
56 + *
57 * If this is called while a probe() or remove() is in progress it
58 * will return -EAGAIN and not perform the reset.
··· 35 int umc_controller_reset(struct umc_dev *umc)
36 {
37 struct device *parent = umc->dev.parent;
38 + int ret = 0;
39
40 + if (down_trylock(&parent->sem))
41 return -EAGAIN;
42 + ret = device_for_each_child(parent, parent, umc_bus_pre_reset_helper);
43 + if (ret >= 0)
44 + device_for_each_child(parent, parent, umc_bus_post_reset_helper);
45 up(&parent->sem);
46
47 return ret;
··· 75 if (!dev->driver)
76 ret = device_attach(dev);
77
78 + return ret;
79 }
80
81 + static void umc_bus_rescan(struct device *parent)
82 {
83 int err;
84
··· 86 * We can't use bus_rescan_devices() here as it deadlocks when
87 * it tries to retake the dev->parent semaphore.
88 */
89 + err = device_for_each_child(parent, NULL, umc_bus_rescan_helper);
90 if (err < 0)
91 printk(KERN_WARNING "%s: rescan of bus failed: %d\n",
92 KBUILD_MODNAME, err);
··· 120 if (err)
121 put_device(dev);
122 else
123 + umc_bus_rescan(dev->parent);
124
125 return err;
126 }
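Note: on the driver side of this bus change, a capability driver opts in to surviving a controller reset by providing the new pre_reset/post_reset methods in its struct umc_driver; a driver without them is still unbound and rebound as before. A sketch of the opt-in (only the reset-related members are shown; the rest of the umc_driver setup is unchanged and omitted):

/* Illustrative: a UMC capability driver that survives controller resets. */
static int example_cap_pre_reset(struct umc_dev *umc)
{
	/* Quiesce the hardware; a negative return aborts the reset
	 * sequence in umc_controller_reset(). */
	return 0;
}

static int example_cap_post_reset(struct umc_dev *umc)
{
	/* Reprogram the capability now that the controller is back. */
	return 0;
}

static struct umc_driver example_cap_driver = {
	/* .name, .cap_id, .probe, .remove ... as for any umc_driver */
	.pre_reset  = example_cap_pre_reset,
	.post_reset = example_cap_post_reset,
};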
+1 -10
drivers/uwb/umc-dev.c
··· 7 */ 8 #include <linux/kernel.h> 9 #include <linux/uwb/umc.h> 10 - #define D_LOCAL 0 11 - #include <linux/uwb/debug.h> 12 13 static void umc_device_release(struct device *dev) 14 { ··· 29 30 umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL); 31 if (umc) { 32 - snprintf(umc->dev.bus_id, sizeof(umc->dev.bus_id), "%s-%d", 33 - parent->bus_id, n); 34 umc->dev.parent = parent; 35 umc->dev.bus = &umc_bus_type; 36 umc->dev.release = umc_device_release; ··· 51 { 52 int err; 53 54 - d_fnstart(3, &umc->dev, "(umc_dev %p)\n", umc); 55 - 56 err = request_resource(umc->resource.parent, &umc->resource); 57 if (err < 0) { 58 dev_err(&umc->dev, "can't allocate resource range " ··· 64 err = device_register(&umc->dev); 65 if (err < 0) 66 goto error_device_register; 67 - d_fnend(3, &umc->dev, "(umc_dev %p) = 0\n", umc); 68 return 0; 69 70 error_device_register: 71 release_resource(&umc->resource); 72 error_request_resource: 73 - d_fnend(3, &umc->dev, "(umc_dev %p) = %d\n", umc, err); 74 return err; 75 } 76 EXPORT_SYMBOL_GPL(umc_device_register); ··· 88 if (!umc) 89 return; 90 dev = get_device(&umc->dev); 91 - d_fnstart(3, dev, "(umc_dev %p)\n", umc); 92 device_unregister(&umc->dev); 93 release_resource(&umc->resource); 94 - d_fnend(3, dev, "(umc_dev %p) = void\n", umc); 95 put_device(dev); 96 } 97 EXPORT_SYMBOL_GPL(umc_device_unregister);
··· 7 */ 8 #include <linux/kernel.h> 9 #include <linux/uwb/umc.h> 10 11 static void umc_device_release(struct device *dev) 12 { ··· 31 32 umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL); 33 if (umc) { 34 + dev_set_name(&umc->dev, "%s-%d", dev_name(parent), n); 35 umc->dev.parent = parent; 36 umc->dev.bus = &umc_bus_type; 37 umc->dev.release = umc_device_release; ··· 54 { 55 int err; 56 57 err = request_resource(umc->resource.parent, &umc->resource); 58 if (err < 0) { 59 dev_err(&umc->dev, "can't allocate resource range " ··· 69 err = device_register(&umc->dev); 70 if (err < 0) 71 goto error_device_register; 72 return 0; 73 74 error_device_register: 75 release_resource(&umc->resource); 76 error_request_resource: 77 return err; 78 } 79 EXPORT_SYMBOL_GPL(umc_device_register); ··· 95 if (!umc) 96 return; 97 dev = get_device(&umc->dev); 98 device_unregister(&umc->dev); 99 release_resource(&umc->resource); 100 put_device(dev); 101 } 102 EXPORT_SYMBOL_GPL(umc_device_unregister);
+94 -55
drivers/uwb/uwb-debug.c
··· 4 * 5 * Copyright (C) 2005-2006 Intel Corporation 6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> 7 * 8 * This program is free software; you can redistribute it and/or 9 * modify it under the terms of the GNU General Public License version ··· 34 #include <linux/seq_file.h> 35 36 #include <linux/uwb/debug-cmd.h> 37 - #define D_LOCAL 0 38 - #include <linux/uwb/debug.h> 39 40 #include "uwb-internal.h" 41 - 42 - void dump_bytes(struct device *dev, const void *_buf, size_t rsize) 43 - { 44 - const char *buf = _buf; 45 - char line[32]; 46 - size_t offset = 0; 47 - int cnt, cnt2; 48 - for (cnt = 0; cnt < rsize; cnt += 8) { 49 - size_t rtop = rsize - cnt < 8 ? rsize - cnt : 8; 50 - for (offset = cnt2 = 0; cnt2 < rtop; cnt2++) { 51 - offset += scnprintf(line + offset, sizeof(line) - offset, 52 - "%02x ", buf[cnt + cnt2] & 0xff); 53 - } 54 - if (dev) 55 - dev_info(dev, "%s\n", line); 56 - else 57 - printk(KERN_INFO "%s\n", line); 58 - } 59 - } 60 - EXPORT_SYMBOL_GPL(dump_bytes); 61 62 /* 63 * Debug interface ··· 63 struct dentry *reservations_f; 64 struct dentry *accept_f; 65 struct dentry *drp_avail_f; 66 }; 67 68 static struct dentry *root_dir; 69 70 static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv) 71 { 72 - struct uwb_rc *rc = rsv->rc; 73 - struct device *dev = &rc->uwb_dev.dev; 74 - struct uwb_dev_addr devaddr; 75 - char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; 76 77 - uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); 78 - if (rsv->target.type == UWB_RSV_TARGET_DEV) 79 - devaddr = rsv->target.dev->dev_addr; 80 - else 81 - devaddr = rsv->target.devaddr; 82 - uwb_dev_addr_print(target, sizeof(target), &devaddr); 83 84 - dev_dbg(dev, "debug: rsv %s -> %s: %s\n", 85 - owner, target, uwb_rsv_state_str(rsv->state)); 86 } 87 88 static int cmd_rsv_establish(struct uwb_rc *rc, ··· 95 if (target == NULL) 96 return -ENODEV; 97 98 - rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, NULL); 99 if (rsv == NULL) { 100 uwb_dev_put(target); 101 return -ENOMEM; 102 } 103 104 - rsv->owner = &rc->uwb_dev; 105 - rsv->target.type = UWB_RSV_TARGET_DEV; 106 - rsv->target.dev = target; 107 - rsv->type = cmd->type; 108 - rsv->max_mas = cmd->max_mas; 109 - rsv->min_mas = cmd->min_mas; 110 - rsv->sparsity = cmd->sparsity; 111 112 ret = uwb_rsv_establish(rsv); 113 if (ret) 114 uwb_rsv_destroy(rsv); 115 - else 116 list_add_tail(&rsv->pal_node, &rc->dbg->rsvs); 117 - 118 return ret; 119 } 120 ··· 125 struct uwb_rsv *rsv, *found = NULL; 126 int i = 0; 127 128 list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) { 129 if (i == cmd->index) { 130 found = rsv; 131 break; 132 } 133 } 134 if (!found) 135 return -EINVAL; 136 137 - list_del(&found->pal_node); 138 uwb_rsv_terminate(found); 139 140 return 0; 141 } 142 143 static int command_open(struct inode *inode, struct file *file) ··· 171 { 172 struct uwb_rc *rc = file->private_data; 173 struct uwb_dbg_cmd cmd; 174 - int ret; 175 - 176 if (len != sizeof(struct uwb_dbg_cmd)) 177 return -EINVAL; 178 ··· 185 break; 186 case UWB_DBG_CMD_RSV_TERMINATE: 187 ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate); 188 break; 189 default: 190 return -EINVAL; ··· 291 .owner = THIS_MODULE, 292 }; 293 294 - static void uwb_dbg_new_rsv(struct uwb_rsv *rsv) 295 { 296 - struct uwb_rc *rc = rsv->rc; 297 298 - if (rc->dbg->accept) 299 - uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, NULL); 300 } 301 302 /** ··· 324 return; 325 326 INIT_LIST_HEAD(&rc->dbg->rsvs); 327 328 uwb_pal_init(&rc->dbg->pal); 329 rc->dbg->pal.new_rsv = uwb_dbg_new_rsv; 330 - uwb_pal_register(rc, &rc->dbg->pal); 331 if 
(root_dir) { 332 rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev), 333 root_dir); ··· 351 } 352 353 /** 354 - * uwb_dbg_add_rc - remove a radio controller's debug interface 355 * @rc: the radio controller 356 */ 357 void uwb_dbg_del_rc(struct uwb_rc *rc) ··· 362 return; 363 364 list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) { 365 - uwb_rsv_destroy(rsv); 366 } 367 368 - uwb_pal_unregister(rc, &rc->dbg->pal); 369 370 if (root_dir) { 371 debugfs_remove(rc->dbg->drp_avail_f); ··· 390 void uwb_dbg_exit(void) 391 { 392 debugfs_remove(root_dir); 393 }
··· 4 * 5 * Copyright (C) 2005-2006 Intel Corporation 6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> 7 + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License version ··· 33 #include <linux/seq_file.h> 34 35 #include <linux/uwb/debug-cmd.h> 36 37 #include "uwb-internal.h" 38 39 /* 40 * Debug interface ··· 84 struct dentry *reservations_f; 85 struct dentry *accept_f; 86 struct dentry *drp_avail_f; 87 + spinlock_t list_lock; 88 }; 89 90 static struct dentry *root_dir; 91 92 static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv) 93 { 94 + struct uwb_dbg *dbg = rsv->pal_priv; 95 96 + uwb_rsv_dump("debug", rsv); 97 98 + if (rsv->state == UWB_RSV_STATE_NONE) { 99 + spin_lock(&dbg->list_lock); 100 + list_del(&rsv->pal_node); 101 + spin_unlock(&dbg->list_lock); 102 + uwb_rsv_destroy(rsv); 103 + } 104 } 105 106 static int cmd_rsv_establish(struct uwb_rc *rc, ··· 119 if (target == NULL) 120 return -ENODEV; 121 122 + rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, rc->dbg); 123 if (rsv == NULL) { 124 uwb_dev_put(target); 125 return -ENOMEM; 126 } 127 128 + rsv->target.type = UWB_RSV_TARGET_DEV; 129 + rsv->target.dev = target; 130 + rsv->type = cmd->type; 131 + rsv->max_mas = cmd->max_mas; 132 + rsv->min_mas = cmd->min_mas; 133 + rsv->max_interval = cmd->max_interval; 134 135 ret = uwb_rsv_establish(rsv); 136 if (ret) 137 uwb_rsv_destroy(rsv); 138 + else { 139 + spin_lock(&(rc->dbg)->list_lock); 140 list_add_tail(&rsv->pal_node, &rc->dbg->rsvs); 141 + spin_unlock(&(rc->dbg)->list_lock); 142 + } 143 return ret; 144 } 145 ··· 148 struct uwb_rsv *rsv, *found = NULL; 149 int i = 0; 150 151 + spin_lock(&(rc->dbg)->list_lock); 152 + 153 list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) { 154 if (i == cmd->index) { 155 found = rsv; 156 + uwb_rsv_get(found); 157 break; 158 } 159 + i++; 160 } 161 + 162 + spin_unlock(&(rc->dbg)->list_lock); 163 + 164 if (!found) 165 return -EINVAL; 166 167 uwb_rsv_terminate(found); 168 + uwb_rsv_put(found); 169 170 return 0; 171 + } 172 + 173 + static int cmd_ie_add(struct uwb_rc *rc, struct uwb_dbg_cmd_ie *ie_to_add) 174 + { 175 + return uwb_rc_ie_add(rc, 176 + (const struct uwb_ie_hdr *) ie_to_add->data, 177 + ie_to_add->len); 178 + } 179 + 180 + static int cmd_ie_rm(struct uwb_rc *rc, struct uwb_dbg_cmd_ie *ie_to_rm) 181 + { 182 + return uwb_rc_ie_rm(rc, ie_to_rm->data[0]); 183 } 184 185 static int command_open(struct inode *inode, struct file *file) ··· 175 { 176 struct uwb_rc *rc = file->private_data; 177 struct uwb_dbg_cmd cmd; 178 + int ret = 0; 179 + 180 if (len != sizeof(struct uwb_dbg_cmd)) 181 return -EINVAL; 182 ··· 189 break; 190 case UWB_DBG_CMD_RSV_TERMINATE: 191 ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate); 192 + break; 193 + case UWB_DBG_CMD_IE_ADD: 194 + ret = cmd_ie_add(rc, &cmd.ie_add); 195 + break; 196 + case UWB_DBG_CMD_IE_RM: 197 + ret = cmd_ie_rm(rc, &cmd.ie_rm); 198 + break; 199 + case UWB_DBG_CMD_RADIO_START: 200 + ret = uwb_radio_start(&rc->dbg->pal); 201 + break; 202 + case UWB_DBG_CMD_RADIO_STOP: 203 + uwb_radio_stop(&rc->dbg->pal); 204 break; 205 default: 206 return -EINVAL; ··· 283 .owner = THIS_MODULE, 284 }; 285 286 + static void uwb_dbg_channel_changed(struct uwb_pal *pal, int channel) 287 { 288 + struct device *dev = &pal->rc->uwb_dev.dev; 289 290 + if (channel > 0) 291 + dev_info(dev, "debug: channel %d started\n", channel); 292 + else 293 + dev_info(dev, "debug: channel stopped\n"); 294 + } 295 + 296 + static 
void uwb_dbg_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv) 297 + { 298 + struct uwb_dbg *dbg = container_of(pal, struct uwb_dbg, pal); 299 + 300 + if (dbg->accept) { 301 + spin_lock(&dbg->list_lock); 302 + list_add_tail(&rsv->pal_node, &dbg->rsvs); 303 + spin_unlock(&dbg->list_lock); 304 + uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, dbg); 305 + } 306 } 307 308 /** ··· 302 return; 303 304 INIT_LIST_HEAD(&rc->dbg->rsvs); 305 + spin_lock_init(&(rc->dbg)->list_lock); 306 307 uwb_pal_init(&rc->dbg->pal); 308 + rc->dbg->pal.rc = rc; 309 + rc->dbg->pal.channel_changed = uwb_dbg_channel_changed; 310 rc->dbg->pal.new_rsv = uwb_dbg_new_rsv; 311 + uwb_pal_register(&rc->dbg->pal); 312 + 313 if (root_dir) { 314 rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev), 315 root_dir); ··· 325 } 326 327 /** 328 + * uwb_dbg_del_rc - remove a radio controller's debug interface 329 * @rc: the radio controller 330 */ 331 void uwb_dbg_del_rc(struct uwb_rc *rc) ··· 336 return; 337 338 list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) { 339 + uwb_rsv_terminate(rsv); 340 } 341 342 + uwb_pal_unregister(&rc->dbg->pal); 343 344 if (root_dir) { 345 debugfs_remove(rc->dbg->drp_avail_f); ··· 364 void uwb_dbg_exit(void) 365 { 366 debugfs_remove(root_dir); 367 + } 368 + 369 + /** 370 + * uwb_dbg_create_pal_dir - create a debugfs directory for a PAL 371 + * @pal: The PAL. 372 + */ 373 + struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal) 374 + { 375 + struct uwb_rc *rc = pal->rc; 376 + 377 + if (root_dir && rc->dbg && rc->dbg->root_d && pal->name) 378 + return debugfs_create_dir(pal->name, rc->dbg->root_d); 379 + return NULL; 380 }
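Note: with the new UWB_DBG_CMD_* cases the per-controller debugfs command file can drive the radio and the IE list from user space; commands are written as a binary struct uwb_dbg_cmd. The user-space sketch below is heavily hedged: the 'type' field name, the header path and the debugfs location of the command file are assumptions based on include/linux/uwb/debug-cmd.h and the directory layout created above, not something this diff spells out.

/* Illustrative only: ask the debug PAL of uwb0 to start the radio. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/uwb/debug-cmd.h>	/* assumed header location */

int main(void)
{
	struct uwb_dbg_cmd cmd;
	int fd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = UWB_DBG_CMD_RADIO_START;	/* 'type' field is assumed */

	/* Path assumes debugfs at /sys/kernel/debug and a controller uwb0. */
	fd = open("/sys/kernel/debug/uwb/uwb0/command", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, &cmd, sizeof(cmd)) != (ssize_t)sizeof(cmd)) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}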
+99 -27
drivers/uwb/uwb-internal.h
··· 66 unsigned channel, enum uwb_scan_type type, 67 unsigned bpst_offset); 68 extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc); 69 - extern ssize_t uwb_rc_print_IEs(struct uwb_rc *rc, char *, size_t); 70 - extern void uwb_rc_ie_init(struct uwb_rc *); 71 - extern void uwb_rc_ie_init(struct uwb_rc *); 72 - extern ssize_t uwb_rc_ie_setup(struct uwb_rc *); 73 - extern void uwb_rc_ie_release(struct uwb_rc *); 74 - extern int uwb_rc_ie_add(struct uwb_rc *, 75 - const struct uwb_ie_hdr *, size_t); 76 - extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie); 77 78 extern const char *uwb_rc_strerror(unsigned code); 79 ··· 92 93 struct uwb_rc_neh; 94 95 void uwb_rc_neh_create(struct uwb_rc *rc); 96 void uwb_rc_neh_destroy(struct uwb_rc *rc); 97 ··· 112 extern int uwb_est_create(void); 113 extern void uwb_est_destroy(void); 114 115 116 /* 117 * UWB Events & management daemon 118 */ ··· 228 }; 229 }; 230 231 - extern void uwbd_start(void); 232 - extern void uwbd_stop(void); 233 extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask); 234 extern void uwbd_event_queue(struct uwb_event *); 235 void uwbd_flush(struct uwb_rc *rc); 236 237 /* UWB event handlers */ 238 extern int uwbd_evt_handle_rc_beacon(struct uwb_event *); 239 extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *); 240 extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *); ··· 261 */ 262 263 extern unsigned long beacon_timeout_ms; 264 - 265 - /** Beacon cache list */ 266 - struct uwb_beca { 267 - struct list_head list; 268 - size_t entries; 269 - struct mutex mutex; 270 - }; 271 - 272 - extern struct uwb_beca uwb_beca; 273 274 /** 275 * Beacon cache entry ··· 288 struct uwb_beacon_frame; 289 extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *, 290 char *, size_t); 291 - extern struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *, 292 - struct uwb_beacon_frame *, 293 - unsigned long); 294 295 extern void uwb_bce_kfree(struct kref *_bce); 296 static inline void uwb_bce_get(struct uwb_beca_e *bce) ··· 298 { 299 kref_put(&bce->refcnt, uwb_bce_kfree); 300 } 301 - extern void uwb_beca_purge(void); 302 - extern void uwb_beca_release(void); 303 304 struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, 305 const struct uwb_dev_addr *devaddr); 306 struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc, 307 const struct uwb_mac_addr *macaddr); 308 309 /* -- UWB Sysfs representation */ 310 extern struct class uwb_rc_class; ··· 321 void uwb_rsv_init(struct uwb_rc *rc); 322 int uwb_rsv_setup(struct uwb_rc *rc); 323 void uwb_rsv_cleanup(struct uwb_rc *rc); 324 325 void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state); 326 void uwb_rsv_remove(struct uwb_rsv *rsv); 327 struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, 328 struct uwb_ie_drp *drp_ie); 329 void uwb_rsv_sched_update(struct uwb_rc *rc); 330 331 - void uwb_drp_handle_timeout(struct uwb_rsv *rsv); 332 int uwb_drp_ie_update(struct uwb_rsv *rsv); 333 void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie); 334 335 void uwb_drp_avail_init(struct uwb_rc *rc); 336 int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas); 337 void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas); 338 void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas); ··· 362 void uwb_dbg_exit(void); 363 void uwb_dbg_add_rc(struct uwb_rc *rc); 364 void uwb_dbg_del_rc(struct uwb_rc *rc); 365 - 366 - /* Workarounds for version specific stuff */ 367 368 static 
inline void uwb_dev_lock(struct uwb_dev *uwb_dev) 369 {
··· 66 unsigned channel, enum uwb_scan_type type, 67 unsigned bpst_offset); 68 extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc); 69 + 70 + void uwb_rc_ie_init(struct uwb_rc *); 71 + int uwb_rc_ie_setup(struct uwb_rc *); 72 + void uwb_rc_ie_release(struct uwb_rc *); 73 + int uwb_ie_dump_hex(const struct uwb_ie_hdr *ies, size_t len, 74 + char *buf, size_t size); 75 + int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *); 76 + 77 78 extern const char *uwb_rc_strerror(unsigned code); 79 ··· 92 93 struct uwb_rc_neh; 94 95 + extern int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, 96 + struct uwb_rccb *cmd, size_t cmd_size, 97 + u8 expected_type, u16 expected_event, 98 + uwb_rc_cmd_cb_f cb, void *arg); 99 + 100 + 101 void uwb_rc_neh_create(struct uwb_rc *rc); 102 void uwb_rc_neh_destroy(struct uwb_rc *rc); 103 ··· 106 extern int uwb_est_create(void); 107 extern void uwb_est_destroy(void); 108 109 + /* 110 + * UWB conflicting alien reservations 111 + */ 112 + struct uwb_cnflt_alien { 113 + struct uwb_rc *rc; 114 + struct list_head rc_node; 115 + struct uwb_mas_bm mas; 116 + struct timer_list timer; 117 + struct work_struct cnflt_update_work; 118 + }; 119 120 + enum uwb_uwb_rsv_alloc_result { 121 + UWB_RSV_ALLOC_FOUND = 0, 122 + UWB_RSV_ALLOC_NOT_FOUND, 123 + }; 124 + 125 + enum uwb_rsv_mas_status { 126 + UWB_RSV_MAS_NOT_AVAIL = 1, 127 + UWB_RSV_MAS_SAFE, 128 + UWB_RSV_MAS_UNSAFE, 129 + }; 130 + 131 + struct uwb_rsv_col_set_info { 132 + unsigned char start_col; 133 + unsigned char interval; 134 + unsigned char safe_mas_per_col; 135 + unsigned char unsafe_mas_per_col; 136 + }; 137 + 138 + struct uwb_rsv_col_info { 139 + unsigned char max_avail_safe; 140 + unsigned char max_avail_unsafe; 141 + unsigned char highest_mas[UWB_MAS_PER_ZONE]; 142 + struct uwb_rsv_col_set_info csi; 143 + }; 144 + 145 + struct uwb_rsv_row_info { 146 + unsigned char avail[UWB_MAS_PER_ZONE]; 147 + unsigned char free_rows; 148 + unsigned char used_rows; 149 + }; 150 + 151 + /* 152 + * UWB find allocation 153 + */ 154 + struct uwb_rsv_alloc_info { 155 + unsigned char bm[UWB_MAS_PER_ZONE * UWB_NUM_ZONES]; 156 + struct uwb_rsv_col_info ci[UWB_NUM_ZONES]; 157 + struct uwb_rsv_row_info ri; 158 + struct uwb_mas_bm *not_available; 159 + struct uwb_mas_bm *result; 160 + int min_mas; 161 + int max_mas; 162 + int max_interval; 163 + int total_allocated_mases; 164 + int safe_allocated_mases; 165 + int unsafe_allocated_mases; 166 + int interval; 167 + }; 168 + 169 + int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available, 170 + struct uwb_mas_bm *result); 171 + void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc); 172 /* 173 * UWB Events & management daemon 174 */ ··· 160 }; 161 }; 162 163 + extern void uwbd_start(struct uwb_rc *rc); 164 + extern void uwbd_stop(struct uwb_rc *rc); 165 extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask); 166 extern void uwbd_event_queue(struct uwb_event *); 167 void uwbd_flush(struct uwb_rc *rc); 168 169 /* UWB event handlers */ 170 + extern int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *); 171 extern int uwbd_evt_handle_rc_beacon(struct uwb_event *); 172 extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *); 173 extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *); ··· 192 */ 193 194 extern unsigned long beacon_timeout_ms; 195 196 /** 197 * Beacon cache entry ··· 228 struct uwb_beacon_frame; 229 extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *, 230 char *, size_t); 231 232 extern void 
uwb_bce_kfree(struct kref *_bce); 233 static inline void uwb_bce_get(struct uwb_beca_e *bce) ··· 241 { 242 kref_put(&bce->refcnt, uwb_bce_kfree); 243 } 244 + extern void uwb_beca_purge(struct uwb_rc *rc); 245 + extern void uwb_beca_release(struct uwb_rc *rc); 246 247 struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, 248 const struct uwb_dev_addr *devaddr); 249 struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc, 250 const struct uwb_mac_addr *macaddr); 251 + 252 + int uwb_radio_setup(struct uwb_rc *rc); 253 + void uwb_radio_reset_state(struct uwb_rc *rc); 254 + void uwb_radio_shutdown(struct uwb_rc *rc); 255 + int uwb_radio_force_channel(struct uwb_rc *rc, int channel); 256 257 /* -- UWB Sysfs representation */ 258 extern struct class uwb_rc_class; ··· 259 void uwb_rsv_init(struct uwb_rc *rc); 260 int uwb_rsv_setup(struct uwb_rc *rc); 261 void uwb_rsv_cleanup(struct uwb_rc *rc); 262 + void uwb_rsv_remove_all(struct uwb_rc *rc); 263 + void uwb_rsv_get(struct uwb_rsv *rsv); 264 + void uwb_rsv_put(struct uwb_rsv *rsv); 265 + bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv); 266 + void uwb_rsv_dump(char *text, struct uwb_rsv *rsv); 267 + int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available); 268 + void uwb_rsv_backoff_win_timer(unsigned long arg); 269 + void uwb_rsv_backoff_win_increment(struct uwb_rc *rc); 270 + int uwb_rsv_status(struct uwb_rsv *rsv); 271 + int uwb_rsv_companion_status(struct uwb_rsv *rsv); 272 273 void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state); 274 void uwb_rsv_remove(struct uwb_rsv *rsv); 275 struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, 276 struct uwb_ie_drp *drp_ie); 277 void uwb_rsv_sched_update(struct uwb_rc *rc); 278 + void uwb_rsv_queue_update(struct uwb_rc *rc); 279 280 int uwb_drp_ie_update(struct uwb_rsv *rsv); 281 void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie); 282 283 void uwb_drp_avail_init(struct uwb_rc *rc); 284 + void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail); 285 int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas); 286 void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas); 287 void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas); ··· 289 void uwb_dbg_exit(void); 290 void uwb_dbg_add_rc(struct uwb_rc *rc); 291 void uwb_dbg_del_rc(struct uwb_rc *rc); 292 + struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal); 293 294 static inline void uwb_dev_lock(struct uwb_dev *uwb_dev) 295 {
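Besides the reservation refcounting (uwb_rsv_get()/uwb_rsv_put()) and the per-RC beacon cache calls, the header now exposes the radio manager entry points that the debug PAL above already uses. A hedged sketch of how a PAL drives the radio with them; the example_* wrappers are illustrative, and <linux/uwb.h> is assumed as the home of the uwb_radio_start()/uwb_radio_stop() declarations (only uwb_radio_force_channel() is shown in this hunk).

#include <linux/uwb.h>

/* ask the radio manager to bring the radio up on behalf of this PAL */
static int example_pal_if_up(struct uwb_pal *pal)
{
        return uwb_radio_start(pal);
}

/* tell the radio manager this PAL no longer needs the radio */
static void example_pal_if_down(struct uwb_pal *pal)
{
        uwb_radio_stop(pal);
}

/* pin the beaconing channel; the meaning of special values (0, -1)
 * follows the radio manager and is not restated here */
static int example_force_channel(struct uwb_rc *rc, int channel)
{
        return uwb_radio_force_channel(rc, channel);
}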
+66 -110
drivers/uwb/uwbd.c
··· 68 * 69 * Handler functions are called normally uwbd_evt_handle_*(). 70 */ 71 - 72 #include <linux/kthread.h> 73 #include <linux/module.h> 74 #include <linux/freezer.h> 75 #include "uwb-internal.h" 76 77 - #define D_LOCAL 1 78 - #include <linux/uwb/debug.h> 79 - 80 - 81 - /** 82 * UWBD Event handler function signature 83 * 84 * Return !0 if the event needs not to be freed (ie the handler ··· 97 const char *name; 98 }; 99 100 - /** Table of handlers for and properties of the UWBD Radio Control Events */ 101 - static 102 - struct uwbd_event uwbd_events[] = { 103 [UWB_RC_EVT_BEACON] = { 104 .handler = uwbd_evt_handle_rc_beacon, 105 .name = "BEACON_RECEIVED" ··· 141 size_t size; 142 }; 143 144 - #define UWBD_EVT_TYPE_HANDLER(n,a) { \ 145 - .name = (n), \ 146 - .uwbd_events = (a), \ 147 - .size = sizeof(a)/sizeof((a)[0]) \ 148 - } 149 - 150 - 151 - /** Table of handlers for each UWBD Event type. */ 152 - static 153 - struct uwbd_evt_type_handler uwbd_evt_type_handlers[] = { 154 - [UWB_RC_CET_GENERAL] = UWBD_EVT_TYPE_HANDLER("RC", uwbd_events) 155 }; 156 - 157 - static const 158 - size_t uwbd_evt_type_handlers_len = 159 - sizeof(uwbd_evt_type_handlers) / sizeof(uwbd_evt_type_handlers[0]); 160 161 static const struct uwbd_event uwbd_message_handlers[] = { 162 [UWB_EVT_MSG_RESET] = { ··· 157 }, 158 }; 159 160 - static DEFINE_MUTEX(uwbd_event_mutex); 161 - 162 - /** 163 * Handle an URC event passed to the UWB Daemon 164 * 165 * @evt: the event to handle ··· 177 static 178 int uwbd_event_handle_urc(struct uwb_event *evt) 179 { 180 struct uwbd_evt_type_handler *type_table; 181 uwbd_evt_handler_f handler; 182 u8 type, context; ··· 187 event = le16_to_cpu(evt->notif.rceb->wEvent); 188 context = evt->notif.rceb->bEventContext; 189 190 - if (type > uwbd_evt_type_handlers_len) { 191 - printk(KERN_ERR "UWBD: event type %u: unknown (too high)\n", type); 192 - return -EINVAL; 193 - } 194 - type_table = &uwbd_evt_type_handlers[type]; 195 - if (type_table->uwbd_events == NULL) { 196 - printk(KERN_ERR "UWBD: event type %u: unknown\n", type); 197 - return -EINVAL; 198 - } 199 - if (event > type_table->size) { 200 - printk(KERN_ERR "UWBD: event %s[%u]: unknown (too high)\n", 201 - type_table->name, event); 202 - return -EINVAL; 203 - } 204 handler = type_table->uwbd_events[event].handler; 205 - if (handler == NULL) { 206 - printk(KERN_ERR "UWBD: event %s[%u]: unknown\n", type_table->name, event); 207 - return -EINVAL; 208 - } 209 - return (*handler)(evt); 210 } 211 212 static void uwbd_event_handle_message(struct uwb_event *evt) ··· 219 return; 220 } 221 222 - /* If this is a reset event we need to drop the 223 - * uwbd_event_mutex or it deadlocks when the reset handler 224 - * attempts to flush the uwbd events. 
*/ 225 - if (evt->message == UWB_EVT_MSG_RESET) 226 - mutex_unlock(&uwbd_event_mutex); 227 - 228 result = uwbd_message_handlers[evt->message].handler(evt); 229 if (result < 0) 230 dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n", 231 uwbd_message_handlers[evt->message].name, result); 232 - 233 - if (evt->message == UWB_EVT_MSG_RESET) 234 - mutex_lock(&uwbd_event_mutex); 235 } 236 237 static void uwbd_event_handle(struct uwb_event *evt) ··· 250 251 __uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */ 252 } 253 - /* The UWB Daemon */ 254 - 255 - 256 - /** Daemon's PID: used to decide if we can queue or not */ 257 - static int uwbd_pid; 258 - /** Daemon's task struct for managing the kthread */ 259 - static struct task_struct *uwbd_task; 260 - /** Daemon's waitqueue for waiting for new events */ 261 - static DECLARE_WAIT_QUEUE_HEAD(uwbd_wq); 262 - /** Daemon's list of events; we queue/dequeue here */ 263 - static struct list_head uwbd_event_list = LIST_HEAD_INIT(uwbd_event_list); 264 - /** Daemon's list lock to protect concurent access */ 265 - static DEFINE_SPINLOCK(uwbd_event_list_lock); 266 - 267 268 /** 269 * UWB Daemon ··· 263 * FIXME: should change so we don't have a 1HZ timer all the time, but 264 * only if there are devices. 265 */ 266 - static int uwbd(void *unused) 267 { 268 unsigned long flags; 269 - struct list_head list = LIST_HEAD_INIT(list); 270 - struct uwb_event *evt, *nxt; 271 int should_stop = 0; 272 while (1) { 273 wait_event_interruptible_timeout( 274 - uwbd_wq, 275 - !list_empty(&uwbd_event_list) 276 || (should_stop = kthread_should_stop()), 277 HZ); 278 if (should_stop) 279 break; 280 try_to_freeze(); 281 282 - mutex_lock(&uwbd_event_mutex); 283 - spin_lock_irqsave(&uwbd_event_list_lock, flags); 284 - list_splice_init(&uwbd_event_list, &list); 285 - spin_unlock_irqrestore(&uwbd_event_list_lock, flags); 286 - list_for_each_entry_safe(evt, nxt, &list, list_node) { 287 list_del(&evt->list_node); 288 uwbd_event_handle(evt); 289 kfree(evt); 290 } 291 - mutex_unlock(&uwbd_event_mutex); 292 293 - uwb_beca_purge(); /* Purge devices that left */ 294 } 295 return 0; 296 } 297 298 299 /** Start the UWB daemon */ 300 - void uwbd_start(void) 301 { 302 - uwbd_task = kthread_run(uwbd, NULL, "uwbd"); 303 - if (uwbd_task == NULL) 304 printk(KERN_ERR "UWB: Cannot start management daemon; " 305 "UWB won't work\n"); 306 else 307 - uwbd_pid = uwbd_task->pid; 308 } 309 310 /* Stop the UWB daemon and free any unprocessed events */ 311 - void uwbd_stop(void) 312 { 313 - unsigned long flags; 314 - struct uwb_event *evt, *nxt; 315 - kthread_stop(uwbd_task); 316 - spin_lock_irqsave(&uwbd_event_list_lock, flags); 317 - uwbd_pid = 0; 318 - list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) { 319 - if (evt->type == UWB_EVT_TYPE_NOTIF) 320 - kfree(evt->notif.rceb); 321 - kfree(evt); 322 - } 323 - spin_unlock_irqrestore(&uwbd_event_list_lock, flags); 324 - uwb_beca_release(); 325 } 326 327 /* ··· 331 */ 332 void uwbd_event_queue(struct uwb_event *evt) 333 { 334 unsigned long flags; 335 - spin_lock_irqsave(&uwbd_event_list_lock, flags); 336 - if (uwbd_pid != 0) { 337 - list_add(&evt->list_node, &uwbd_event_list); 338 - wake_up_all(&uwbd_wq); 339 } else { 340 __uwb_rc_put(evt->rc); 341 if (evt->type == UWB_EVT_TYPE_NOTIF) 342 kfree(evt->notif.rceb); 343 kfree(evt); 344 } 345 - spin_unlock_irqrestore(&uwbd_event_list_lock, flags); 346 return; 347 } 348 ··· 352 { 353 struct uwb_event *evt, *nxt; 354 355 - mutex_lock(&uwbd_event_mutex); 356 - 357 - 
spin_lock_irq(&uwbd_event_list_lock); 358 - list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) { 359 if (evt->rc == rc) { 360 __uwb_rc_put(rc); 361 list_del(&evt->list_node); ··· 362 kfree(evt); 363 } 364 } 365 - spin_unlock_irq(&uwbd_event_list_lock); 366 - 367 - mutex_unlock(&uwbd_event_mutex); 368 }
··· 68 * 69 * Handler functions are called normally uwbd_evt_handle_*(). 70 */ 71 #include <linux/kthread.h> 72 #include <linux/module.h> 73 #include <linux/freezer.h> 74 + 75 #include "uwb-internal.h" 76 77 + /* 78 * UWBD Event handler function signature 79 * 80 * Return !0 if the event needs not to be freed (ie the handler ··· 101 const char *name; 102 }; 103 104 + /* Table of handlers for and properties of the UWBD Radio Control Events */ 105 + static struct uwbd_event uwbd_urc_events[] = { 106 + [UWB_RC_EVT_IE_RCV] = { 107 + .handler = uwbd_evt_handle_rc_ie_rcv, 108 + .name = "IE_RECEIVED" 109 + }, 110 [UWB_RC_EVT_BEACON] = { 111 .handler = uwbd_evt_handle_rc_beacon, 112 .name = "BEACON_RECEIVED" ··· 142 size_t size; 143 }; 144 145 + /* Table of handlers for each UWBD Event type. */ 146 + static struct uwbd_evt_type_handler uwbd_urc_evt_type_handlers[] = { 147 + [UWB_RC_CET_GENERAL] = { 148 + .name = "URC", 149 + .uwbd_events = uwbd_urc_events, 150 + .size = ARRAY_SIZE(uwbd_urc_events), 151 + }, 152 }; 153 154 static const struct uwbd_event uwbd_message_handlers[] = { 155 [UWB_EVT_MSG_RESET] = { ··· 166 }, 167 }; 168 169 + /* 170 * Handle an URC event passed to the UWB Daemon 171 * 172 * @evt: the event to handle ··· 188 static 189 int uwbd_event_handle_urc(struct uwb_event *evt) 190 { 191 + int result = -EINVAL; 192 struct uwbd_evt_type_handler *type_table; 193 uwbd_evt_handler_f handler; 194 u8 type, context; ··· 197 event = le16_to_cpu(evt->notif.rceb->wEvent); 198 context = evt->notif.rceb->bEventContext; 199 200 + if (type > ARRAY_SIZE(uwbd_urc_evt_type_handlers)) 201 + goto out; 202 + type_table = &uwbd_urc_evt_type_handlers[type]; 203 + if (type_table->uwbd_events == NULL) 204 + goto out; 205 + if (event > type_table->size) 206 + goto out; 207 handler = type_table->uwbd_events[event].handler; 208 + if (handler == NULL) 209 + goto out; 210 + 211 + result = (*handler)(evt); 212 + out: 213 + if (result < 0) 214 + dev_err(&evt->rc->uwb_dev.dev, 215 + "UWBD: event 0x%02x/%04x/%02x, handling failed: %d\n", 216 + type, event, context, result); 217 + return result; 218 } 219 220 static void uwbd_event_handle_message(struct uwb_event *evt) ··· 231 return; 232 } 233 234 result = uwbd_message_handlers[evt->message].handler(evt); 235 if (result < 0) 236 dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n", 237 uwbd_message_handlers[evt->message].name, result); 238 } 239 240 static void uwbd_event_handle(struct uwb_event *evt) ··· 271 272 __uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */ 273 } 274 275 /** 276 * UWB Daemon ··· 298 * FIXME: should change so we don't have a 1HZ timer all the time, but 299 * only if there are devices. 
300 */ 301 + static int uwbd(void *param) 302 { 303 + struct uwb_rc *rc = param; 304 unsigned long flags; 305 + struct uwb_event *evt; 306 int should_stop = 0; 307 + 308 while (1) { 309 wait_event_interruptible_timeout( 310 + rc->uwbd.wq, 311 + !list_empty(&rc->uwbd.event_list) 312 || (should_stop = kthread_should_stop()), 313 HZ); 314 if (should_stop) 315 break; 316 try_to_freeze(); 317 318 + spin_lock_irqsave(&rc->uwbd.event_list_lock, flags); 319 + if (!list_empty(&rc->uwbd.event_list)) { 320 + evt = list_first_entry(&rc->uwbd.event_list, struct uwb_event, list_node); 321 list_del(&evt->list_node); 322 + } else 323 + evt = NULL; 324 + spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags); 325 + 326 + if (evt) { 327 uwbd_event_handle(evt); 328 kfree(evt); 329 } 330 331 + uwb_beca_purge(rc); /* Purge devices that left */ 332 } 333 return 0; 334 } 335 336 337 /** Start the UWB daemon */ 338 + void uwbd_start(struct uwb_rc *rc) 339 { 340 + rc->uwbd.task = kthread_run(uwbd, rc, "uwbd"); 341 + if (rc->uwbd.task == NULL) 342 printk(KERN_ERR "UWB: Cannot start management daemon; " 343 "UWB won't work\n"); 344 else 345 + rc->uwbd.pid = rc->uwbd.task->pid; 346 } 347 348 /* Stop the UWB daemon and free any unprocessed events */ 349 + void uwbd_stop(struct uwb_rc *rc) 350 { 351 + kthread_stop(rc->uwbd.task); 352 + uwbd_flush(rc); 353 } 354 355 /* ··· 373 */ 374 void uwbd_event_queue(struct uwb_event *evt) 375 { 376 + struct uwb_rc *rc = evt->rc; 377 unsigned long flags; 378 + 379 + spin_lock_irqsave(&rc->uwbd.event_list_lock, flags); 380 + if (rc->uwbd.pid != 0) { 381 + list_add(&evt->list_node, &rc->uwbd.event_list); 382 + wake_up_all(&rc->uwbd.wq); 383 } else { 384 __uwb_rc_put(evt->rc); 385 if (evt->type == UWB_EVT_TYPE_NOTIF) 386 kfree(evt->notif.rceb); 387 kfree(evt); 388 } 389 + spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags); 390 return; 391 } 392 ··· 392 { 393 struct uwb_event *evt, *nxt; 394 395 + spin_lock_irq(&rc->uwbd.event_list_lock); 396 + list_for_each_entry_safe(evt, nxt, &rc->uwbd.event_list, list_node) { 397 if (evt->rc == rc) { 398 __uwb_rc_put(rc); 399 list_del(&evt->list_node); ··· 404 kfree(evt); 405 } 406 } 407 + spin_unlock_irq(&rc->uwbd.event_list_lock); 408 }
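With the daemon now per radio controller, producers queue onto rc->uwbd.event_list and uwbd_event_queue() wakes rc->uwbd.task (or frees the event when the thread is gone). A minimal sketch of a producer following that contract; example_queue_notif() is illustrative, the notif.size field name and the GFP flag are assumptions, while uwb_event_alloc(), evt->rc/type/notif.rceb and __uwb_rc_get() are taken from this file and uwb-internal.h.

#include "uwb-internal.h"

static int example_queue_notif(struct uwb_rc *rc, struct uwb_rceb *rceb,
                               size_t size)
{
        struct uwb_event *evt;

        evt = uwb_event_alloc(sizeof(*evt), GFP_ATOMIC);
        if (evt == NULL)
                return -ENOMEM;

        __uwb_rc_get(rc);               /* put again by uwbd_event_handle() */
        evt->rc = rc;
        evt->type = UWB_EVT_TYPE_NOTIF;
        evt->notif.rceb = rceb;         /* ownership passes with the event */
        evt->notif.size = size;         /* field name assumed here */

        uwbd_event_queue(evt);          /* wakes rc->uwbd.task, or frees evt */
        return 0;
}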
+39 -79
drivers/uwb/whc-rc.c
··· 39 * them to the hw and transfer the replies/notifications back to the 40 * UWB stack through the UWB daemon (UWBD). 41 */ 42 - #include <linux/version.h> 43 #include <linux/init.h> 44 #include <linux/module.h> 45 #include <linux/pci.h> ··· 48 #include <linux/uwb.h> 49 #include <linux/uwb/whci.h> 50 #include <linux/uwb/umc.h> 51 - #include "uwb-internal.h" 52 53 - #define D_LOCAL 0 54 - #include <linux/uwb/debug.h> 55 56 /** 57 * Descriptor for an instance of the UWB Radio Control Driver that ··· 95 struct device *dev = &whcrc->umc_dev->dev; 96 u32 urccmd; 97 98 - d_fnstart(3, dev, "(%p, %p, %zu)\n", uwb_rc, cmd, cmd_size); 99 - might_sleep(); 100 - 101 - if (cmd_size >= 4096) { 102 - result = -E2BIG; 103 - goto error; 104 - } 105 106 /* 107 * If the URC is halted, then the hardware has reset itself. ··· 107 if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) { 108 dev_err(dev, "requesting reset of halted radio controller\n"); 109 uwb_rc_reset_all(uwb_rc); 110 - result = -EIO; 111 - goto error; 112 } 113 114 result = wait_event_timeout(whcrc->cmd_wq, 115 !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2); 116 if (result == 0) { 117 dev_err(dev, "device is not ready to execute commands\n"); 118 - result = -ETIMEDOUT; 119 - goto error; 120 } 121 122 memmove(whcrc->cmd_buf, cmd, cmd_size); ··· 127 whcrc->rc_base + URCCMD); 128 spin_unlock(&whcrc->irq_lock); 129 130 - error: 131 - d_fnend(3, dev, "(%p, %p, %zu) = %d\n", 132 - uwb_rc, cmd, cmd_size, result); 133 - return result; 134 } 135 136 static int whcrc_reset(struct uwb_rc *rc) ··· 154 static 155 void whcrc_enable_events(struct whcrc *whcrc) 156 { 157 - struct device *dev = &whcrc->umc_dev->dev; 158 u32 urccmd; 159 - 160 - d_fnstart(4, dev, "(whcrc %p)\n", whcrc); 161 162 le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR); 163 ··· 162 urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE; 163 le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD); 164 spin_unlock(&whcrc->irq_lock); 165 - 166 - d_fnend(4, dev, "(whcrc %p) = void\n", whcrc); 167 } 168 169 static void whcrc_event_work(struct work_struct *work) 170 { 171 struct whcrc *whcrc = container_of(work, struct whcrc, event_work); 172 - struct device *dev = &whcrc->umc_dev->dev; 173 size_t size; 174 u64 urcevtaddr; 175 176 urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR); 177 size = urcevtaddr & URCEVTADDR_OFFSET_MASK; 178 - 179 - d_printf(3, dev, "received %zu octet event\n", size); 180 - d_dump(4, dev, whcrc->evt_buf, size > 32 ? 
32 : size); 181 182 uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size); 183 whcrc_enable_events(whcrc); ··· 195 return IRQ_NONE; 196 le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS); 197 198 - d_printf(4, dev, "acked 0x%08x, urcsts 0x%08x\n", 199 - le_readl(whcrc->rc_base + URCSTS), urcsts); 200 - 201 if (urcsts & URCSTS_HSE) { 202 dev_err(dev, "host system error -- hardware halted\n"); 203 /* FIXME: do something sensible here */ 204 goto out; 205 } 206 - if (urcsts & URCSTS_ER) { 207 - d_printf(3, dev, "ER: event ready\n"); 208 schedule_work(&whcrc->event_work); 209 - } 210 - if (urcsts & URCSTS_RCI) { 211 - d_printf(3, dev, "RCI: ready to execute another command\n"); 212 wake_up_all(&whcrc->cmd_wq); 213 - } 214 out: 215 return IRQ_HANDLED; 216 } ··· 222 whcrc->area = umc_dev->resource.start; 223 whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1; 224 result = -EBUSY; 225 - if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) 226 - == NULL) { 227 dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n", 228 whcrc->rc_len, whcrc->area, result); 229 goto error_request_region; ··· 257 dev_err(dev, "Can't allocate evt transfer buffer\n"); 258 goto error_evt_buffer; 259 } 260 - d_printf(3, dev, "UWB RC Interface: %zu bytes at 0x%p, irq %u\n", 261 - whcrc->rc_len, whcrc->rc_base, umc_dev->irq); 262 return 0; 263 264 error_evt_buffer: ··· 301 static int whcrc_start_rc(struct uwb_rc *rc) 302 { 303 struct whcrc *whcrc = rc->priv; 304 - int result = 0; 305 struct device *dev = &whcrc->umc_dev->dev; 306 - unsigned long start, duration; 307 308 /* Reset the thing */ 309 le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD); 310 - if (d_test(3)) 311 - start = jiffies; 312 if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0, 313 - 5000, "device to reset at init") < 0) { 314 - result = -EBUSY; 315 - goto error; 316 - } else if (d_test(3)) { 317 - duration = jiffies - start; 318 - if (duration > msecs_to_jiffies(40)) 319 - dev_err(dev, "Device took %ums to " 320 - "reset. MAX expected: 40ms\n", 321 - jiffies_to_msecs(duration)); 322 - } 323 324 /* Set the event buffer, start the controller (enable IRQs later) */ 325 le_writel(0, whcrc->rc_base + URCINTR); 326 le_writel(URCCMD_RS, whcrc->rc_base + URCCMD); 327 - result = -ETIMEDOUT; 328 - if (d_test(3)) 329 - start = jiffies; 330 if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0, 331 - 5000, "device to start") < 0) 332 - goto error; 333 - if (d_test(3)) { 334 - duration = jiffies - start; 335 - if (duration > msecs_to_jiffies(40)) 336 - dev_err(dev, "Device took %ums to start. 
" 337 - "MAX expected: 40ms\n", 338 - jiffies_to_msecs(duration)); 339 - } 340 whcrc_enable_events(whcrc); 341 - result = 0; 342 le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR); 343 - error: 344 - return result; 345 } 346 347 ··· 339 340 le_writel(0, whcrc->rc_base + URCCMD); 341 whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS, 342 - URCSTS_HALTED, 0, 40, "URCSTS.HALTED"); 343 } 344 345 static void whcrc_init(struct whcrc *whcrc) ··· 365 struct whcrc *whcrc; 366 struct device *dev = &umc_dev->dev; 367 368 - d_fnstart(3, dev, "(umc_dev %p)\n", umc_dev); 369 result = -ENOMEM; 370 uwb_rc = uwb_rc_alloc(); 371 if (uwb_rc == NULL) { ··· 396 if (result < 0) 397 goto error_rc_add; 398 umc_set_drvdata(umc_dev, whcrc); 399 - d_fnend(3, dev, "(umc_dev %p) = 0\n", umc_dev); 400 return 0; 401 402 error_rc_add: ··· 405 error_alloc: 406 uwb_rc_put(uwb_rc); 407 error_rc_alloc: 408 - d_fnend(3, dev, "(umc_dev %p) = %d\n", umc_dev, result); 409 return result; 410 } 411 ··· 427 whcrc_release_rc_umc(whcrc); 428 kfree(whcrc); 429 uwb_rc_put(uwb_rc); 430 - d_printf(1, &umc_dev->dev, "freed whcrc %p\n", whcrc); 431 } 432 433 /* PCI device ID's that we handle [so it gets loaded] */ ··· 455 MODULE_DEVICE_TABLE(pci, whcrc_id_table); 456 457 static struct umc_driver whcrc_driver = { 458 - .name = "whc-rc", 459 - .cap_id = UMC_CAP_ID_WHCI_RC, 460 - .probe = whcrc_probe, 461 - .remove = whcrc_remove, 462 }; 463 464 static int __init whcrc_driver_init(void)
··· 39 * them to the hw and transfer the replies/notifications back to the 40 * UWB stack through the UWB daemon (UWBD). 41 */ 42 #include <linux/init.h> 43 #include <linux/module.h> 44 #include <linux/pci.h> ··· 49 #include <linux/uwb.h> 50 #include <linux/uwb/whci.h> 51 #include <linux/uwb/umc.h> 52 53 + #include "uwb-internal.h" 54 55 /** 56 * Descriptor for an instance of the UWB Radio Control Driver that ··· 98 struct device *dev = &whcrc->umc_dev->dev; 99 u32 urccmd; 100 101 + if (cmd_size >= 4096) 102 + return -EINVAL; 103 104 /* 105 * If the URC is halted, then the hardware has reset itself. ··· 115 if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) { 116 dev_err(dev, "requesting reset of halted radio controller\n"); 117 uwb_rc_reset_all(uwb_rc); 118 + return -EIO; 119 } 120 121 result = wait_event_timeout(whcrc->cmd_wq, 122 !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2); 123 if (result == 0) { 124 dev_err(dev, "device is not ready to execute commands\n"); 125 + return -ETIMEDOUT; 126 } 127 128 memmove(whcrc->cmd_buf, cmd, cmd_size); ··· 137 whcrc->rc_base + URCCMD); 138 spin_unlock(&whcrc->irq_lock); 139 140 + return 0; 141 } 142 143 static int whcrc_reset(struct uwb_rc *rc) ··· 167 static 168 void whcrc_enable_events(struct whcrc *whcrc) 169 { 170 u32 urccmd; 171 172 le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR); 173 ··· 178 urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE; 179 le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD); 180 spin_unlock(&whcrc->irq_lock); 181 } 182 183 static void whcrc_event_work(struct work_struct *work) 184 { 185 struct whcrc *whcrc = container_of(work, struct whcrc, event_work); 186 size_t size; 187 u64 urcevtaddr; 188 189 urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR); 190 size = urcevtaddr & URCEVTADDR_OFFSET_MASK; 191 192 uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size); 193 whcrc_enable_events(whcrc); ··· 217 return IRQ_NONE; 218 le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS); 219 220 if (urcsts & URCSTS_HSE) { 221 dev_err(dev, "host system error -- hardware halted\n"); 222 /* FIXME: do something sensible here */ 223 goto out; 224 } 225 + if (urcsts & URCSTS_ER) 226 schedule_work(&whcrc->event_work); 227 + if (urcsts & URCSTS_RCI) 228 wake_up_all(&whcrc->cmd_wq); 229 out: 230 return IRQ_HANDLED; 231 } ··· 251 whcrc->area = umc_dev->resource.start; 252 whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1; 253 result = -EBUSY; 254 + if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) == NULL) { 255 dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n", 256 whcrc->rc_len, whcrc->area, result); 257 goto error_request_region; ··· 287 dev_err(dev, "Can't allocate evt transfer buffer\n"); 288 goto error_evt_buffer; 289 } 290 return 0; 291 292 error_evt_buffer: ··· 333 static int whcrc_start_rc(struct uwb_rc *rc) 334 { 335 struct whcrc *whcrc = rc->priv; 336 struct device *dev = &whcrc->umc_dev->dev; 337 338 /* Reset the thing */ 339 le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD); 340 if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0, 341 + 5000, "hardware reset") < 0) 342 + return -EBUSY; 343 344 /* Set the event buffer, start the controller (enable IRQs later) */ 345 le_writel(0, whcrc->rc_base + URCINTR); 346 le_writel(URCCMD_RS, whcrc->rc_base + URCCMD); 347 if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0, 348 + 5000, "radio controller start") < 0) 349 + return -ETIMEDOUT; 350 whcrc_enable_events(whcrc); 351 
le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR); 352 + return 0; 353 } 354 355 ··· 395 396 le_writel(0, whcrc->rc_base + URCCMD); 397 whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS, 398 + URCSTS_HALTED, URCSTS_HALTED, 100, "radio controller stop"); 399 } 400 401 static void whcrc_init(struct whcrc *whcrc) ··· 421 struct whcrc *whcrc; 422 struct device *dev = &umc_dev->dev; 423 424 result = -ENOMEM; 425 uwb_rc = uwb_rc_alloc(); 426 if (uwb_rc == NULL) { ··· 453 if (result < 0) 454 goto error_rc_add; 455 umc_set_drvdata(umc_dev, whcrc); 456 return 0; 457 458 error_rc_add: ··· 463 error_alloc: 464 uwb_rc_put(uwb_rc); 465 error_rc_alloc: 466 return result; 467 } 468 ··· 486 whcrc_release_rc_umc(whcrc); 487 kfree(whcrc); 488 uwb_rc_put(uwb_rc); 489 + } 490 + 491 + static int whcrc_pre_reset(struct umc_dev *umc) 492 + { 493 + struct whcrc *whcrc = umc_get_drvdata(umc); 494 + struct uwb_rc *uwb_rc = whcrc->uwb_rc; 495 + 496 + uwb_rc_pre_reset(uwb_rc); 497 + return 0; 498 + } 499 + 500 + static int whcrc_post_reset(struct umc_dev *umc) 501 + { 502 + struct whcrc *whcrc = umc_get_drvdata(umc); 503 + struct uwb_rc *uwb_rc = whcrc->uwb_rc; 504 + 505 + uwb_rc_post_reset(uwb_rc); 506 + return 0; 507 } 508 509 /* PCI device ID's that we handle [so it gets loaded] */ ··· 497 MODULE_DEVICE_TABLE(pci, whcrc_id_table); 498 499 static struct umc_driver whcrc_driver = { 500 + .name = "whc-rc", 501 + .cap_id = UMC_CAP_ID_WHCI_RC, 502 + .probe = whcrc_probe, 503 + .remove = whcrc_remove, 504 + .pre_reset = whcrc_pre_reset, 505 + .post_reset = whcrc_post_reset, 506 }; 507 508 static int __init whcrc_driver_init(void)
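whc-rc now takes part in controlled bus resets through the new pre_reset/post_reset members of struct umc_driver, bracketing the reset with uwb_rc_pre_reset()/uwb_rc_post_reset(). A sketch of another UMC driver opting in; all example_* callbacks are hypothetical, and the probe/remove prototypes are assumed to mirror whc-rc's.

#include <linux/uwb/umc.h>

static int example_probe(struct umc_dev *umc)
{
        return 0;       /* map resources, request the IRQ, ... */
}

static void example_remove(struct umc_dev *umc)
{
        /* undo whatever probe set up */
}

static int example_pre_reset(struct umc_dev *umc)
{
        /* quiesce the device: the UMC bus is about to be reset */
        return 0;
}

static int example_post_reset(struct umc_dev *umc)
{
        /* re-program the device now that the reset has completed */
        return 0;
}

static struct umc_driver example_umc_driver = {
        .name       = "example-umc",
        .cap_id     = UMC_CAP_ID_WHCI_RC,       /* capability ID is illustrative */
        .probe      = example_probe,
        .remove     = example_remove,
        .pre_reset  = example_pre_reset,
        .post_reset = example_post_reset,
};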
+3 -3
drivers/uwb/whci.c
··· 67 val = le_readl(reg); 68 if ((val & mask) == result) 69 break; 70 - msleep(10); 71 if (t >= max_ms) { 72 - dev_err(dev, "timed out waiting for %s ", tag); 73 return -ETIMEDOUT; 74 } 75 t += 10; 76 } 77 return 0; ··· 111 + UWBCAPDATA_TO_OFFSET(capdata); 112 umc->resource.end = umc->resource.start 113 + (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1; 114 - umc->resource.name = umc->dev.bus_id; 115 umc->resource.flags = card->pci->resource[bar].flags; 116 umc->resource.parent = &card->pci->resource[bar]; 117 umc->irq = card->pci->irq;
··· 67 val = le_readl(reg); 68 if ((val & mask) == result) 69 break; 70 if (t >= max_ms) { 71 + dev_err(dev, "%s timed out\n", tag); 72 return -ETIMEDOUT; 73 } 74 + msleep(10); 75 t += 10; 76 } 77 return 0; ··· 111 + UWBCAPDATA_TO_OFFSET(capdata); 112 umc->resource.end = umc->resource.start 113 + (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1; 114 + umc->resource.name = dev_name(&umc->dev); 115 umc->resource.flags = card->pci->resource[bar].flags; 116 umc->resource.parent = &card->pci->resource[bar]; 117 umc->irq = card->pci->irq;
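whci_wait_for() now sleeps between polls and logs "<tag> timed out" once max_ms elapses; note also that whcrc_stop_rc() above now waits for URCSTS_HALTED to become set rather than cleared. A small usage sketch mirroring whcrc_start_rc(); example_wait_reset_done() is illustrative, the call itself matches the callers in whc-rc.c.

#include <linux/device.h>
#include <linux/uwb/whci.h>

static int example_wait_reset_done(struct device *dev, void __iomem *rc_base)
{
        /* poll URCCMD every 10 ms until URCCMD_RESET clears, up to 5000 ms */
        if (whci_wait_for(dev, rc_base + URCCMD, URCCMD_RESET, 0,
                          5000, "hardware reset") < 0)
                return -EBUSY;
        return 0;
}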
+2 -17
drivers/uwb/wlp/eda.c
··· 51 * the tag and address of the transmitting neighbor. 52 */ 53 54 - #define D_LOCAL 5 55 #include <linux/netdevice.h> 56 - #include <linux/uwb/debug.h> 57 #include <linux/etherdevice.h> 58 #include <linux/wlp.h> 59 #include "wlp-internal.h" ··· 302 { 303 int result = 0; 304 struct wlp *wlp = container_of(eda, struct wlp, eda); 305 - struct device *dev = &wlp->rc->uwb_dev.dev; 306 struct wlp_eda_node *itr; 307 unsigned long flags; 308 int found = 0; ··· 310 list_for_each_entry(itr, &eda->cache, list_node) { 311 if (!memcmp(itr->virt_addr, virt_addr, 312 sizeof(itr->virt_addr))) { 313 - d_printf(6, dev, "EDA: looking for %pM hit %02x:%02x " 314 - "wss %p tag 0x%02x state %u\n", 315 - virt_addr, 316 - itr->dev_addr.data[1], 317 - itr->dev_addr.data[0], itr->wss, 318 - itr->tag, itr->state); 319 result = (*function)(wlp, itr, priv); 320 *dev_addr = itr->dev_addr; 321 found = 1; 322 break; 323 - } else 324 - d_printf(6, dev, "EDA: looking for %pM against %pM miss\n", 325 - virt_addr, itr->virt_addr); 326 } 327 - if (!found) { 328 - if (printk_ratelimit()) 329 - dev_err(dev, "EDA: Eth addr %pM not found.\n", 330 - virt_addr); 331 result = -ENODEV; 332 - } 333 spin_unlock_irqrestore(&eda->lock, flags); 334 return result; 335 }
··· 51 * the tag and address of the transmitting neighbor. 52 */ 53 54 #include <linux/netdevice.h> 55 #include <linux/etherdevice.h> 56 #include <linux/wlp.h> 57 #include "wlp-internal.h" ··· 304 { 305 int result = 0; 306 struct wlp *wlp = container_of(eda, struct wlp, eda); 307 struct wlp_eda_node *itr; 308 unsigned long flags; 309 int found = 0; ··· 313 list_for_each_entry(itr, &eda->cache, list_node) { 314 if (!memcmp(itr->virt_addr, virt_addr, 315 sizeof(itr->virt_addr))) { 316 result = (*function)(wlp, itr, priv); 317 *dev_addr = itr->dev_addr; 318 found = 1; 319 break; 320 + } 321 } 322 + if (!found) 323 result = -ENODEV; 324 spin_unlock_irqrestore(&eda->lock, flags); 325 return result; 326 }
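The lookup above walks eda->cache under eda->lock and, on a virtual-address match, invokes the caller-supplied callback with the matching wlp_eda_node before copying out its dev_addr; a miss now simply returns -ENODEV without logging. A sketch of what such a callback can look like, with parameter types inferred from the call site ((*function)(wlp, itr, priv)); the name and body are illustrative only.

#include <linux/wlp.h>
#include "wlp-internal.h"

static int example_eda_node_cb(struct wlp *wlp, struct wlp_eda_node *itr,
                               void *priv)
{
        u8 *tag = priv;

        *tag = itr->tag;        /* e.g. remember the neighbor's WSS tag */
        return 0;               /* becomes the lookup's result on a hit */
}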
+8 -173
drivers/uwb/wlp/messages.c
··· 24 */ 25 26 #include <linux/wlp.h> 27 - #define D_LOCAL 6 28 - #include <linux/uwb/debug.h> 29 #include "wlp-internal.h" 30 31 static ··· 104 #define wlp_set(type, type_code, name) \ 105 static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ 106 { \ 107 - d_fnstart(6, NULL, "(attribute %p)\n", attr); \ 108 wlp_set_attr_hdr(&attr->hdr, type_code, \ 109 sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ 110 attr->name = value; \ 111 - d_dump(6, NULL, attr, sizeof(*attr)); \ 112 - d_fnend(6, NULL, "(attribute %p)\n", attr); \ 113 return sizeof(*attr); \ 114 } 115 116 #define wlp_pset(type, type_code, name) \ 117 static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ 118 { \ 119 - d_fnstart(6, NULL, "(attribute %p)\n", attr); \ 120 wlp_set_attr_hdr(&attr->hdr, type_code, \ 121 sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ 122 attr->name = *value; \ 123 - d_dump(6, NULL, attr, sizeof(*attr)); \ 124 - d_fnend(6, NULL, "(attribute %p)\n", attr); \ 125 return sizeof(*attr); \ 126 } 127 ··· 132 static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value, \ 133 size_t len) \ 134 { \ 135 - d_fnstart(6, NULL, "(attribute %p)\n", attr); \ 136 wlp_set_attr_hdr(&attr->hdr, type_code, len); \ 137 memcpy(attr->name, value, len); \ 138 - d_dump(6, NULL, attr, sizeof(*attr) + len); \ 139 - d_fnend(6, NULL, "(attribute %p)\n", attr); \ 140 return sizeof(*attr) + len; \ 141 } 142 ··· 172 size_t datalen; 173 void *ptr = attr->wss_info; 174 size_t used = sizeof(*attr); 175 - d_fnstart(6, NULL, "(attribute %p)\n", attr); 176 datalen = sizeof(struct wlp_wss_info) + strlen(wss->name); 177 wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WSS_INFO, datalen); 178 used = wlp_set_wssid(ptr, &wss->wssid); ··· 180 used += wlp_set_accept_enrl(ptr + used, wss->accept_enroll); 181 used += wlp_set_wss_sec_status(ptr + used, wss->secure_status); 182 used += wlp_set_wss_bcast(ptr + used, &wss->bcast); 183 - d_dump(6, NULL, attr, sizeof(*attr) + datalen); 184 - d_fnend(6, NULL, "(attribute %p, used %d)\n", 185 - attr, (int)(sizeof(*attr) + used)); 186 return sizeof(*attr) + used; 187 } 188 ··· 401 size_t used = 0; 402 ssize_t result = -EINVAL; 403 404 - d_printf(6, dev, "WLP: WSS info: Retrieving WSS name\n"); 405 result = wlp_get_wss_name(wlp, ptr, info->name, buflen); 406 if (result < 0) { 407 dev_err(dev, "WLP: unable to obtain WSS name from " ··· 408 goto error_parse; 409 } 410 used += result; 411 - d_printf(6, dev, "WLP: WSS info: Retrieving accept enroll\n"); 412 result = wlp_get_accept_enrl(wlp, ptr + used, &info->accept_enroll, 413 buflen - used); 414 if (result < 0) { ··· 423 goto error_parse; 424 } 425 used += result; 426 - d_printf(6, dev, "WLP: WSS info: Retrieving secure status\n"); 427 result = wlp_get_wss_sec_status(wlp, ptr + used, &info->sec_status, 428 buflen - used); 429 if (result < 0) { ··· 438 goto error_parse; 439 } 440 used += result; 441 - d_printf(6, dev, "WLP: WSS info: Retrieving broadcast\n"); 442 result = wlp_get_wss_bcast(wlp, ptr + used, &info->bcast, 443 buflen - used); 444 if (result < 0) { ··· 516 len = result; 517 used = sizeof(*attr); 518 ptr = attr; 519 - d_printf(6, dev, "WLP: WSS info: Retrieving WSSID\n"); 520 result = wlp_get_wssid(wlp, ptr + used, wssid, buflen - used); 521 if (result < 0) { 522 dev_err(dev, "WLP: unable to obtain WSSID from WSS info.\n"); ··· 539 goto out; 540 } 541 result = used; 542 - d_printf(6, dev, "WLP: Successfully parsed WLP information " 543 - "attribute. 
used %zu bytes\n", used); 544 out: 545 return result; 546 } ··· 582 struct wlp_wssid_e *wssid_e; 583 char buf[WLP_WSS_UUID_STRSIZE]; 584 585 - d_fnstart(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d \n", 586 - wlp, attr, neighbor, wss, (int)buflen); 587 if (buflen < 0) 588 goto out; 589 ··· 620 wss->accept_enroll = wss_info.accept_enroll; 621 wss->state = WLP_WSS_STATE_PART_ENROLLED; 622 wlp_wss_uuid_print(buf, sizeof(buf), &wssid); 623 - d_printf(2, dev, "WLP: Found WSS %s. Enrolling.\n", 624 - buf); 625 } else { 626 wssid_e = wlp_create_wssid_e(wlp, neighbor); 627 if (wssid_e == NULL) { ··· 641 if (result < 0 && !enroll) /* this was a discovery */ 642 wlp_remove_neighbor_tmp_info(neighbor); 643 out: 644 - d_fnend(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d, " 645 - "result %d \n", wlp, attr, neighbor, wss, (int)buflen, 646 - (int)result); 647 return result; 648 649 } ··· 696 struct sk_buff *_skb; 697 void *d1_itr; 698 699 - d_fnstart(6, dev, "wlp %p\n", wlp); 700 if (wlp->dev_info == NULL) { 701 result = __wlp_setup_device_info(wlp); 702 if (result < 0) { ··· 705 } 706 } 707 info = wlp->dev_info; 708 - d_printf(6, dev, "Local properties:\n" 709 - "Device name (%d bytes): %s\n" 710 - "Model name (%d bytes): %s\n" 711 - "Manufacturer (%d bytes): %s\n" 712 - "Model number (%d bytes): %s\n" 713 - "Serial number (%d bytes): %s\n" 714 - "Primary device type: \n" 715 - " Category: %d \n" 716 - " OUI: %02x:%02x:%02x \n" 717 - " OUI Subdivision: %u \n", 718 - (int)strlen(info->name), info->name, 719 - (int)strlen(info->model_name), info->model_name, 720 - (int)strlen(info->manufacturer), info->manufacturer, 721 - (int)strlen(info->model_nr), info->model_nr, 722 - (int)strlen(info->serial), info->serial, 723 - info->prim_dev_type.category, 724 - info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1], 725 - info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv); 726 _skb = dev_alloc_skb(sizeof(*_d1) 727 + sizeof(struct wlp_attr_uuid_e) 728 + sizeof(struct wlp_attr_wss_sel_mthd) ··· 727 goto error; 728 } 729 _d1 = (void *) _skb->data; 730 - d_printf(6, dev, "D1 starts at %p \n", _d1); 731 _d1->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 732 _d1->hdr.type = WLP_FRAME_ASSOCIATION; 733 _d1->type = WLP_ASSOC_D1; ··· 749 used += wlp_set_prim_dev_type(d1_itr + used, &info->prim_dev_type); 750 used += wlp_set_wlp_assc_err(d1_itr + used, WLP_ASSOC_ERROR_NONE); 751 skb_put(_skb, sizeof(*_d1) + used); 752 - d_printf(6, dev, "D1 message:\n"); 753 - d_dump(6, dev, _d1, sizeof(*_d1) 754 - + sizeof(struct wlp_attr_uuid_e) 755 - + sizeof(struct wlp_attr_wss_sel_mthd) 756 - + sizeof(struct wlp_attr_dev_name) 757 - + strlen(info->name) 758 - + sizeof(struct wlp_attr_manufacturer) 759 - + strlen(info->manufacturer) 760 - + sizeof(struct wlp_attr_model_name) 761 - + strlen(info->model_name) 762 - + sizeof(struct wlp_attr_model_nr) 763 - + strlen(info->model_nr) 764 - + sizeof(struct wlp_attr_serial) 765 - + strlen(info->serial) 766 - + sizeof(struct wlp_attr_prim_dev_type) 767 - + sizeof(struct wlp_attr_wlp_assc_err)); 768 *skb = _skb; 769 error: 770 - d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); 771 return result; 772 } 773 ··· 778 void *d2_itr; 779 size_t mem_needed; 780 781 - d_fnstart(6, dev, "wlp %p\n", wlp); 782 if (wlp->dev_info == NULL) { 783 result = __wlp_setup_device_info(wlp); 784 if (result < 0) { ··· 787 } 788 } 789 info = wlp->dev_info; 790 - d_printf(6, dev, "Local properties:\n" 791 - "Device name (%d bytes): %s\n" 792 - "Model name (%d bytes): %s\n" 793 - 
"Manufacturer (%d bytes): %s\n" 794 - "Model number (%d bytes): %s\n" 795 - "Serial number (%d bytes): %s\n" 796 - "Primary device type: \n" 797 - " Category: %d \n" 798 - " OUI: %02x:%02x:%02x \n" 799 - " OUI Subdivision: %u \n", 800 - (int)strlen(info->name), info->name, 801 - (int)strlen(info->model_name), info->model_name, 802 - (int)strlen(info->manufacturer), info->manufacturer, 803 - (int)strlen(info->model_nr), info->model_nr, 804 - (int)strlen(info->serial), info->serial, 805 - info->prim_dev_type.category, 806 - info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1], 807 - info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv); 808 mem_needed = sizeof(*_d2) 809 + sizeof(struct wlp_attr_uuid_e) 810 + sizeof(struct wlp_attr_uuid_r) ··· 814 goto error; 815 } 816 _d2 = (void *) _skb->data; 817 - d_printf(6, dev, "D2 starts at %p \n", _d2); 818 _d2->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 819 _d2->hdr.type = WLP_FRAME_ASSOCIATION; 820 _d2->type = WLP_ASSOC_D2; ··· 838 used += wlp_set_prim_dev_type(d2_itr + used, &info->prim_dev_type); 839 used += wlp_set_wlp_assc_err(d2_itr + used, WLP_ASSOC_ERROR_NONE); 840 skb_put(_skb, sizeof(*_d2) + used); 841 - d_printf(6, dev, "D2 message:\n"); 842 - d_dump(6, dev, _d2, mem_needed); 843 *skb = _skb; 844 error: 845 - d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); 846 return result; 847 } 848 ··· 865 struct sk_buff *_skb; 866 struct wlp_nonce tmp; 867 868 - d_fnstart(6, dev, "wlp %p\n", wlp); 869 _skb = dev_alloc_skb(sizeof(*f0)); 870 if (_skb == NULL) { 871 dev_err(dev, "WLP: Unable to allocate memory for F0 " ··· 872 goto error_alloc; 873 } 874 f0 = (void *) _skb->data; 875 - d_printf(6, dev, "F0 starts at %p \n", f0); 876 f0->f0_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 877 f0->f0_hdr.hdr.type = WLP_FRAME_ASSOCIATION; 878 f0->f0_hdr.type = WLP_ASSOC_F0; ··· 885 *skb = _skb; 886 result = 0; 887 error_alloc: 888 - d_fnend(6, dev, "wlp %p, result %d \n", wlp, result); 889 return result; 890 } 891 ··· 1157 enum wlp_wss_sel_mthd sel_mthd = 0; 1158 struct wlp_device_info dev_info; 1159 enum wlp_assc_error assc_err; 1160 - char uuid[WLP_WSS_UUID_STRSIZE]; 1161 struct sk_buff *resp = NULL; 1162 1163 /* Parse D1 frame */ 1164 - d_fnstart(6, dev, "WLP: handle D1 frame. 
wlp = %p, skb = %p\n", 1165 - wlp, skb); 1166 mutex_lock(&wss->mutex); 1167 mutex_lock(&wlp->mutex); /* to access wlp->uuid */ 1168 memset(&dev_info, 0, sizeof(dev_info)); ··· 1170 kfree_skb(skb); 1171 goto out; 1172 } 1173 - wlp_wss_uuid_print(uuid, sizeof(uuid), &uuid_e); 1174 - d_printf(6, dev, "From D1 frame:\n" 1175 - "UUID-E: %s\n" 1176 - "Selection method: %d\n" 1177 - "Device name (%d bytes): %s\n" 1178 - "Model name (%d bytes): %s\n" 1179 - "Manufacturer (%d bytes): %s\n" 1180 - "Model number (%d bytes): %s\n" 1181 - "Serial number (%d bytes): %s\n" 1182 - "Primary device type: \n" 1183 - " Category: %d \n" 1184 - " OUI: %02x:%02x:%02x \n" 1185 - " OUI Subdivision: %u \n", 1186 - uuid, sel_mthd, 1187 - (int)strlen(dev_info.name), dev_info.name, 1188 - (int)strlen(dev_info.model_name), dev_info.model_name, 1189 - (int)strlen(dev_info.manufacturer), dev_info.manufacturer, 1190 - (int)strlen(dev_info.model_nr), dev_info.model_nr, 1191 - (int)strlen(dev_info.serial), dev_info.serial, 1192 - dev_info.prim_dev_type.category, 1193 - dev_info.prim_dev_type.OUI[0], 1194 - dev_info.prim_dev_type.OUI[1], 1195 - dev_info.prim_dev_type.OUI[2], 1196 - dev_info.prim_dev_type.OUIsubdiv); 1197 1198 kfree_skb(skb); 1199 if (!wlp_uuid_is_set(&wlp->uuid)) { ··· 1204 kfree(frame_ctx); 1205 mutex_unlock(&wlp->mutex); 1206 mutex_unlock(&wss->mutex); 1207 - d_fnend(6, dev, "WLP: handle D1 frame. wlp = %p\n", wlp); 1208 } 1209 1210 /** ··· 1433 void *ptr = skb->data; 1434 size_t len = skb->len; 1435 size_t used; 1436 - char buf[WLP_WSS_UUID_STRSIZE]; 1437 struct wlp_frame_assoc *assoc = ptr; 1438 1439 - d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb); 1440 used = sizeof(*assoc); 1441 result = wlp_get_wssid(wlp, ptr + used, wssid, len - used); 1442 if (result < 0) { ··· 1457 wlp_assoc_frame_str(assoc->type)); 1458 goto error_parse; 1459 } 1460 - wlp_wss_uuid_print(buf, sizeof(buf), wssid); 1461 - d_printf(6, dev, "WLP: parsed: WSSID %s, tag 0x%02x, virt " 1462 - "%02x:%02x:%02x:%02x:%02x:%02x \n", buf, *tag, 1463 - virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], 1464 - virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]); 1465 - 1466 error_parse: 1467 - d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result); 1468 return result; 1469 } 1470 ··· 1478 } *c; 1479 struct sk_buff *_skb; 1480 1481 - d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss); 1482 _skb = dev_alloc_skb(sizeof(*c)); 1483 if (_skb == NULL) { 1484 dev_err(dev, "WLP: Unable to allocate memory for C1/C2 " ··· 1485 goto error_alloc; 1486 } 1487 c = (void *) _skb->data; 1488 - d_printf(6, dev, "C1/C2 starts at %p \n", c); 1489 c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 1490 c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; 1491 c->c_hdr.type = type; ··· 1492 wlp_set_msg_type(&c->c_hdr.msg_type, type); 1493 wlp_set_wssid(&c->wssid, &wss->wssid); 1494 skb_put(_skb, sizeof(*c)); 1495 - d_printf(6, dev, "C1/C2 message:\n"); 1496 - d_dump(6, dev, c, sizeof(*c)); 1497 *skb = _skb; 1498 result = 0; 1499 error_alloc: 1500 - d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result); 1501 return result; 1502 } 1503 ··· 1533 } *c; 1534 struct sk_buff *_skb; 1535 1536 - d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss); 1537 _skb = dev_alloc_skb(sizeof(*c)); 1538 if (_skb == NULL) { 1539 dev_err(dev, "WLP: Unable to allocate memory for C3/C4 " ··· 1540 goto error_alloc; 1541 } 1542 c = (void *) _skb->data; 1543 - d_printf(6, dev, "C3/C4 starts at %p \n", c); 1544 c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 
1545 c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; 1546 c->c_hdr.type = type; ··· 1549 wlp_set_wss_tag(&c->wss_tag, wss->tag); 1550 wlp_set_wss_virt(&c->wss_virt, &wss->virtual_addr); 1551 skb_put(_skb, sizeof(*c)); 1552 - d_printf(6, dev, "C3/C4 message:\n"); 1553 - d_dump(6, dev, c, sizeof(*c)); 1554 *skb = _skb; 1555 result = 0; 1556 error_alloc: 1557 - d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result); 1558 return result; 1559 } 1560 ··· 1577 struct device *dev = &wlp->rc->uwb_dev.dev; \ 1578 int result; \ 1579 struct sk_buff *skb = NULL; \ 1580 - d_fnstart(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \ 1581 - wlp, wss, dev_addr->data[1], dev_addr->data[0]); \ 1582 - d_printf(6, dev, "WLP: Constructing %s frame. \n", \ 1583 - wlp_assoc_frame_str(id)); \ 1584 /* Build the frame */ \ 1585 result = wlp_build_assoc_##type(wlp, wss, &skb); \ 1586 if (result < 0) { \ ··· 1586 goto error_build_assoc; \ 1587 } \ 1588 /* Send the frame */ \ 1589 - d_printf(6, dev, "Transmitting %s frame to %02x:%02x \n", \ 1590 - wlp_assoc_frame_str(id), \ 1591 - dev_addr->data[1], dev_addr->data[0]); \ 1592 BUG_ON(wlp->xmit_frame == NULL); \ 1593 result = wlp->xmit_frame(wlp, skb, dev_addr); \ 1594 if (result < 0) { \ ··· 1602 /* We could try again ... */ \ 1603 dev_kfree_skb_any(skb);/*we need to free if tx fails*/ \ 1604 error_build_assoc: \ 1605 - d_fnend(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \ 1606 - wlp, wss, dev_addr->data[1], dev_addr->data[0]); \ 1607 return result; \ 1608 } 1609 ··· 1654 struct uwb_dev_addr *src = &frame_ctx->src; 1655 int result; 1656 struct wlp_uuid wssid; 1657 - char buf[WLP_WSS_UUID_STRSIZE]; 1658 struct sk_buff *resp = NULL; 1659 1660 /* Parse C1 frame */ 1661 - d_fnstart(6, dev, "WLP: handle C1 frame. wlp = %p, c1 = %p\n", 1662 - wlp, c1); 1663 mutex_lock(&wss->mutex); 1664 result = wlp_get_wssid(wlp, (void *)c1 + sizeof(*c1), &wssid, 1665 len - sizeof(*c1)); ··· 1664 dev_err(dev, "WLP: unable to obtain WSSID from C1 frame.\n"); 1665 goto out; 1666 } 1667 - wlp_wss_uuid_print(buf, sizeof(buf), &wssid); 1668 - d_printf(6, dev, "Received C1 frame with WSSID %s \n", buf); 1669 if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) 1670 && wss->state == WLP_WSS_STATE_ACTIVE) { 1671 - d_printf(6, dev, "WSSID from C1 frame is known locally " 1672 - "and is active\n"); 1673 /* Construct C2 frame */ 1674 result = wlp_build_assoc_c2(wlp, wss, &resp); 1675 if (result < 0) { ··· 1673 goto out; 1674 } 1675 } else { 1676 - d_printf(6, dev, "WSSID from C1 frame is not known locally " 1677 - "or is not active\n"); 1678 /* Construct F0 frame */ 1679 result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); 1680 if (result < 0) { ··· 1681 } 1682 } 1683 /* Send C2 frame */ 1684 - d_printf(6, dev, "Transmitting response (C2/F0) frame to %02x:%02x \n", 1685 - src->data[1], src->data[0]); 1686 BUG_ON(wlp->xmit_frame == NULL); 1687 result = wlp->xmit_frame(wlp, resp, src); 1688 if (result < 0) { ··· 1695 kfree_skb(frame_ctx->skb); 1696 kfree(frame_ctx); 1697 mutex_unlock(&wss->mutex); 1698 - d_fnend(6, dev, "WLP: handle C1 frame. wlp = %p\n", wlp); 1699 } 1700 1701 /** ··· 1716 struct sk_buff *skb = frame_ctx->skb; 1717 struct uwb_dev_addr *src = &frame_ctx->src; 1718 int result; 1719 - char buf[WLP_WSS_UUID_STRSIZE]; 1720 struct sk_buff *resp = NULL; 1721 struct wlp_uuid wssid; 1722 u8 tag; 1723 struct uwb_mac_addr virt_addr; 1724 1725 /* Parse C3 frame */ 1726 - d_fnstart(6, dev, "WLP: handle C3 frame. 
wlp = %p, skb = %p\n", 1727 - wlp, skb); 1728 mutex_lock(&wss->mutex); 1729 result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr); 1730 if (result < 0) { 1731 dev_err(dev, "WLP: unable to obtain values from C3 frame.\n"); 1732 goto out; 1733 } 1734 - wlp_wss_uuid_print(buf, sizeof(buf), &wssid); 1735 - d_printf(6, dev, "Received C3 frame with WSSID %s \n", buf); 1736 if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) 1737 && wss->state >= WLP_WSS_STATE_ACTIVE) { 1738 - d_printf(6, dev, "WSSID from C3 frame is known locally " 1739 - "and is active\n"); 1740 result = wlp_eda_update_node(&wlp->eda, src, wss, 1741 (void *) virt_addr.data, tag, 1742 WLP_WSS_CONNECTED); ··· 1754 } 1755 } 1756 } else { 1757 - d_printf(6, dev, "WSSID from C3 frame is not known locally " 1758 - "or is not active\n"); 1759 /* Construct F0 frame */ 1760 result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); 1761 if (result < 0) { ··· 1762 } 1763 } 1764 /* Send C4 frame */ 1765 - d_printf(6, dev, "Transmitting response (C4/F0) frame to %02x:%02x \n", 1766 - src->data[1], src->data[0]); 1767 BUG_ON(wlp->xmit_frame == NULL); 1768 result = wlp->xmit_frame(wlp, resp, src); 1769 if (result < 0) { ··· 1776 kfree_skb(frame_ctx->skb); 1777 kfree(frame_ctx); 1778 mutex_unlock(&wss->mutex); 1779 - d_fnend(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n", 1780 - wlp, skb); 1781 } 1782 1783
··· 24 */ 25 26 #include <linux/wlp.h> 27 + 28 #include "wlp-internal.h" 29 30 static ··· 105 #define wlp_set(type, type_code, name) \ 106 static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ 107 { \ 108 wlp_set_attr_hdr(&attr->hdr, type_code, \ 109 sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ 110 attr->name = value; \ 111 return sizeof(*attr); \ 112 } 113 114 #define wlp_pset(type, type_code, name) \ 115 static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ 116 { \ 117 wlp_set_attr_hdr(&attr->hdr, type_code, \ 118 sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ 119 attr->name = *value; \ 120 return sizeof(*attr); \ 121 } 122 ··· 139 static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value, \ 140 size_t len) \ 141 { \ 142 wlp_set_attr_hdr(&attr->hdr, type_code, len); \ 143 memcpy(attr->name, value, len); \ 144 return sizeof(*attr) + len; \ 145 } 146 ··· 182 size_t datalen; 183 void *ptr = attr->wss_info; 184 size_t used = sizeof(*attr); 185 + 186 datalen = sizeof(struct wlp_wss_info) + strlen(wss->name); 187 wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WSS_INFO, datalen); 188 used = wlp_set_wssid(ptr, &wss->wssid); ··· 190 used += wlp_set_accept_enrl(ptr + used, wss->accept_enroll); 191 used += wlp_set_wss_sec_status(ptr + used, wss->secure_status); 192 used += wlp_set_wss_bcast(ptr + used, &wss->bcast); 193 return sizeof(*attr) + used; 194 } 195 ··· 414 size_t used = 0; 415 ssize_t result = -EINVAL; 416 417 result = wlp_get_wss_name(wlp, ptr, info->name, buflen); 418 if (result < 0) { 419 dev_err(dev, "WLP: unable to obtain WSS name from " ··· 422 goto error_parse; 423 } 424 used += result; 425 + 426 result = wlp_get_accept_enrl(wlp, ptr + used, &info->accept_enroll, 427 buflen - used); 428 if (result < 0) { ··· 437 goto error_parse; 438 } 439 used += result; 440 + 441 result = wlp_get_wss_sec_status(wlp, ptr + used, &info->sec_status, 442 buflen - used); 443 if (result < 0) { ··· 452 goto error_parse; 453 } 454 used += result; 455 + 456 result = wlp_get_wss_bcast(wlp, ptr + used, &info->bcast, 457 buflen - used); 458 if (result < 0) { ··· 530 len = result; 531 used = sizeof(*attr); 532 ptr = attr; 533 + 534 result = wlp_get_wssid(wlp, ptr + used, wssid, buflen - used); 535 if (result < 0) { 536 dev_err(dev, "WLP: unable to obtain WSSID from WSS info.\n"); ··· 553 goto out; 554 } 555 result = used; 556 out: 557 return result; 558 } ··· 598 struct wlp_wssid_e *wssid_e; 599 char buf[WLP_WSS_UUID_STRSIZE]; 600 601 if (buflen < 0) 602 goto out; 603 ··· 638 wss->accept_enroll = wss_info.accept_enroll; 639 wss->state = WLP_WSS_STATE_PART_ENROLLED; 640 wlp_wss_uuid_print(buf, sizeof(buf), &wssid); 641 + dev_dbg(dev, "WLP: Found WSS %s. 
Enrolling.\n", buf); 642 } else { 643 wssid_e = wlp_create_wssid_e(wlp, neighbor); 644 if (wssid_e == NULL) { ··· 660 if (result < 0 && !enroll) /* this was a discovery */ 661 wlp_remove_neighbor_tmp_info(neighbor); 662 out: 663 return result; 664 665 } ··· 718 struct sk_buff *_skb; 719 void *d1_itr; 720 721 if (wlp->dev_info == NULL) { 722 result = __wlp_setup_device_info(wlp); 723 if (result < 0) { ··· 728 } 729 } 730 info = wlp->dev_info; 731 _skb = dev_alloc_skb(sizeof(*_d1) 732 + sizeof(struct wlp_attr_uuid_e) 733 + sizeof(struct wlp_attr_wss_sel_mthd) ··· 768 goto error; 769 } 770 _d1 = (void *) _skb->data; 771 _d1->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 772 _d1->hdr.type = WLP_FRAME_ASSOCIATION; 773 _d1->type = WLP_ASSOC_D1; ··· 791 used += wlp_set_prim_dev_type(d1_itr + used, &info->prim_dev_type); 792 used += wlp_set_wlp_assc_err(d1_itr + used, WLP_ASSOC_ERROR_NONE); 793 skb_put(_skb, sizeof(*_d1) + used); 794 *skb = _skb; 795 error: 796 return result; 797 } 798 ··· 837 void *d2_itr; 838 size_t mem_needed; 839 840 if (wlp->dev_info == NULL) { 841 result = __wlp_setup_device_info(wlp); 842 if (result < 0) { ··· 847 } 848 } 849 info = wlp->dev_info; 850 mem_needed = sizeof(*_d2) 851 + sizeof(struct wlp_attr_uuid_e) 852 + sizeof(struct wlp_attr_uuid_r) ··· 892 goto error; 893 } 894 _d2 = (void *) _skb->data; 895 _d2->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 896 _d2->hdr.type = WLP_FRAME_ASSOCIATION; 897 _d2->type = WLP_ASSOC_D2; ··· 917 used += wlp_set_prim_dev_type(d2_itr + used, &info->prim_dev_type); 918 used += wlp_set_wlp_assc_err(d2_itr + used, WLP_ASSOC_ERROR_NONE); 919 skb_put(_skb, sizeof(*_d2) + used); 920 *skb = _skb; 921 error: 922 return result; 923 } 924 ··· 947 struct sk_buff *_skb; 948 struct wlp_nonce tmp; 949 950 _skb = dev_alloc_skb(sizeof(*f0)); 951 if (_skb == NULL) { 952 dev_err(dev, "WLP: Unable to allocate memory for F0 " ··· 955 goto error_alloc; 956 } 957 f0 = (void *) _skb->data; 958 f0->f0_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 959 f0->f0_hdr.hdr.type = WLP_FRAME_ASSOCIATION; 960 f0->f0_hdr.type = WLP_ASSOC_F0; ··· 969 *skb = _skb; 970 result = 0; 971 error_alloc: 972 return result; 973 } 974 ··· 1242 enum wlp_wss_sel_mthd sel_mthd = 0; 1243 struct wlp_device_info dev_info; 1244 enum wlp_assc_error assc_err; 1245 struct sk_buff *resp = NULL; 1246 1247 /* Parse D1 frame */ 1248 mutex_lock(&wss->mutex); 1249 mutex_lock(&wlp->mutex); /* to access wlp->uuid */ 1250 memset(&dev_info, 0, sizeof(dev_info)); ··· 1258 kfree_skb(skb); 1259 goto out; 1260 } 1261 1262 kfree_skb(skb); 1263 if (!wlp_uuid_is_set(&wlp->uuid)) { ··· 1316 kfree(frame_ctx); 1317 mutex_unlock(&wlp->mutex); 1318 mutex_unlock(&wss->mutex); 1319 } 1320 1321 /** ··· 1546 void *ptr = skb->data; 1547 size_t len = skb->len; 1548 size_t used; 1549 struct wlp_frame_assoc *assoc = ptr; 1550 1551 used = sizeof(*assoc); 1552 result = wlp_get_wssid(wlp, ptr + used, wssid, len - used); 1553 if (result < 0) { ··· 1572 wlp_assoc_frame_str(assoc->type)); 1573 goto error_parse; 1574 } 1575 error_parse: 1576 return result; 1577 } 1578 ··· 1600 } *c; 1601 struct sk_buff *_skb; 1602 1603 _skb = dev_alloc_skb(sizeof(*c)); 1604 if (_skb == NULL) { 1605 dev_err(dev, "WLP: Unable to allocate memory for C1/C2 " ··· 1608 goto error_alloc; 1609 } 1610 c = (void *) _skb->data; 1611 c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 1612 c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; 1613 c->c_hdr.type = type; ··· 1616 wlp_set_msg_type(&c->c_hdr.msg_type, type); 1617 wlp_set_wssid(&c->wssid, &wss->wssid); 
1618 skb_put(_skb, sizeof(*c)); 1619 *skb = _skb; 1620 result = 0; 1621 error_alloc: 1622 return result; 1623 } 1624 ··· 1660 } *c; 1661 struct sk_buff *_skb; 1662 1663 _skb = dev_alloc_skb(sizeof(*c)); 1664 if (_skb == NULL) { 1665 dev_err(dev, "WLP: Unable to allocate memory for C3/C4 " ··· 1668 goto error_alloc; 1669 } 1670 c = (void *) _skb->data; 1671 c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 1672 c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; 1673 c->c_hdr.type = type; ··· 1678 wlp_set_wss_tag(&c->wss_tag, wss->tag); 1679 wlp_set_wss_virt(&c->wss_virt, &wss->virtual_addr); 1680 skb_put(_skb, sizeof(*c)); 1681 *skb = _skb; 1682 result = 0; 1683 error_alloc: 1684 return result; 1685 } 1686 ··· 1709 struct device *dev = &wlp->rc->uwb_dev.dev; \ 1710 int result; \ 1711 struct sk_buff *skb = NULL; \ 1712 + \ 1713 /* Build the frame */ \ 1714 result = wlp_build_assoc_##type(wlp, wss, &skb); \ 1715 if (result < 0) { \ ··· 1721 goto error_build_assoc; \ 1722 } \ 1723 /* Send the frame */ \ 1724 BUG_ON(wlp->xmit_frame == NULL); \ 1725 result = wlp->xmit_frame(wlp, skb, dev_addr); \ 1726 if (result < 0) { \ ··· 1740 /* We could try again ... */ \ 1741 dev_kfree_skb_any(skb);/*we need to free if tx fails*/ \ 1742 error_build_assoc: \ 1743 return result; \ 1744 } 1745 ··· 1794 struct uwb_dev_addr *src = &frame_ctx->src; 1795 int result; 1796 struct wlp_uuid wssid; 1797 struct sk_buff *resp = NULL; 1798 1799 /* Parse C1 frame */ 1800 mutex_lock(&wss->mutex); 1801 result = wlp_get_wssid(wlp, (void *)c1 + sizeof(*c1), &wssid, 1802 len - sizeof(*c1)); ··· 1807 dev_err(dev, "WLP: unable to obtain WSSID from C1 frame.\n"); 1808 goto out; 1809 } 1810 if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) 1811 && wss->state == WLP_WSS_STATE_ACTIVE) { 1812 /* Construct C2 frame */ 1813 result = wlp_build_assoc_c2(wlp, wss, &resp); 1814 if (result < 0) { ··· 1820 goto out; 1821 } 1822 } else { 1823 /* Construct F0 frame */ 1824 result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); 1825 if (result < 0) { ··· 1830 } 1831 } 1832 /* Send C2 frame */ 1833 BUG_ON(wlp->xmit_frame == NULL); 1834 result = wlp->xmit_frame(wlp, resp, src); 1835 if (result < 0) { ··· 1846 kfree_skb(frame_ctx->skb); 1847 kfree(frame_ctx); 1848 mutex_unlock(&wss->mutex); 1849 } 1850 1851 /** ··· 1868 struct sk_buff *skb = frame_ctx->skb; 1869 struct uwb_dev_addr *src = &frame_ctx->src; 1870 int result; 1871 struct sk_buff *resp = NULL; 1872 struct wlp_uuid wssid; 1873 u8 tag; 1874 struct uwb_mac_addr virt_addr; 1875 1876 /* Parse C3 frame */ 1877 mutex_lock(&wss->mutex); 1878 result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr); 1879 if (result < 0) { 1880 dev_err(dev, "WLP: unable to obtain values from C3 frame.\n"); 1881 goto out; 1882 } 1883 if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) 1884 && wss->state >= WLP_WSS_STATE_ACTIVE) { 1885 result = wlp_eda_update_node(&wlp->eda, src, wss, 1886 (void *) virt_addr.data, tag, 1887 WLP_WSS_CONNECTED); ··· 1913 } 1914 } 1915 } else { 1916 /* Construct F0 frame */ 1917 result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); 1918 if (result < 0) { ··· 1923 } 1924 } 1925 /* Send C4 frame */ 1926 BUG_ON(wlp->xmit_frame == NULL); 1927 result = wlp->xmit_frame(wlp, resp, src); 1928 if (result < 0) { ··· 1939 kfree_skb(frame_ctx->skb); 1940 kfree(frame_ctx); 1941 mutex_unlock(&wss->mutex); 1942 } 1943 1944
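Note: the wlp_set()/wlp_pset() generator macros near the top of messages.c (plus the variable-length variant whose #define is elided above) stamp out one setter per WLP attribute. Each setter fills in the attribute header (type code plus payload length) and then the value, and returns the number of bytes written so that the D1/D2/C1-C4 builders can chain them with "used += ...". As a minimal sketch, not part of the patch, an invocation for a hypothetical single-byte attribute "foo" with type code WLP_ATTR_FOO (both names are placeholders) would expand to roughly:

static size_t wlp_set_foo(struct wlp_attr_foo *attr, u8 value)
{
	/* header carries the attribute type code and the payload length */
	wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_FOO,
			 sizeof(*attr) - sizeof(struct wlp_attr_hdr));
	attr->foo = value;
	return sizeof(*attr);	/* callers chain: used += wlp_set_foo(...) */
}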
+1 -1
drivers/uwb/wlp/sysfs.c
··· 23 * FIXME: Docs 24 * 25 */ 26 - 27 #include <linux/wlp.h> 28 #include "wlp-internal.h" 29 30 static
··· 23 * FIXME: Docs 24 * 25 */ 26 #include <linux/wlp.h> 27 + 28 #include "wlp-internal.h" 29 30 static
+8 -29
drivers/uwb/wlp/txrx.c
··· 26 27 #include <linux/etherdevice.h> 28 #include <linux/wlp.h> 29 - #define D_LOCAL 5 30 - #include <linux/uwb/debug.h> 31 #include "wlp-internal.h" 32 33 - 34 - /** 35 * Direct incoming association msg to correct parsing routine 36 * 37 * We only expect D1, E1, C1, C3 messages as new. All other incoming ··· 46 struct device *dev = &wlp->rc->uwb_dev.dev; 47 struct wlp_frame_assoc *assoc = (void *) skb->data; 48 struct wlp_assoc_frame_ctx *frame_ctx; 49 - d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb); 50 frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC); 51 if (frame_ctx == NULL) { 52 dev_err(dev, "WLP: Unable to allocate memory for association " 53 "frame handling.\n"); 54 kfree_skb(skb); 55 - goto out; 56 } 57 frame_ctx->wlp = wlp; 58 frame_ctx->skb = skb; 59 frame_ctx->src = *src; 60 switch (assoc->type) { 61 case WLP_ASSOC_D1: 62 - d_printf(5, dev, "Received a D1 frame.\n"); 63 INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame); 64 schedule_work(&frame_ctx->ws); 65 break; 66 case WLP_ASSOC_E1: 67 - d_printf(5, dev, "Received a E1 frame. FIXME?\n"); 68 kfree_skb(skb); /* Temporary until we handle it */ 69 kfree(frame_ctx); /* Temporary until we handle it */ 70 break; 71 case WLP_ASSOC_C1: 72 - d_printf(5, dev, "Received a C1 frame.\n"); 73 INIT_WORK(&frame_ctx->ws, wlp_handle_c1_frame); 74 schedule_work(&frame_ctx->ws); 75 break; 76 case WLP_ASSOC_C3: 77 - d_printf(5, dev, "Received a C3 frame.\n"); 78 INIT_WORK(&frame_ctx->ws, wlp_handle_c3_frame); 79 schedule_work(&frame_ctx->ws); 80 break; ··· 81 kfree(frame_ctx); 82 break; 83 } 84 - out: 85 - d_fnend(5, dev, "wlp %p\n", wlp); 86 } 87 88 - /** 89 * Process incoming association frame 90 * 91 * Although it could be possible to deal with some incoming association ··· 104 struct wlp_frame_assoc *assoc = (void *) skb->data; 105 struct wlp_session *session = wlp->session; 106 u8 version; 107 - d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb); 108 109 if (wlp_get_version(wlp, &assoc->version, &version, 110 sizeof(assoc->version)) < 0) ··· 141 } else { 142 wlp_direct_assoc_frame(wlp, skb, src); 143 } 144 - d_fnend(5, dev, "wlp %p\n", wlp); 145 return; 146 error: 147 kfree_skb(skb); 148 - d_fnend(5, dev, "wlp %p\n", wlp); 149 } 150 151 - /** 152 * Verify incoming frame is from connected neighbor, prep to pass to WLP client 153 * 154 * Verification proceeds according to WLP 0.99 [7.3.1]. 
The source address ··· 165 struct wlp_eda_node eda_entry; 166 struct wlp_frame_std_abbrv_hdr *hdr = (void *) skb->data; 167 168 - d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb); 169 /*verify*/ 170 result = wlp_copy_eda_node(&wlp->eda, src, &eda_entry); 171 if (result < 0) { ··· 195 /*prep*/ 196 skb_pull(skb, sizeof(*hdr)); 197 out: 198 - d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result); 199 return result; 200 } 201 202 - /** 203 * Receive a WLP frame from device 204 * 205 * @returns: 1 if calling function should free the skb ··· 213 struct wlp_frame_hdr *hdr; 214 int result = 0; 215 216 - d_fnstart(6, dev, "skb (%p), len (%u)\n", skb, len); 217 if (len < sizeof(*hdr)) { 218 dev_err(dev, "Not enough data to parse WLP header.\n"); 219 result = -EINVAL; 220 goto out; 221 } 222 hdr = ptr; 223 - d_dump(6, dev, hdr, sizeof(*hdr)); 224 if (le16_to_cpu(hdr->mux_hdr) != WLP_PROTOCOL_ID) { 225 dev_err(dev, "Not a WLP frame type.\n"); 226 result = -EINVAL; ··· 255 "WLP header.\n"); 256 goto out; 257 } 258 - d_printf(5, dev, "Association frame received.\n"); 259 wlp_receive_assoc_frame(wlp, skb, src); 260 break; 261 default: ··· 267 kfree_skb(skb); 268 result = 0; 269 } 270 - d_fnend(6, dev, "skb (%p)\n", skb); 271 return result; 272 } 273 EXPORT_SYMBOL_GPL(wlp_receive_frame); 274 275 276 - /** 277 * Verify frame from network stack, prepare for further transmission 278 * 279 * @skb: the socket buffer that needs to be prepared for transmission (it ··· 326 int result = -EINVAL; 327 struct ethhdr *eth_hdr = (void *) skb->data; 328 329 - d_fnstart(6, dev, "wlp (%p), skb (%p) \n", wlp, skb); 330 if (is_broadcast_ether_addr(eth_hdr->h_dest)) { 331 - d_printf(6, dev, "WLP: handling broadcast frame. \n"); 332 result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb); 333 if (result < 0) { 334 if (printk_ratelimit()) ··· 338 result = 1; 339 /* Frame will be transmitted by WLP. */ 340 } else { 341 - d_printf(6, dev, "WLP: handling unicast frame. \n"); 342 result = wlp_eda_for_virtual(&wlp->eda, eth_hdr->h_dest, dst, 343 wlp_wss_prep_hdr, skb); 344 if (unlikely(result < 0)) { ··· 348 } 349 } 350 out: 351 - d_fnend(6, dev, "wlp (%p), skb (%p). result = %d \n", wlp, skb, result); 352 return result; 353 } 354 EXPORT_SYMBOL_GPL(wlp_prepare_tx_frame);
··· 26 27 #include <linux/etherdevice.h> 28 #include <linux/wlp.h> 29 + 30 #include "wlp-internal.h" 31 32 + /* 33 * Direct incoming association msg to correct parsing routine 34 * 35 * We only expect D1, E1, C1, C3 messages as new. All other incoming ··· 48 struct device *dev = &wlp->rc->uwb_dev.dev; 49 struct wlp_frame_assoc *assoc = (void *) skb->data; 50 struct wlp_assoc_frame_ctx *frame_ctx; 51 + 52 frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC); 53 if (frame_ctx == NULL) { 54 dev_err(dev, "WLP: Unable to allocate memory for association " 55 "frame handling.\n"); 56 kfree_skb(skb); 57 + return; 58 } 59 frame_ctx->wlp = wlp; 60 frame_ctx->skb = skb; 61 frame_ctx->src = *src; 62 switch (assoc->type) { 63 case WLP_ASSOC_D1: 64 INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame); 65 schedule_work(&frame_ctx->ws); 66 break; 67 case WLP_ASSOC_E1: 68 kfree_skb(skb); /* Temporary until we handle it */ 69 kfree(frame_ctx); /* Temporary until we handle it */ 70 break; 71 case WLP_ASSOC_C1: 72 INIT_WORK(&frame_ctx->ws, wlp_handle_c1_frame); 73 schedule_work(&frame_ctx->ws); 74 break; 75 case WLP_ASSOC_C3: 76 INIT_WORK(&frame_ctx->ws, wlp_handle_c3_frame); 77 schedule_work(&frame_ctx->ws); 78 break; ··· 87 kfree(frame_ctx); 88 break; 89 } 90 } 91 92 + /* 93 * Process incoming association frame 94 * 95 * Although it could be possible to deal with some incoming association ··· 112 struct wlp_frame_assoc *assoc = (void *) skb->data; 113 struct wlp_session *session = wlp->session; 114 u8 version; 115 116 if (wlp_get_version(wlp, &assoc->version, &version, 117 sizeof(assoc->version)) < 0) ··· 150 } else { 151 wlp_direct_assoc_frame(wlp, skb, src); 152 } 153 return; 154 error: 155 kfree_skb(skb); 156 } 157 158 + /* 159 * Verify incoming frame is from connected neighbor, prep to pass to WLP client 160 * 161 * Verification proceeds according to WLP 0.99 [7.3.1]. The source address ··· 176 struct wlp_eda_node eda_entry; 177 struct wlp_frame_std_abbrv_hdr *hdr = (void *) skb->data; 178 179 /*verify*/ 180 result = wlp_copy_eda_node(&wlp->eda, src, &eda_entry); 181 if (result < 0) { ··· 207 /*prep*/ 208 skb_pull(skb, sizeof(*hdr)); 209 out: 210 return result; 211 } 212 213 + /* 214 * Receive a WLP frame from device 215 * 216 * @returns: 1 if calling function should free the skb ··· 226 struct wlp_frame_hdr *hdr; 227 int result = 0; 228 229 if (len < sizeof(*hdr)) { 230 dev_err(dev, "Not enough data to parse WLP header.\n"); 231 result = -EINVAL; 232 goto out; 233 } 234 hdr = ptr; 235 if (le16_to_cpu(hdr->mux_hdr) != WLP_PROTOCOL_ID) { 236 dev_err(dev, "Not a WLP frame type.\n"); 237 result = -EINVAL; ··· 270 "WLP header.\n"); 271 goto out; 272 } 273 wlp_receive_assoc_frame(wlp, skb, src); 274 break; 275 default: ··· 283 kfree_skb(skb); 284 result = 0; 285 } 286 return result; 287 } 288 EXPORT_SYMBOL_GPL(wlp_receive_frame); 289 290 291 + /* 292 * Verify frame from network stack, prepare for further transmission 293 * 294 * @skb: the socket buffer that needs to be prepared for transmission (it ··· 343 int result = -EINVAL; 344 struct ethhdr *eth_hdr = (void *) skb->data; 345 346 if (is_broadcast_ether_addr(eth_hdr->h_dest)) { 347 result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb); 348 if (result < 0) { 349 if (printk_ratelimit()) ··· 357 result = 1; 358 /* Frame will be transmitted by WLP. 
*/ 359 } else { 360 result = wlp_eda_for_virtual(&wlp->eda, eth_hdr->h_dest, dst, 361 wlp_wss_prep_hdr, skb); 362 if (unlikely(result < 0)) { ··· 368 } 369 } 370 out: 371 return result; 372 } 373 EXPORT_SYMBOL_GPL(wlp_prepare_tx_frame);
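Note: the association-frame dispatcher above defers processing to work items: it allocates a struct wlp_assoc_frame_ctx with GFP_ATOMIC (receive context), stores the wlp instance, the skb and the source address in it, then INIT_WORK()s and schedules the matching handler. The handlers in messages.c (wlp_handle_d1_frame() and friends) recover the context with container_of() and free both the skb and the context when done. A minimal sketch of that consumer side, with a hypothetical handler name and relying on the same headers messages.c already includes:

static void wlp_handle_example_frame(struct work_struct *ws)
{
	struct wlp_assoc_frame_ctx *frame_ctx =
		container_of(ws, struct wlp_assoc_frame_ctx, ws);
	struct sk_buff *skb = frame_ctx->skb;

	/* ... parse the association frame in skb, using frame_ctx->wlp
	 * and frame_ctx->src as the handlers above do ... */

	kfree_skb(skb);		/* the context owns the skb once scheduled */
	kfree(frame_ctx);
}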
-4
drivers/uwb/wlp/wlp-internal.h
··· 42 extern struct kobj_type wss_ktype; 43 extern struct attribute_group wss_attr_group; 44 45 - extern int uwb_rc_ie_add(struct uwb_rc *, const struct uwb_ie_hdr *, size_t); 46 - extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie); 47 - 48 - 49 /* This should be changed to a dynamic array where entries are sorted 50 * by eth_addr and search is done in a binary form 51 *
··· 42 extern struct kobj_type wss_ktype; 43 extern struct attribute_group wss_attr_group; 44 45 /* This should be changed to a dynamic array where entries are sorted 46 * by eth_addr and search is done in a binary form 47 *
+27 -53
drivers/uwb/wlp/wlp-lc.c
··· 21 * 22 * FIXME: docs 23 */ 24 - 25 #include <linux/wlp.h> 26 - #define D_LOCAL 6 27 - #include <linux/uwb/debug.h> 28 - #include "wlp-internal.h" 29 30 31 static 32 void wlp_neighbor_init(struct wlp_neighbor_e *neighbor) ··· 58 static 59 void __wlp_fill_device_info(struct wlp *wlp) 60 { 61 - struct device *dev = &wlp->rc->uwb_dev.dev; 62 - 63 - BUG_ON(wlp->fill_device_info == NULL); 64 - d_printf(6, dev, "Retrieving device information " 65 - "from device driver.\n"); 66 wlp->fill_device_info(wlp, wlp->dev_info); 67 } 68 ··· 119 } 120 } 121 122 - /** 123 * Populate WLP neighborhood cache with neighbor information 124 * 125 * A new neighbor is found. If it is discoverable then we add it to the ··· 133 int discoverable; 134 struct wlp_neighbor_e *neighbor; 135 136 - d_fnstart(6, &dev->dev, "uwb %p \n", dev); 137 - d_printf(6, &dev->dev, "Found neighbor device %02x:%02x \n", 138 - dev->dev_addr.data[1], dev->dev_addr.data[0]); 139 - /** 140 * FIXME: 141 * Use contents of WLP IE found in beacon cache to determine if 142 * neighbor is discoverable. ··· 156 list_add(&neighbor->node, &wlp->neighbors); 157 } 158 error_no_mem: 159 - d_fnend(6, &dev->dev, "uwb %p, result = %d \n", dev, result); 160 return result; 161 } 162 ··· 243 dev_err(dev, "Unable to send D1 frame to neighbor " 244 "%02x:%02x (%d)\n", dev_addr->data[1], 245 dev_addr->data[0], result); 246 - d_printf(6, dev, "Add placeholders into buffer next to " 247 - "neighbor information we have (dev address).\n"); 248 goto out; 249 } 250 /* Create session, wait for response */ ··· 270 /* Parse message in session->data: it will be either D2 or F0 */ 271 skb = session.data; 272 resp = (void *) skb->data; 273 - d_printf(6, dev, "Received response to D1 frame. \n"); 274 - d_dump(6, dev, skb->data, skb->len > 72 ? 72 : skb->len); 275 276 if (resp->type == WLP_ASSOC_F0) { 277 result = wlp_parse_f0(wlp, skb); ··· 321 struct device *dev = &wlp->rc->uwb_dev.dev; 322 char buf[WLP_WSS_UUID_STRSIZE]; 323 struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr; 324 wlp_wss_uuid_print(buf, sizeof(buf), wssid); 325 - d_fnstart(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n", 326 - wlp, neighbor, wss, wssid, buf); 327 - d_printf(6, dev, "Complete me.\n"); 328 result = wlp_d1d2_exchange(wlp, neighbor, wss, wssid); 329 if (result < 0) { 330 dev_err(dev, "WLP: D1/D2 message exchange for enrollment " ··· 343 goto error; 344 } else { 345 wss->state = WLP_WSS_STATE_ENROLLED; 346 - d_printf(2, dev, "WLP: Success Enrollment into unsecure WSS " 347 - "%s using neighbor %02x:%02x. \n", buf, 348 - dev_addr->data[1], dev_addr->data[0]); 349 } 350 - 351 - d_fnend(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n", 352 - wlp, neighbor, wss, wssid, buf); 353 out: 354 return result; 355 error: ··· 429 int result = 0; 430 struct device *dev = &wlp->rc->uwb_dev.dev; 431 432 - d_fnstart(6, dev, "wlp %p \n", wlp); 433 mutex_lock(&wlp->nbmutex); 434 /* Clear current neighborhood cache. 
*/ 435 __wlp_neighbors_release(wlp); ··· 448 } 449 error_dev_for_each: 450 mutex_unlock(&wlp->nbmutex); 451 - d_fnend(6, dev, "wlp %p \n", wlp); 452 return result; 453 } 454 ··· 470 int result; 471 switch (event) { 472 case UWB_NOTIF_ONAIR: 473 - d_printf(6, dev, "UWB device %02x:%02x is onair\n", 474 - uwb_dev->dev_addr.data[1], 475 - uwb_dev->dev_addr.data[0]); 476 result = wlp_eda_create_node(&wlp->eda, 477 uwb_dev->mac_addr.data, 478 &uwb_dev->dev_addr); ··· 480 uwb_dev->dev_addr.data[0]); 481 break; 482 case UWB_NOTIF_OFFAIR: 483 - d_printf(6, dev, "UWB device %02x:%02x is offair\n", 484 - uwb_dev->dev_addr.data[1], 485 - uwb_dev->dev_addr.data[0]); 486 wlp_eda_rm_node(&wlp->eda, &uwb_dev->dev_addr); 487 mutex_lock(&wlp->nbmutex); 488 - list_for_each_entry_safe(neighbor, next, &wlp->neighbors, 489 - node) { 490 - if (neighbor->uwb_dev == uwb_dev) { 491 - d_printf(6, dev, "Removing device from " 492 - "neighborhood.\n"); 493 __wlp_neighbor_release(neighbor); 494 - } 495 } 496 mutex_unlock(&wlp->nbmutex); 497 break; ··· 494 } 495 } 496 497 - int wlp_setup(struct wlp *wlp, struct uwb_rc *rc) 498 { 499 - struct device *dev = &rc->uwb_dev.dev; 500 int result; 501 502 - d_fnstart(6, dev, "wlp %p\n", wlp); 503 BUG_ON(wlp->fill_device_info == NULL); 504 BUG_ON(wlp->xmit_frame == NULL); 505 BUG_ON(wlp->stop_queue == NULL); 506 BUG_ON(wlp->start_queue == NULL); 507 wlp->rc = rc; 508 wlp_eda_init(&wlp->eda);/* Set up address cache */ 509 wlp->uwb_notifs_handler.cb = wlp_uwb_notifs_cb; 510 wlp->uwb_notifs_handler.data = wlp; 511 uwb_notifs_register(rc, &wlp->uwb_notifs_handler); 512 513 uwb_pal_init(&wlp->pal); 514 - result = uwb_pal_register(rc, &wlp->pal); 515 if (result < 0) 516 uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); 517 518 - d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); 519 return result; 520 } 521 EXPORT_SYMBOL_GPL(wlp_setup); 522 523 void wlp_remove(struct wlp *wlp) 524 { 525 - struct device *dev = &wlp->rc->uwb_dev.dev; 526 - d_fnstart(6, dev, "wlp %p\n", wlp); 527 wlp_neighbors_release(wlp); 528 - uwb_pal_unregister(wlp->rc, &wlp->pal); 529 uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); 530 wlp_eda_release(&wlp->eda); 531 mutex_lock(&wlp->mutex); ··· 542 kfree(wlp->dev_info); 543 mutex_unlock(&wlp->mutex); 544 wlp->rc = NULL; 545 - /* We have to use NULL here because this function can be called 546 - * when the device disappeared. */ 547 - d_fnend(6, NULL, "wlp %p\n", wlp); 548 } 549 EXPORT_SYMBOL_GPL(wlp_remove); 550
··· 21 * 22 * FIXME: docs 23 */ 24 #include <linux/wlp.h> 25 26 + #include "wlp-internal.h" 27 28 static 29 void wlp_neighbor_init(struct wlp_neighbor_e *neighbor) ··· 61 static 62 void __wlp_fill_device_info(struct wlp *wlp) 63 { 64 wlp->fill_device_info(wlp, wlp->dev_info); 65 } 66 ··· 127 } 128 } 129 130 + /* 131 * Populate WLP neighborhood cache with neighbor information 132 * 133 * A new neighbor is found. If it is discoverable then we add it to the ··· 141 int discoverable; 142 struct wlp_neighbor_e *neighbor; 143 144 + /* 145 * FIXME: 146 * Use contents of WLP IE found in beacon cache to determine if 147 * neighbor is discoverable. ··· 167 list_add(&neighbor->node, &wlp->neighbors); 168 } 169 error_no_mem: 170 return result; 171 } 172 ··· 255 dev_err(dev, "Unable to send D1 frame to neighbor " 256 "%02x:%02x (%d)\n", dev_addr->data[1], 257 dev_addr->data[0], result); 258 goto out; 259 } 260 /* Create session, wait for response */ ··· 284 /* Parse message in session->data: it will be either D2 or F0 */ 285 skb = session.data; 286 resp = (void *) skb->data; 287 288 if (resp->type == WLP_ASSOC_F0) { 289 result = wlp_parse_f0(wlp, skb); ··· 337 struct device *dev = &wlp->rc->uwb_dev.dev; 338 char buf[WLP_WSS_UUID_STRSIZE]; 339 struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr; 340 + 341 wlp_wss_uuid_print(buf, sizeof(buf), wssid); 342 + 343 result = wlp_d1d2_exchange(wlp, neighbor, wss, wssid); 344 if (result < 0) { 345 dev_err(dev, "WLP: D1/D2 message exchange for enrollment " ··· 360 goto error; 361 } else { 362 wss->state = WLP_WSS_STATE_ENROLLED; 363 + dev_dbg(dev, "WLP: Success Enrollment into unsecure WSS " 364 + "%s using neighbor %02x:%02x. \n", 365 + buf, dev_addr->data[1], dev_addr->data[0]); 366 } 367 out: 368 return result; 369 error: ··· 449 int result = 0; 450 struct device *dev = &wlp->rc->uwb_dev.dev; 451 452 mutex_lock(&wlp->nbmutex); 453 /* Clear current neighborhood cache. 
*/ 454 __wlp_neighbors_release(wlp); ··· 469 } 470 error_dev_for_each: 471 mutex_unlock(&wlp->nbmutex); 472 return result; 473 } 474 ··· 492 int result; 493 switch (event) { 494 case UWB_NOTIF_ONAIR: 495 result = wlp_eda_create_node(&wlp->eda, 496 uwb_dev->mac_addr.data, 497 &uwb_dev->dev_addr); ··· 505 uwb_dev->dev_addr.data[0]); 506 break; 507 case UWB_NOTIF_OFFAIR: 508 wlp_eda_rm_node(&wlp->eda, &uwb_dev->dev_addr); 509 mutex_lock(&wlp->nbmutex); 510 + list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) { 511 + if (neighbor->uwb_dev == uwb_dev) 512 __wlp_neighbor_release(neighbor); 513 } 514 mutex_unlock(&wlp->nbmutex); 515 break; ··· 526 } 527 } 528 529 + static void wlp_channel_changed(struct uwb_pal *pal, int channel) 530 { 531 + struct wlp *wlp = container_of(pal, struct wlp, pal); 532 + 533 + if (channel < 0) 534 + netif_carrier_off(wlp->ndev); 535 + else 536 + netif_carrier_on(wlp->ndev); 537 + } 538 + 539 + int wlp_setup(struct wlp *wlp, struct uwb_rc *rc, struct net_device *ndev) 540 + { 541 int result; 542 543 BUG_ON(wlp->fill_device_info == NULL); 544 BUG_ON(wlp->xmit_frame == NULL); 545 BUG_ON(wlp->stop_queue == NULL); 546 BUG_ON(wlp->start_queue == NULL); 547 + 548 wlp->rc = rc; 549 + wlp->ndev = ndev; 550 wlp_eda_init(&wlp->eda);/* Set up address cache */ 551 wlp->uwb_notifs_handler.cb = wlp_uwb_notifs_cb; 552 wlp->uwb_notifs_handler.data = wlp; 553 uwb_notifs_register(rc, &wlp->uwb_notifs_handler); 554 555 uwb_pal_init(&wlp->pal); 556 + wlp->pal.rc = rc; 557 + wlp->pal.channel_changed = wlp_channel_changed; 558 + result = uwb_pal_register(&wlp->pal); 559 if (result < 0) 560 uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); 561 562 return result; 563 } 564 EXPORT_SYMBOL_GPL(wlp_setup); 565 566 void wlp_remove(struct wlp *wlp) 567 { 568 wlp_neighbors_release(wlp); 569 + uwb_pal_unregister(&wlp->pal); 570 uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); 571 wlp_eda_release(&wlp->eda); 572 mutex_lock(&wlp->mutex); ··· 565 kfree(wlp->dev_info); 566 mutex_unlock(&wlp->mutex); 567 wlp->rc = NULL; 568 } 569 EXPORT_SYMBOL_GPL(wlp_remove); 570
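Note: wlp_setup() now takes the net_device and registers a channel_changed callback, so the carrier follows the radio (wlp_channel_changed() above drops the carrier when the channel goes to -1). The other half of that arrangement, asking the radio manager to start or stop the radio when the interface comes up or goes down, lives in the WLP network driver and is not part of this hunk. A hypothetical sketch of that wiring, using the uwb_radio_start()/uwb_radio_stop() declarations added to include/linux/uwb.h later in this diff; "ndev_to_wlp()" is a placeholder accessor, not a real helper:

static int example_wlp_open(struct net_device *ndev)
{
	struct wlp *wlp = ndev_to_wlp(ndev);	/* placeholder accessor */

	/* ask the radio manager to beacon on behalf of this PAL;
	 * wlp_channel_changed() sets the carrier once a channel is active */
	return uwb_radio_start(&wlp->pal);
}

static int example_wlp_stop(struct net_device *ndev)
{
	struct wlp *wlp = ndev_to_wlp(ndev);	/* placeholder accessor */

	uwb_radio_stop(&wlp->pal);
	return 0;
}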
+18 -112
drivers/uwb/wlp/wss-lc.c
··· 43 * wlp_wss_release() 44 * wlp_wss_reset() 45 */ 46 - 47 #include <linux/etherdevice.h> /* for is_valid_ether_addr */ 48 #include <linux/skbuff.h> 49 #include <linux/wlp.h> 50 - #define D_LOCAL 5 51 - #include <linux/uwb/debug.h> 52 - #include "wlp-internal.h" 53 54 55 size_t wlp_wss_key_print(char *buf, size_t bufsize, u8 *key) 56 { ··· 113 */ 114 void wlp_wss_reset(struct wlp_wss *wss) 115 { 116 - struct wlp *wlp = container_of(wss, struct wlp, wss); 117 - struct device *dev = &wlp->rc->uwb_dev.dev; 118 - d_fnstart(5, dev, "wss (%p) \n", wss); 119 memset(&wss->wssid, 0, sizeof(wss->wssid)); 120 wss->hash = 0; 121 memset(&wss->name[0], 0, sizeof(wss->name)); ··· 121 memset(&wss->master_key[0], 0, sizeof(wss->master_key)); 122 wss->tag = 0; 123 wss->state = WLP_WSS_STATE_NONE; 124 - d_fnend(5, dev, "wss (%p) \n", wss); 125 } 126 127 /** ··· 138 struct device *dev = &wlp->rc->uwb_dev.dev; 139 int result; 140 141 - d_fnstart(5, dev, "wss (%p), wssid: %s\n", wss, wssid_str); 142 result = kobject_set_name(&wss->kobj, "wss-%s", wssid_str); 143 if (result < 0) 144 return result; ··· 154 result); 155 goto error_sysfs_create_group; 156 } 157 - d_fnend(5, dev, "Completed. result = %d \n", result); 158 return 0; 159 error_sysfs_create_group: 160 ··· 205 struct wlp *wlp = container_of(wss, struct wlp, wss); 206 struct device *dev = &wlp->rc->uwb_dev.dev; 207 struct wlp_neighbor_e *neighbor; 208 - char buf[WLP_WSS_UUID_STRSIZE]; 209 int result = -ENXIO; 210 struct uwb_dev_addr *dev_addr; 211 212 - wlp_wss_uuid_print(buf, sizeof(buf), wssid); 213 - d_fnstart(5, dev, "wss %p, wssid %s, registrar %02x:%02x \n", 214 - wss, buf, dest->data[1], dest->data[0]); 215 mutex_lock(&wlp->nbmutex); 216 list_for_each_entry(neighbor, &wlp->neighbors, node) { 217 dev_addr = &neighbor->uwb_dev->dev_addr; 218 if (!memcmp(dest, dev_addr, sizeof(*dest))) { 219 - d_printf(5, dev, "Neighbor %02x:%02x is valid, " 220 - "enrolling. \n", 221 - dev_addr->data[1], dev_addr->data[0]); 222 - result = wlp_enroll_neighbor(wlp, neighbor, wss, 223 - wssid); 224 break; 225 } 226 } ··· 220 dev_err(dev, "WLP: Cannot find neighbor %02x:%02x. \n", 221 dest->data[1], dest->data[0]); 222 mutex_unlock(&wlp->nbmutex); 223 - d_fnend(5, dev, "wss %p, wssid %s, registrar %02x:%02x, result %d \n", 224 - wss, buf, dest->data[1], dest->data[0], result); 225 return result; 226 } 227 ··· 241 char buf[WLP_WSS_UUID_STRSIZE]; 242 int result = -ENXIO; 243 244 - wlp_wss_uuid_print(buf, sizeof(buf), wssid); 245 - d_fnstart(5, dev, "wss %p, wssid %s \n", wss, buf); 246 mutex_lock(&wlp->nbmutex); 247 list_for_each_entry(neighbor, &wlp->neighbors, node) { 248 list_for_each_entry(wssid_e, &neighbor->wssid, node) { 249 if (!memcmp(wssid, &wssid_e->wssid, sizeof(*wssid))) { 250 - d_printf(5, dev, "Found WSSID %s in neighbor " 251 - "%02x:%02x cache. \n", buf, 252 - neighbor->uwb_dev->dev_addr.data[1], 253 - neighbor->uwb_dev->dev_addr.data[0]); 254 result = wlp_enroll_neighbor(wlp, neighbor, 255 wss, wssid); 256 if (result == 0) /* enrollment success */ ··· 255 } 256 } 257 out: 258 - if (result == -ENXIO) 259 dev_err(dev, "WLP: Cannot find WSSID %s in cache. 
\n", buf); 260 mutex_unlock(&wlp->nbmutex); 261 - d_fnend(5, dev, "wss %p, wssid %s, result %d \n", wss, buf, result); 262 return result; 263 } 264 ··· 284 struct uwb_dev_addr bcast = {.data = {0xff, 0xff} }; 285 286 wlp_wss_uuid_print(buf, sizeof(buf), wssid); 287 if (wss->state != WLP_WSS_STATE_NONE) { 288 dev_err(dev, "WLP: Already enrolled in WSS %s.\n", buf); 289 result = -EEXIST; 290 goto error; 291 } 292 - if (!memcmp(&bcast, devaddr, sizeof(bcast))) { 293 - d_printf(5, dev, "Request to enroll in discovered WSS " 294 - "with WSSID %s \n", buf); 295 result = wlp_wss_enroll_discovered(wss, wssid); 296 - } else { 297 - d_printf(5, dev, "Request to enroll in WSSID %s with " 298 - "registrar %02x:%02x\n", buf, devaddr->data[1], 299 - devaddr->data[0]); 300 result = wlp_wss_enroll_target(wss, wssid, devaddr); 301 - } 302 if (result < 0) { 303 dev_err(dev, "WLP: Unable to enroll into WSS %s, result %d \n", 304 buf, result); 305 goto error; 306 } 307 - d_printf(2, dev, "Successfully enrolled into WSS %s \n", buf); 308 result = wlp_wss_sysfs_add(wss, buf); 309 if (result < 0) { 310 dev_err(dev, "WLP: Unable to set up sysfs for WSS kobject.\n"); ··· 335 u8 hash; /* only include one hash */ 336 } ie_data; 337 338 - d_fnstart(5, dev, "Activating WSS %p. \n", wss); 339 BUG_ON(wss->state != WLP_WSS_STATE_ENROLLED); 340 wss->hash = wlp_wss_comp_wssid_hash(&wss->wssid); 341 wss->tag = wss->hash; ··· 353 wss->state = WLP_WSS_STATE_ACTIVE; 354 result = 0; 355 error_wlp_ie: 356 - d_fnend(5, dev, "Activating WSS %p, result = %d \n", wss, result); 357 return result; 358 } 359 ··· 375 int result = 0; 376 char buf[WLP_WSS_UUID_STRSIZE]; 377 378 - d_fnstart(5, dev, "Enrollment and activation requested. \n"); 379 mutex_lock(&wss->mutex); 380 result = wlp_wss_enroll(wss, wssid, devaddr); 381 if (result < 0) { ··· 393 error_activate: 394 error_enroll: 395 mutex_unlock(&wss->mutex); 396 - d_fnend(5, dev, "Completed. result = %d \n", result); 397 return result; 398 } 399 ··· 415 struct device *dev = &wlp->rc->uwb_dev.dev; 416 int result = 0; 417 char buf[WLP_WSS_UUID_STRSIZE]; 418 - d_fnstart(5, dev, "Request to create new WSS.\n"); 419 result = wlp_wss_uuid_print(buf, sizeof(buf), wssid); 420 - d_printf(5, dev, "Request to create WSS: WSSID=%s, name=%s, " 421 - "sec_status=%u, accepting enrollment=%u \n", 422 - buf, name, sec_status, accept); 423 if (!mutex_trylock(&wss->mutex)) { 424 dev_err(dev, "WLP: WLP association session in progress.\n"); 425 return -EBUSY; ··· 464 result = 0; 465 out: 466 mutex_unlock(&wss->mutex); 467 - d_fnend(5, dev, "Completed. result = %d \n", result); 468 return result; 469 } 470 ··· 485 { 486 int result = 0; 487 struct device *dev = &wlp->rc->uwb_dev.dev; 488 - char buf[WLP_WSS_UUID_STRSIZE]; 489 DECLARE_COMPLETION_ONSTACK(completion); 490 struct wlp_session session; 491 struct sk_buff *skb; 492 struct wlp_frame_assoc *resp; 493 struct wlp_uuid wssid; 494 495 - wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); 496 - d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", 497 - wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); 498 mutex_lock(&wlp->mutex); 499 /* Send C1 association frame */ 500 result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C1); ··· 526 /* Parse message in session->data: it will be either C2 or F0 */ 527 skb = session.data; 528 resp = (void *) skb->data; 529 - d_printf(5, dev, "Received response to C1 frame. \n"); 530 - d_dump(5, dev, skb->data, skb->len > 72 ? 
72 : skb->len); 531 if (resp->type == WLP_ASSOC_F0) { 532 result = wlp_parse_f0(wlp, skb); 533 if (result < 0) ··· 543 result = 0; 544 goto error_resp_parse; 545 } 546 - if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))) { 547 - d_printf(5, dev, "WSSID in C2 frame matches local " 548 - "active WSS.\n"); 549 result = 1; 550 - } else { 551 dev_err(dev, "WLP: Received a C2 frame without matching " 552 "WSSID.\n"); 553 result = 0; ··· 555 out: 556 wlp->session = NULL; 557 mutex_unlock(&wlp->mutex); 558 - d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", 559 - wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); 560 return result; 561 } 562 ··· 575 { 576 struct device *dev = &wlp->rc->uwb_dev.dev; 577 int result = 0; 578 - char buf[WLP_WSS_UUID_STRSIZE]; 579 - wlp_wss_uuid_print(buf, sizeof(buf), wssid); 580 - d_fnstart(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual " 581 - "%02x:%02x:%02x:%02x:%02x:%02x \n", wlp, wss, buf, *tag, 582 - virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], 583 - virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]); 584 585 if (!memcmp(wssid, &wss->wssid, sizeof(*wssid))) { 586 - d_printf(5, dev, "WSSID from neighbor frame matches local " 587 - "active WSS.\n"); 588 /* Update EDA cache */ 589 result = wlp_eda_update_node(&wlp->eda, dev_addr, wss, 590 (void *) virt_addr->data, *tag, ··· 585 dev_err(dev, "WLP: Unable to update EDA cache " 586 "with new connected neighbor information.\n"); 587 } else { 588 - dev_err(dev, "WLP: Neighbor does not have matching " 589 - "WSSID.\n"); 590 result = -EINVAL; 591 } 592 - 593 - d_fnend(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual " 594 - "%02x:%02x:%02x:%02x:%02x:%02x, result = %d \n", 595 - wlp, wss, buf, *tag, 596 - virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], 597 - virt_addr->data[3], virt_addr->data[4], virt_addr->data[5], 598 - result); 599 - 600 return result; 601 } 602 ··· 603 { 604 int result; 605 struct device *dev = &wlp->rc->uwb_dev.dev; 606 - char buf[WLP_WSS_UUID_STRSIZE]; 607 struct wlp_uuid wssid; 608 u8 tag; 609 struct uwb_mac_addr virt_addr; ··· 611 struct wlp_frame_assoc *resp; 612 struct sk_buff *skb; 613 614 - wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); 615 - d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", 616 - wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); 617 mutex_lock(&wlp->mutex); 618 /* Send C3 association frame */ 619 result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C3); ··· 645 /* Parse message in session->data: it will be either C4 or F0 */ 646 skb = session.data; 647 resp = (void *) skb->data; 648 - d_printf(5, dev, "Received response to C3 frame. \n"); 649 - d_dump(5, dev, skb->data, skb->len > 72 ? 
72 : skb->len); 650 if (resp->type == WLP_ASSOC_F0) { 651 result = wlp_parse_f0(wlp, skb); 652 if (result < 0) ··· 676 WLP_WSS_CONNECT_FAILED); 677 wlp->session = NULL; 678 mutex_unlock(&wlp->mutex); 679 - d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", 680 - wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); 681 return result; 682 } 683 ··· 710 struct wlp_wss *wss = &wlp->wss; 711 int result; 712 struct device *dev = &wlp->rc->uwb_dev.dev; 713 - char buf[WLP_WSS_UUID_STRSIZE]; 714 715 mutex_lock(&wss->mutex); 716 - wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); 717 - d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", 718 - wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); 719 if (wss->state < WLP_WSS_STATE_ACTIVE) { 720 if (printk_ratelimit()) 721 dev_err(dev, "WLP: Attempting to connect with " ··· 762 BUG_ON(wlp->start_queue == NULL); 763 wlp->start_queue(wlp); 764 mutex_unlock(&wss->mutex); 765 - d_fnend(5, dev, "wlp %p, wss %p (wssid %s)\n", wlp, wss, buf); 766 } 767 768 /** ··· 780 struct sk_buff *skb = _skb; 781 struct wlp_frame_std_abbrv_hdr *std_hdr; 782 783 - d_fnstart(6, dev, "wlp %p \n", wlp); 784 if (eda_entry->state == WLP_WSS_CONNECTED) { 785 /* Add WLP header */ 786 BUG_ON(skb_headroom(skb) < sizeof(*std_hdr)); ··· 797 dev_addr->data[0]); 798 result = -EINVAL; 799 } 800 - d_fnend(6, dev, "wlp %p \n", wlp); 801 return result; 802 } 803 ··· 816 { 817 int result = 0; 818 struct device *dev = &wlp->rc->uwb_dev.dev; 819 - struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; 820 - unsigned char *eth_addr = eda_entry->eth_addr; 821 struct sk_buff *skb = _skb; 822 struct wlp_assoc_conn_ctx *conn_ctx; 823 824 - d_fnstart(5, dev, "wlp %p\n", wlp); 825 - d_printf(5, dev, "To neighbor %02x:%02x with eth " 826 - "%02x:%02x:%02x:%02x:%02x:%02x\n", dev_addr->data[1], 827 - dev_addr->data[0], eth_addr[0], eth_addr[1], eth_addr[2], 828 - eth_addr[3], eth_addr[4], eth_addr[5]); 829 if (eda_entry->state == WLP_WSS_UNCONNECTED) { 830 /* We don't want any more packets while we set up connection */ 831 BUG_ON(wlp->stop_queue == NULL); ··· 845 "previously. Not retrying. 
\n"); 846 result = -ENONET; 847 goto out; 848 - } else { /* eda_entry->state == WLP_WSS_CONNECTED */ 849 - d_printf(5, dev, "Neighbor is connected, preparing frame.\n"); 850 result = wlp_wss_prep_hdr(wlp, eda_entry, skb); 851 - } 852 out: 853 - d_fnend(5, dev, "wlp %p, result = %d \n", wlp, result); 854 return result; 855 } 856 ··· 870 struct sk_buff *copy; 871 struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; 872 873 - d_fnstart(5, dev, "to neighbor %02x:%02x, skb (%p) \n", 874 - dev_addr->data[1], dev_addr->data[0], skb); 875 copy = skb_copy(skb, GFP_ATOMIC); 876 if (copy == NULL) { 877 if (printk_ratelimit()) ··· 899 dev_kfree_skb_irq(copy);/*we need to free if tx fails */ 900 } 901 out: 902 - d_fnend(5, dev, "to neighbor %02x:%02x \n", dev_addr->data[1], 903 - dev_addr->data[0]); 904 return result; 905 } 906 ··· 914 struct wlp *wlp = container_of(wss, struct wlp, wss); 915 struct device *dev = &wlp->rc->uwb_dev.dev; 916 int result = 0; 917 - d_fnstart(5, dev, "wss (%p) \n", wss); 918 mutex_lock(&wss->mutex); 919 wss->kobj.parent = &net_dev->dev.kobj; 920 if (!is_valid_ether_addr(net_dev->dev_addr)) { ··· 927 sizeof(wss->virtual_addr.data)); 928 out: 929 mutex_unlock(&wss->mutex); 930 - d_fnend(5, dev, "wss (%p) \n", wss); 931 return result; 932 } 933 EXPORT_SYMBOL_GPL(wlp_wss_setup); ··· 943 void wlp_wss_remove(struct wlp_wss *wss) 944 { 945 struct wlp *wlp = container_of(wss, struct wlp, wss); 946 - struct device *dev = &wlp->rc->uwb_dev.dev; 947 - d_fnstart(5, dev, "wss (%p) \n", wss); 948 mutex_lock(&wss->mutex); 949 if (wss->state == WLP_WSS_STATE_ACTIVE) 950 uwb_rc_ie_rm(wlp->rc, UWB_IE_WLP); ··· 957 wlp_eda_release(&wlp->eda); 958 wlp_eda_init(&wlp->eda); 959 mutex_unlock(&wss->mutex); 960 - d_fnend(5, dev, "wss (%p) \n", wss); 961 } 962 EXPORT_SYMBOL_GPL(wlp_wss_remove);
··· 43 * wlp_wss_release() 44 * wlp_wss_reset() 45 */ 46 #include <linux/etherdevice.h> /* for is_valid_ether_addr */ 47 #include <linux/skbuff.h> 48 #include <linux/wlp.h> 49 50 + #include "wlp-internal.h" 51 52 size_t wlp_wss_key_print(char *buf, size_t bufsize, u8 *key) 53 { ··· 116 */ 117 void wlp_wss_reset(struct wlp_wss *wss) 118 { 119 memset(&wss->wssid, 0, sizeof(wss->wssid)); 120 wss->hash = 0; 121 memset(&wss->name[0], 0, sizeof(wss->name)); ··· 127 memset(&wss->master_key[0], 0, sizeof(wss->master_key)); 128 wss->tag = 0; 129 wss->state = WLP_WSS_STATE_NONE; 130 } 131 132 /** ··· 145 struct device *dev = &wlp->rc->uwb_dev.dev; 146 int result; 147 148 result = kobject_set_name(&wss->kobj, "wss-%s", wssid_str); 149 if (result < 0) 150 return result; ··· 162 result); 163 goto error_sysfs_create_group; 164 } 165 return 0; 166 error_sysfs_create_group: 167 ··· 214 struct wlp *wlp = container_of(wss, struct wlp, wss); 215 struct device *dev = &wlp->rc->uwb_dev.dev; 216 struct wlp_neighbor_e *neighbor; 217 int result = -ENXIO; 218 struct uwb_dev_addr *dev_addr; 219 220 mutex_lock(&wlp->nbmutex); 221 list_for_each_entry(neighbor, &wlp->neighbors, node) { 222 dev_addr = &neighbor->uwb_dev->dev_addr; 223 if (!memcmp(dest, dev_addr, sizeof(*dest))) { 224 + result = wlp_enroll_neighbor(wlp, neighbor, wss, wssid); 225 break; 226 } 227 } ··· 237 dev_err(dev, "WLP: Cannot find neighbor %02x:%02x. \n", 238 dest->data[1], dest->data[0]); 239 mutex_unlock(&wlp->nbmutex); 240 return result; 241 } 242 ··· 260 char buf[WLP_WSS_UUID_STRSIZE]; 261 int result = -ENXIO; 262 263 + 264 mutex_lock(&wlp->nbmutex); 265 list_for_each_entry(neighbor, &wlp->neighbors, node) { 266 list_for_each_entry(wssid_e, &neighbor->wssid, node) { 267 if (!memcmp(wssid, &wssid_e->wssid, sizeof(*wssid))) { 268 result = wlp_enroll_neighbor(wlp, neighbor, 269 wss, wssid); 270 if (result == 0) /* enrollment success */ ··· 279 } 280 } 281 out: 282 + if (result == -ENXIO) { 283 + wlp_wss_uuid_print(buf, sizeof(buf), wssid); 284 dev_err(dev, "WLP: Cannot find WSSID %s in cache. 
\n", buf); 285 + } 286 mutex_unlock(&wlp->nbmutex); 287 return result; 288 } 289 ··· 307 struct uwb_dev_addr bcast = {.data = {0xff, 0xff} }; 308 309 wlp_wss_uuid_print(buf, sizeof(buf), wssid); 310 + 311 if (wss->state != WLP_WSS_STATE_NONE) { 312 dev_err(dev, "WLP: Already enrolled in WSS %s.\n", buf); 313 result = -EEXIST; 314 goto error; 315 } 316 + if (!memcmp(&bcast, devaddr, sizeof(bcast))) 317 result = wlp_wss_enroll_discovered(wss, wssid); 318 + else 319 result = wlp_wss_enroll_target(wss, wssid, devaddr); 320 if (result < 0) { 321 dev_err(dev, "WLP: Unable to enroll into WSS %s, result %d \n", 322 buf, result); 323 goto error; 324 } 325 + dev_dbg(dev, "Successfully enrolled into WSS %s \n", buf); 326 result = wlp_wss_sysfs_add(wss, buf); 327 if (result < 0) { 328 dev_err(dev, "WLP: Unable to set up sysfs for WSS kobject.\n"); ··· 363 u8 hash; /* only include one hash */ 364 } ie_data; 365 366 BUG_ON(wss->state != WLP_WSS_STATE_ENROLLED); 367 wss->hash = wlp_wss_comp_wssid_hash(&wss->wssid); 368 wss->tag = wss->hash; ··· 382 wss->state = WLP_WSS_STATE_ACTIVE; 383 result = 0; 384 error_wlp_ie: 385 return result; 386 } 387 ··· 405 int result = 0; 406 char buf[WLP_WSS_UUID_STRSIZE]; 407 408 mutex_lock(&wss->mutex); 409 result = wlp_wss_enroll(wss, wssid, devaddr); 410 if (result < 0) { ··· 424 error_activate: 425 error_enroll: 426 mutex_unlock(&wss->mutex); 427 return result; 428 } 429 ··· 447 struct device *dev = &wlp->rc->uwb_dev.dev; 448 int result = 0; 449 char buf[WLP_WSS_UUID_STRSIZE]; 450 + 451 result = wlp_wss_uuid_print(buf, sizeof(buf), wssid); 452 + 453 if (!mutex_trylock(&wss->mutex)) { 454 dev_err(dev, "WLP: WLP association session in progress.\n"); 455 return -EBUSY; ··· 498 result = 0; 499 out: 500 mutex_unlock(&wss->mutex); 501 return result; 502 } 503 ··· 520 { 521 int result = 0; 522 struct device *dev = &wlp->rc->uwb_dev.dev; 523 DECLARE_COMPLETION_ONSTACK(completion); 524 struct wlp_session session; 525 struct sk_buff *skb; 526 struct wlp_frame_assoc *resp; 527 struct wlp_uuid wssid; 528 529 mutex_lock(&wlp->mutex); 530 /* Send C1 association frame */ 531 result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C1); ··· 565 /* Parse message in session->data: it will be either C2 or F0 */ 566 skb = session.data; 567 resp = (void *) skb->data; 568 if (resp->type == WLP_ASSOC_F0) { 569 result = wlp_parse_f0(wlp, skb); 570 if (result < 0) ··· 584 result = 0; 585 goto error_resp_parse; 586 } 587 + if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))) 588 result = 1; 589 + else { 590 dev_err(dev, "WLP: Received a C2 frame without matching " 591 "WSSID.\n"); 592 result = 0; ··· 598 out: 599 wlp->session = NULL; 600 mutex_unlock(&wlp->mutex); 601 return result; 602 } 603 ··· 620 { 621 struct device *dev = &wlp->rc->uwb_dev.dev; 622 int result = 0; 623 624 if (!memcmp(wssid, &wss->wssid, sizeof(*wssid))) { 625 /* Update EDA cache */ 626 result = wlp_eda_update_node(&wlp->eda, dev_addr, wss, 627 (void *) virt_addr->data, *tag, ··· 638 dev_err(dev, "WLP: Unable to update EDA cache " 639 "with new connected neighbor information.\n"); 640 } else { 641 + dev_err(dev, "WLP: Neighbor does not have matching WSSID.\n"); 642 result = -EINVAL; 643 } 644 return result; 645 } 646 ··· 665 { 666 int result; 667 struct device *dev = &wlp->rc->uwb_dev.dev; 668 struct wlp_uuid wssid; 669 u8 tag; 670 struct uwb_mac_addr virt_addr; ··· 674 struct wlp_frame_assoc *resp; 675 struct sk_buff *skb; 676 677 mutex_lock(&wlp->mutex); 678 /* Send C3 association frame */ 679 result = 
wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C3); ··· 711 /* Parse message in session->data: it will be either C4 or F0 */ 712 skb = session.data; 713 resp = (void *) skb->data; 714 if (resp->type == WLP_ASSOC_F0) { 715 result = wlp_parse_f0(wlp, skb); 716 if (result < 0) ··· 744 WLP_WSS_CONNECT_FAILED); 745 wlp->session = NULL; 746 mutex_unlock(&wlp->mutex); 747 return result; 748 } 749 ··· 780 struct wlp_wss *wss = &wlp->wss; 781 int result; 782 struct device *dev = &wlp->rc->uwb_dev.dev; 783 784 mutex_lock(&wss->mutex); 785 if (wss->state < WLP_WSS_STATE_ACTIVE) { 786 if (printk_ratelimit()) 787 dev_err(dev, "WLP: Attempting to connect with " ··· 836 BUG_ON(wlp->start_queue == NULL); 837 wlp->start_queue(wlp); 838 mutex_unlock(&wss->mutex); 839 } 840 841 /** ··· 855 struct sk_buff *skb = _skb; 856 struct wlp_frame_std_abbrv_hdr *std_hdr; 857 858 if (eda_entry->state == WLP_WSS_CONNECTED) { 859 /* Add WLP header */ 860 BUG_ON(skb_headroom(skb) < sizeof(*std_hdr)); ··· 873 dev_addr->data[0]); 874 result = -EINVAL; 875 } 876 return result; 877 } 878 ··· 893 { 894 int result = 0; 895 struct device *dev = &wlp->rc->uwb_dev.dev; 896 struct sk_buff *skb = _skb; 897 struct wlp_assoc_conn_ctx *conn_ctx; 898 899 if (eda_entry->state == WLP_WSS_UNCONNECTED) { 900 /* We don't want any more packets while we set up connection */ 901 BUG_ON(wlp->stop_queue == NULL); ··· 929 "previously. Not retrying. \n"); 930 result = -ENONET; 931 goto out; 932 + } else /* eda_entry->state == WLP_WSS_CONNECTED */ 933 result = wlp_wss_prep_hdr(wlp, eda_entry, skb); 934 out: 935 return result; 936 } 937 ··· 957 struct sk_buff *copy; 958 struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; 959 960 copy = skb_copy(skb, GFP_ATOMIC); 961 if (copy == NULL) { 962 if (printk_ratelimit()) ··· 988 dev_kfree_skb_irq(copy);/*we need to free if tx fails */ 989 } 990 out: 991 return result; 992 } 993 ··· 1005 struct wlp *wlp = container_of(wss, struct wlp, wss); 1006 struct device *dev = &wlp->rc->uwb_dev.dev; 1007 int result = 0; 1008 + 1009 mutex_lock(&wss->mutex); 1010 wss->kobj.parent = &net_dev->dev.kobj; 1011 if (!is_valid_ether_addr(net_dev->dev_addr)) { ··· 1018 sizeof(wss->virtual_addr.data)); 1019 out: 1020 mutex_unlock(&wss->mutex); 1021 return result; 1022 } 1023 EXPORT_SYMBOL_GPL(wlp_wss_setup); ··· 1035 void wlp_wss_remove(struct wlp_wss *wss) 1036 { 1037 struct wlp *wlp = container_of(wss, struct wlp, wss); 1038 + 1039 mutex_lock(&wss->mutex); 1040 if (wss->state == WLP_WSS_STATE_ACTIVE) 1041 uwb_rc_ie_rm(wlp->rc, UWB_IE_WLP); ··· 1050 wlp_eda_release(&wlp->eda); 1051 wlp_eda_init(&wlp->eda); 1052 mutex_unlock(&wss->mutex); 1053 } 1054 EXPORT_SYMBOL_GPL(wlp_wss_remove);
+1
include/linux/usb/wusb-wa.h
··· 51 WUSB_REQ_GET_TIME = 25, 52 WUSB_REQ_SET_STREAM_IDX = 26, 53 WUSB_REQ_SET_WUSB_MAS = 27, 54 }; 55 56
··· 51 WUSB_REQ_GET_TIME = 25, 52 WUSB_REQ_SET_STREAM_IDX = 26, 53 WUSB_REQ_SET_WUSB_MAS = 27, 54 + WUSB_REQ_CHAN_STOP = 28, 55 }; 56 57
+94 -29
include/linux/uwb.h
··· 30 #include <linux/device.h> 31 #include <linux/mutex.h> 32 #include <linux/timer.h> 33 #include <linux/workqueue.h> 34 #include <linux/uwb/spec.h> 35 ··· 67 struct uwb_dev_addr dev_addr; 68 int beacon_slot; 69 DECLARE_BITMAP(streams, UWB_NUM_STREAMS); 70 }; 71 #define to_uwb_dev(d) container_of(d, struct uwb_dev, dev) 72 ··· 88 struct mutex mutex; 89 }; 90 91 /** 92 * struct uwb_mas_bm - a bitmap of all MAS in a superframe 93 * @bm: a bitmap of length #UWB_NUM_MAS 94 */ 95 struct uwb_mas_bm { 96 DECLARE_BITMAP(bm, UWB_NUM_MAS); 97 }; 98 99 /** ··· 138 * FIXME: further target states TBD. 139 */ 140 enum uwb_rsv_state { 141 - UWB_RSV_STATE_NONE, 142 UWB_RSV_STATE_O_INITIATED, 143 UWB_RSV_STATE_O_PENDING, 144 UWB_RSV_STATE_O_MODIFIED, 145 UWB_RSV_STATE_O_ESTABLISHED, 146 UWB_RSV_STATE_T_ACCEPTED, 147 UWB_RSV_STATE_T_DENIED, 148 UWB_RSV_STATE_T_PENDING, 149 150 UWB_RSV_STATE_LAST, 151 }; ··· 178 struct uwb_dev *dev; 179 struct uwb_dev_addr devaddr; 180 }; 181 }; 182 183 /* ··· 223 * 224 * @status: negotiation status 225 * @stream: stream index allocated for this reservation 226 * @mas: reserved MAS 227 * @drp_ie: the DRP IE 228 * @ie_valid: true iff the DRP IE matches the reservation parameters ··· 239 struct uwb_rc *rc; 240 struct list_head rc_node; 241 struct list_head pal_node; 242 243 struct uwb_dev *owner; 244 struct uwb_rsv_target target; 245 enum uwb_drp_type type; 246 int max_mas; 247 int min_mas; 248 - int sparsity; 249 bool is_multicast; 250 251 uwb_rsv_cb_f callback; 252 void *pal_priv; 253 254 enum uwb_rsv_state state; 255 u8 stream; 256 struct uwb_mas_bm mas; 257 struct uwb_ie_drp *drp_ie; 258 bool ie_valid; 259 struct timer_list timer; 260 - bool expired; 261 }; 262 263 static const ··· 303 bool ie_valid; 304 }; 305 306 307 const char *uwb_rsv_state_str(enum uwb_rsv_state state); 308 const char *uwb_rsv_type_str(enum uwb_drp_type type); ··· 324 void uwb_rsv_terminate(struct uwb_rsv *rsv); 325 326 void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); 327 328 /** 329 * Radio Control Interface instance ··· 388 u8 ctx_roll; 389 390 int beaconing; /* Beaconing state [channel number] */ 391 int scanning; 392 enum uwb_scan_type scan_type:3; 393 unsigned ready:1; 394 struct uwb_notifs_chain notifs_chain; 395 396 struct uwb_drp_avail drp_avail; 397 struct list_head reservations; 398 struct mutex rsvs_mutex; 399 struct workqueue_struct *rsv_workq; 400 - struct work_struct rsv_update_work; 401 402 struct mutex ies_mutex; 403 struct uwb_rc_cmd_set_ie *ies; 404 size_t ies_capacity; 405 406 - spinlock_t pal_lock; 407 struct list_head pals; 408 409 struct uwb_dbg *dbg; 410 }; ··· 422 423 /** 424 * struct uwb_pal - a UWB PAL 425 - * @name: descriptive name for this PAL (wushc, wlp, etc.). 426 * @device: a device for the PAL. Used to link the PAL and the radio 427 * controller in sysfs. 428 * @new_rsv: called when a peer requests a reservation (may be NULL if 429 * the PAL cannot accept reservation requests). 430 * 431 * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB 432 * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP). 
··· 453 struct list_head node; 454 const char *name; 455 struct device *device; 456 - void (*new_rsv)(struct uwb_rsv *rsv); 457 }; 458 459 void uwb_pal_init(struct uwb_pal *pal); 460 - int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal); 461 - void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal); 462 463 /* 464 * General public API ··· 521 struct uwb_rccb *cmd, size_t cmd_size, 522 u8 expected_type, u16 expected_event, 523 struct uwb_rceb **preply); 524 - ssize_t uwb_rc_get_ie(struct uwb_rc *, struct uwb_rc_evt_get_ie **); 525 - int uwb_bg_joined(struct uwb_rc *rc); 526 527 size_t __uwb_addr_print(char *, size_t, const unsigned char *, int); 528 ··· 596 void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t); 597 void uwb_rc_neh_error(struct uwb_rc *, int); 598 void uwb_rc_reset_all(struct uwb_rc *rc); 599 600 /** 601 * uwb_rsv_is_owner - is the owner of this reservation the RC? ··· 609 } 610 611 /** 612 - * Events generated by UWB that can be passed to any listeners 613 * 614 * Higher layers can register callback functions with the radio 615 * controller using uwb_notifs_register(). The radio controller ··· 619 * nodes when an event occurs. 620 */ 621 enum uwb_notifs { 622 - UWB_NOTIF_BG_JOIN = 0, /* radio controller joined a beacon group */ 623 - UWB_NOTIF_BG_LEAVE = 1, /* radio controller left a beacon group */ 624 UWB_NOTIF_ONAIR, 625 UWB_NOTIF_OFFAIR, 626 }; ··· 730 731 /* Information Element handling */ 732 733 - /* For representing the state of writing to a buffer when iterating */ 734 - struct uwb_buf_ctx { 735 - char *buf; 736 - size_t bytes, size; 737 - }; 738 - 739 - typedef int (*uwb_ie_f)(struct uwb_dev *, const struct uwb_ie_hdr *, 740 - size_t, void *); 741 struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); 742 - ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data, 743 - const void *buf, size_t size); 744 - int uwb_ie_dump_hex(struct uwb_dev *, const struct uwb_ie_hdr *, 745 - size_t, void *); 746 - int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *); 747 - struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); 748 - 749 750 /* 751 * Transmission statistics
··· 30 #include <linux/device.h> 31 #include <linux/mutex.h> 32 #include <linux/timer.h> 33 + #include <linux/wait.h> 34 #include <linux/workqueue.h> 35 #include <linux/uwb/spec.h> 36 ··· 66 struct uwb_dev_addr dev_addr; 67 int beacon_slot; 68 DECLARE_BITMAP(streams, UWB_NUM_STREAMS); 69 + DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS); 70 }; 71 #define to_uwb_dev(d) container_of(d, struct uwb_dev, dev) 72 ··· 86 struct mutex mutex; 87 }; 88 89 + /* Beacon cache list */ 90 + struct uwb_beca { 91 + struct list_head list; 92 + size_t entries; 93 + struct mutex mutex; 94 + }; 95 + 96 + /* Event handling thread. */ 97 + struct uwbd { 98 + int pid; 99 + struct task_struct *task; 100 + wait_queue_head_t wq; 101 + struct list_head event_list; 102 + spinlock_t event_list_lock; 103 + }; 104 + 105 /** 106 * struct uwb_mas_bm - a bitmap of all MAS in a superframe 107 * @bm: a bitmap of length #UWB_NUM_MAS 108 */ 109 struct uwb_mas_bm { 110 DECLARE_BITMAP(bm, UWB_NUM_MAS); 111 + DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS); 112 + int safe; 113 + int unsafe; 114 }; 115 116 /** ··· 117 * FIXME: further target states TBD. 118 */ 119 enum uwb_rsv_state { 120 + UWB_RSV_STATE_NONE = 0, 121 UWB_RSV_STATE_O_INITIATED, 122 UWB_RSV_STATE_O_PENDING, 123 UWB_RSV_STATE_O_MODIFIED, 124 UWB_RSV_STATE_O_ESTABLISHED, 125 + UWB_RSV_STATE_O_TO_BE_MOVED, 126 + UWB_RSV_STATE_O_MOVE_EXPANDING, 127 + UWB_RSV_STATE_O_MOVE_COMBINING, 128 + UWB_RSV_STATE_O_MOVE_REDUCING, 129 UWB_RSV_STATE_T_ACCEPTED, 130 UWB_RSV_STATE_T_DENIED, 131 + UWB_RSV_STATE_T_CONFLICT, 132 UWB_RSV_STATE_T_PENDING, 133 + UWB_RSV_STATE_T_EXPANDING_ACCEPTED, 134 + UWB_RSV_STATE_T_EXPANDING_CONFLICT, 135 + UWB_RSV_STATE_T_EXPANDING_PENDING, 136 + UWB_RSV_STATE_T_EXPANDING_DENIED, 137 + UWB_RSV_STATE_T_RESIZED, 138 139 UWB_RSV_STATE_LAST, 140 }; ··· 147 struct uwb_dev *dev; 148 struct uwb_dev_addr devaddr; 149 }; 150 + }; 151 + 152 + struct uwb_rsv_move { 153 + struct uwb_mas_bm final_mas; 154 + struct uwb_ie_drp *companion_drp_ie; 155 + struct uwb_mas_bm companion_mas; 156 }; 157 158 /* ··· 186 * 187 * @status: negotiation status 188 * @stream: stream index allocated for this reservation 189 + * @tiebreaker: conflict tiebreaker for this reservation 190 * @mas: reserved MAS 191 * @drp_ie: the DRP IE 192 * @ie_valid: true iff the DRP IE matches the reservation parameters ··· 201 struct uwb_rc *rc; 202 struct list_head rc_node; 203 struct list_head pal_node; 204 + struct kref kref; 205 206 struct uwb_dev *owner; 207 struct uwb_rsv_target target; 208 enum uwb_drp_type type; 209 int max_mas; 210 int min_mas; 211 + int max_interval; 212 bool is_multicast; 213 214 uwb_rsv_cb_f callback; 215 void *pal_priv; 216 217 enum uwb_rsv_state state; 218 + bool needs_release_companion_mas; 219 u8 stream; 220 + u8 tiebreaker; 221 struct uwb_mas_bm mas; 222 struct uwb_ie_drp *drp_ie; 223 + struct uwb_rsv_move mv; 224 bool ie_valid; 225 struct timer_list timer; 226 + struct work_struct handle_timeout_work; 227 }; 228 229 static const ··· 261 bool ie_valid; 262 }; 263 264 + struct uwb_drp_backoff_win { 265 + u8 window; 266 + u8 n; 267 + int total_expired; 268 + struct timer_list timer; 269 + bool can_reserve_extra_mases; 270 + }; 271 272 const char *uwb_rsv_state_str(enum uwb_rsv_state state); 273 const char *uwb_rsv_type_str(enum uwb_drp_type type); ··· 275 void uwb_rsv_terminate(struct uwb_rsv *rsv); 276 277 void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); 278 + 279 + void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas); 280 281 
/** 282 * Radio Control Interface instance ··· 337 u8 ctx_roll; 338 339 int beaconing; /* Beaconing state [channel number] */ 340 + int beaconing_forced; 341 int scanning; 342 enum uwb_scan_type scan_type:3; 343 unsigned ready:1; 344 struct uwb_notifs_chain notifs_chain; 345 + struct uwb_beca uwb_beca; 346 347 + struct uwbd uwbd; 348 + 349 + struct uwb_drp_backoff_win bow; 350 struct uwb_drp_avail drp_avail; 351 struct list_head reservations; 352 + struct list_head cnflt_alien_list; 353 + struct uwb_mas_bm cnflt_alien_bitmap; 354 struct mutex rsvs_mutex; 355 + spinlock_t rsvs_lock; 356 struct workqueue_struct *rsv_workq; 357 358 + struct delayed_work rsv_update_work; 359 + struct delayed_work rsv_alien_bp_work; 360 + int set_drp_ie_pending; 361 struct mutex ies_mutex; 362 struct uwb_rc_cmd_set_ie *ies; 363 size_t ies_capacity; 364 365 struct list_head pals; 366 + int active_pals; 367 368 struct uwb_dbg *dbg; 369 }; ··· 361 362 /** 363 * struct uwb_pal - a UWB PAL 364 + * @name: descriptive name for this PAL (wusbhc, wlp, etc.). 365 * @device: a device for the PAL. Used to link the PAL and the radio 366 * controller in sysfs. 367 + * @rc: the radio controller the PAL uses. 368 + * @channel_changed: called when the channel used by the radio changes. 369 + * A channel of -1 means the channel has been stopped. 370 * @new_rsv: called when a peer requests a reservation (may be NULL if 371 * the PAL cannot accept reservation requests). 372 + * @channel: channel being used by the PAL; 0 if the PAL isn't using 373 + * the radio; -1 if the PAL wishes to use the radio but 374 + * cannot. 375 + * @debugfs_dir: a debugfs directory which the PAL can use for its own 376 + * debugfs files. 377 * 378 * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB 379 * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP). ··· 384 struct list_head node; 385 const char *name; 386 struct device *device; 387 + struct uwb_rc *rc; 388 + 389 + void (*channel_changed)(struct uwb_pal *pal, int channel); 390 + void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv); 391 + 392 + int channel; 393 + struct dentry *debugfs_dir; 394 }; 395 396 void uwb_pal_init(struct uwb_pal *pal); 397 + int uwb_pal_register(struct uwb_pal *pal); 398 + void uwb_pal_unregister(struct uwb_pal *pal); 399 + 400 + int uwb_radio_start(struct uwb_pal *pal); 401 + void uwb_radio_stop(struct uwb_pal *pal); 402 403 /* 404 * General public API ··· 443 struct uwb_rccb *cmd, size_t cmd_size, 444 u8 expected_type, u16 expected_event, 445 struct uwb_rceb **preply); 446 447 size_t __uwb_addr_print(char *, size_t, const unsigned char *, int); 448 ··· 520 void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t); 521 void uwb_rc_neh_error(struct uwb_rc *, int); 522 void uwb_rc_reset_all(struct uwb_rc *rc); 523 + void uwb_rc_pre_reset(struct uwb_rc *rc); 524 + void uwb_rc_post_reset(struct uwb_rc *rc); 525 526 /** 527 * uwb_rsv_is_owner - is the owner of this reservation the RC? ··· 531 } 532 533 /** 534 + * enum uwb_notifs - UWB events that can be passed to any listeners 535 + * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group. 536 + * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group. 537 * 538 * Higher layers can register callback functions with the radio 539 * controller using uwb_notifs_register(). The radio controller ··· 539 * nodes when an event occurs. 
540 */ 541 enum uwb_notifs { 542 UWB_NOTIF_ONAIR, 543 UWB_NOTIF_OFFAIR, 544 }; ··· 652 653 /* Information Element handling */ 654 655 struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); 656 + int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size); 657 + int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id); 658 659 /* 660 * Transmission statistics
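Note: struct uwb_pal now carries the radio controller pointer plus per-PAL channel_changed/new_rsv callbacks, and uwb_pal_register()/uwb_pal_unregister() lose their explicit rc argument. A minimal registration sketch, modelled on the wlp_setup() sequence earlier in this diff; the "example" PAL and its callback are hypothetical:

struct example_pal {
	struct uwb_pal pal;
	/* ... PAL-private state ... */
};

static void example_channel_changed(struct uwb_pal *pal, int channel)
{
	/* channel == -1 means the radio has stopped */
}

static int example_pal_attach(struct example_pal *ep, struct uwb_rc *rc)
{
	uwb_pal_init(&ep->pal);
	ep->pal.name = "example";
	ep->pal.rc = rc;			/* rc now lives in the PAL */
	ep->pal.channel_changed = example_channel_changed;

	return uwb_pal_register(&ep->pal);	/* no rc argument any more */
}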
+12 -1
include/linux/uwb/debug-cmd.h
··· 32 enum uwb_dbg_cmd_type { 33 UWB_DBG_CMD_RSV_ESTABLISH = 1, 34 UWB_DBG_CMD_RSV_TERMINATE = 2, 35 }; 36 37 struct uwb_dbg_cmd_rsv_establish { ··· 43 __u8 type; 44 __u16 max_mas; 45 __u16 min_mas; 46 - __u8 sparsity; 47 }; 48 49 struct uwb_dbg_cmd_rsv_terminate { 50 int index; 51 }; 52 53 struct uwb_dbg_cmd { ··· 60 union { 61 struct uwb_dbg_cmd_rsv_establish rsv_establish; 62 struct uwb_dbg_cmd_rsv_terminate rsv_terminate; 63 }; 64 }; 65
··· 32 enum uwb_dbg_cmd_type { 33 UWB_DBG_CMD_RSV_ESTABLISH = 1, 34 UWB_DBG_CMD_RSV_TERMINATE = 2, 35 + UWB_DBG_CMD_IE_ADD = 3, 36 + UWB_DBG_CMD_IE_RM = 4, 37 + UWB_DBG_CMD_RADIO_START = 5, 38 + UWB_DBG_CMD_RADIO_STOP = 6, 39 }; 40 41 struct uwb_dbg_cmd_rsv_establish { ··· 39 __u8 type; 40 __u16 max_mas; 41 __u16 min_mas; 42 + __u8 max_interval; 43 }; 44 45 struct uwb_dbg_cmd_rsv_terminate { 46 int index; 47 + }; 48 + 49 + struct uwb_dbg_cmd_ie { 50 + __u8 data[128]; 51 + int len; 52 }; 53 54 struct uwb_dbg_cmd { ··· 51 union { 52 struct uwb_dbg_cmd_rsv_establish rsv_establish; 53 struct uwb_dbg_cmd_rsv_terminate rsv_terminate; 54 + struct uwb_dbg_cmd_ie ie_add; 55 + struct uwb_dbg_cmd_ie ie_rm; 56 }; 57 }; 58
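
The new debug commands are issued by writing a struct uwb_dbg_cmd to the debug PAL's command file. The userspace sketch below sends UWB_DBG_CMD_IE_ADD; it assumes the header is exported to userspace, that the struct's discriminator member is named type (that member is elided in the hunk above), and that the caller knows the debugfs path of the command file.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <linux/uwb/debug-cmd.h>

/* Ask the debug PAL to add the IE in @ie (at most 128 bytes). */
static int uwb_dbg_ie_add(const char *cmd_path, const void *ie, int ie_len)
{
	struct uwb_dbg_cmd cmd;
	ssize_t n;
	int fd;

	if (ie_len < 0 || ie_len > (int)sizeof(cmd.ie_add.data))
		return -1;

	memset(&cmd, 0, sizeof(cmd));
	cmd.type = UWB_DBG_CMD_IE_ADD;
	memcpy(cmd.ie_add.data, ie, ie_len);
	cmd.ie_add.len = ie_len;

	fd = open(cmd_path, O_WRONLY);
	if (fd < 0)
		return -1;
	n = write(fd, &cmd, sizeof(cmd));
	close(fd);
	return n == (ssize_t)sizeof(cmd) ? 0 : -1;
}
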
-82
include/linux/uwb/debug.h
··· 1 - /* 2 - * Ultra Wide Band 3 - * Debug Support 4 - * 5 - * Copyright (C) 2005-2006 Intel Corporation 6 - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> 7 - * 8 - * This program is free software; you can redistribute it and/or 9 - * modify it under the terms of the GNU General Public License version 10 - * 2 as published by the Free Software Foundation. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 20 - * 02110-1301, USA. 21 - * 22 - * 23 - * FIXME: doc 24 - * Invoke like: 25 - * 26 - * #define D_LOCAL 4 27 - * #include <linux/uwb/debug.h> 28 - * 29 - * At the end of your include files. 30 - */ 31 - #include <linux/types.h> 32 - 33 - struct device; 34 - extern void dump_bytes(struct device *dev, const void *_buf, size_t rsize); 35 - 36 - /* Master debug switch; !0 enables, 0 disables */ 37 - #define D_MASTER (!0) 38 - 39 - /* Local (per-file) debug switch; #define before #including */ 40 - #ifndef D_LOCAL 41 - #define D_LOCAL 0 42 - #endif 43 - 44 - #undef __d_printf 45 - #undef d_fnstart 46 - #undef d_fnend 47 - #undef d_printf 48 - #undef d_dump 49 - 50 - #define __d_printf(l, _tag, _dev, f, a...) \ 51 - do { \ 52 - struct device *__dev = (_dev); \ 53 - if (D_MASTER && D_LOCAL >= (l)) { \ 54 - char __head[64] = ""; \ 55 - if (_dev != NULL) { \ 56 - if ((unsigned long)__dev < 4096) \ 57 - printk(KERN_ERR "E: Corrupt dev %p\n", \ 58 - __dev); \ 59 - else \ 60 - snprintf(__head, sizeof(__head), \ 61 - "%s %s: ", \ 62 - dev_driver_string(__dev), \ 63 - __dev->bus_id); \ 64 - } \ 65 - printk(KERN_ERR "%s%s" _tag ": " f, __head, \ 66 - __func__, ## a); \ 67 - } \ 68 - } while (0 && _dev) 69 - 70 - #define d_fnstart(l, _dev, f, a...) \ 71 - __d_printf(l, " FNSTART", _dev, f, ## a) 72 - #define d_fnend(l, _dev, f, a...) \ 73 - __d_printf(l, " FNEND", _dev, f, ## a) 74 - #define d_printf(l, _dev, f, a...) \ 75 - __d_printf(l, "", _dev, f, ## a) 76 - #define d_dump(l, _dev, ptr, size) \ 77 - do { \ 78 - struct device *__dev = _dev; \ 79 - if (D_MASTER && D_LOCAL >= (l)) \ 80 - dump_bytes(__dev, ptr, size); \ 81 - } while (0 && _dev) 82 - #define d_test(l) (D_MASTER && D_LOCAL >= (l))
···
+53
include/linux/uwb/spec.h
··· 59 #define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES) 60 61 /* 62 * Number of streams per DRP reservation between a pair of devices. 63 * 64 * [ECMA-368] section 16.8.6. ··· 97 * [ECMA-368] section 17.16 98 */ 99 enum { UWB_MAX_LOST_BEACONS = 3 }; 100 101 /* 102 * Length of a superframe in microseconds. ··· 225 UWB_DRP_REASON_MODIFIED, 226 }; 227 228 /** 229 * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9]) 230 */ ··· 283 UWB_APP_SPEC_PROBE_IE = 15, 284 UWB_IDENTIFICATION_IE = 19, 285 UWB_MASTER_KEY_ID_IE = 20, 286 UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */ 287 UWB_APP_SPEC_IE = 255, 288 }; ··· 396 struct uwb_ie_hdr hdr; 397 DECLARE_BITMAP(bmp, UWB_NUM_MAS); 398 } __attribute__((packed)); 399 400 /** 401 * The Vendor ID is set to an OUI that indicates the vendor of the device.
··· 59 #define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES) 60 61 /* 62 + * Number of MAS required before a row can be considered available. 63 + */ 64 + #define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1) 65 + 66 + /* 67 * Number of streams per DRP reservation between a pair of devices. 68 * 69 * [ECMA-368] section 16.8.6. ··· 92 * [ECMA-368] section 17.16 93 */ 94 enum { UWB_MAX_LOST_BEACONS = 3 }; 95 + 96 + /* 97 + * mDRPBackOffWinMin 98 + * 99 + * The minimum number of superframes to wait before trying to reserve 100 + * extra MAS. 101 + * 102 + * [ECMA-368] section 17.16 103 + */ 104 + enum { UWB_DRP_BACKOFF_WIN_MIN = 2 }; 105 + 106 + /* 107 + * mDRPBackOffWinMax 108 + * 109 + * The maximum number of superframes to wait before trying to reserve 110 + * extra MAS. 111 + * 112 + * [ECMA-368] section 17.16 113 + */ 114 + enum { UWB_DRP_BACKOFF_WIN_MAX = 16 }; 115 116 /* 117 * Length of a superframe in microseconds. ··· 200 UWB_DRP_REASON_MODIFIED, 201 }; 202 203 + /** Relinquish Request Reason Codes ([ECMA-368] table 113) */ 204 + enum uwb_relinquish_req_reason { 205 + UWB_RELINQUISH_REQ_REASON_NON_SPECIFIC = 0, 206 + UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION, 207 + }; 208 + 209 /** 210 * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9]) 211 */ ··· 252 UWB_APP_SPEC_PROBE_IE = 15, 253 UWB_IDENTIFICATION_IE = 19, 254 UWB_MASTER_KEY_ID_IE = 20, 255 + UWB_RELINQUISH_REQUEST_IE = 21, 256 UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */ 257 UWB_APP_SPEC_IE = 255, 258 }; ··· 364 struct uwb_ie_hdr hdr; 365 DECLARE_BITMAP(bmp, UWB_NUM_MAS); 366 } __attribute__((packed)); 367 + 368 + /* Relinquish Request IE ([ECMA-368] section 16.8.19). */ 369 + struct uwb_relinquish_request_ie { 370 + struct uwb_ie_hdr hdr; 371 + __le16 relinquish_req_control; 372 + struct uwb_dev_addr dev_addr; 373 + struct uwb_drp_alloc allocs[]; 374 + } __attribute__((packed)); 375 + 376 + static inline int uwb_ie_relinquish_req_reason_code(struct uwb_relinquish_request_ie *ie) 377 + { 378 + return (le16_to_cpu(ie->relinquish_req_control) >> 0) & 0xf; 379 + } 380 + 381 + static inline void uwb_ie_relinquish_req_set_reason_code(struct uwb_relinquish_request_ie *ie, 382 + int reason_code) 383 + { 384 + u16 ctrl = le16_to_cpu(ie->relinquish_req_control); 385 + ctrl = (ctrl & ~(0xf << 0)) | (reason_code << 0); 386 + ie->relinquish_req_control = cpu_to_le16(ctrl); 387 + } 388 389 /** 390 * The Vendor ID is set to an OUI that indicates the vendor of the device.
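
The helpers added to spec.h above pack a 4-bit reason code into relinquish_req_control. A brief sketch of filling in a relinquish request IE with them follows; it assumes uwb_ie_hdr carries element_id and length members (defined elsewhere in spec.h), leaves the allocs[] entries to the caller, and the my_fill_* name is hypothetical.

#include <linux/uwb/spec.h>

/* Hypothetical helper: build a relinquish request aimed at @target.
 * hdr.length covers the IE body only and must be grown by the size
 * of any allocs[] entries the caller appends. */
static void my_fill_relinquish_req(struct uwb_relinquish_request_ie *ie,
				   const struct uwb_dev_addr *target)
{
	ie->hdr.element_id = UWB_RELINQUISH_REQUEST_IE;
	ie->hdr.length = sizeof(*ie) - sizeof(struct uwb_ie_hdr);
	ie->relinquish_req_control = cpu_to_le16(0);
	uwb_ie_relinquish_req_set_reason_code(ie,
			UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION);
	ie->dev_addr = *target;
}
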
+2
include/linux/uwb/umc.h
··· 89 void (*remove)(struct umc_dev *); 90 int (*suspend)(struct umc_dev *, pm_message_t state); 91 int (*resume)(struct umc_dev *); 92 93 struct device_driver driver; 94 };
··· 89 void (*remove)(struct umc_dev *); 90 int (*suspend)(struct umc_dev *, pm_message_t state); 91 int (*resume)(struct umc_dev *); 92 + int (*pre_reset)(struct umc_dev *); 93 + int (*post_reset)(struct umc_dev *); 94 95 struct device_driver driver; 96 };
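
The pre_reset/post_reset hooks let a UMC driver quiesce and then reinitialise its hardware around a radio controller reset rather than being unbound and rebound. A sketch of a driver wiring them up follows; the whci_my_* names are placeholders and the rest of the umc_driver initialiser is elided, only the two new callbacks come from the hunk above.

#include <linux/uwb/umc.h>

static int whci_my_pre_reset(struct umc_dev *umc)
{
	/* Quiesce the hardware: stop DMA and mask interrupts before
	 * the radio controller is reset. */
	return 0;
}

static int whci_my_post_reset(struct umc_dev *umc)
{
	/* Reprogram registers and restart the device now that the
	 * reset has completed. */
	return 0;
}

static struct umc_driver whci_my_driver = {
	.pre_reset	= whci_my_pre_reset,
	.post_reset	= whci_my_post_reset,
	/* .name, .cap_id, .probe, .remove, ... as before */
};
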
+2 -1
include/linux/wlp.h
··· 646 struct wlp { 647 struct mutex mutex; 648 struct uwb_rc *rc; /* UWB radio controller */ 649 struct uwb_pal pal; 650 struct wlp_eda eda; 651 struct wlp_uuid uuid; ··· 676 static struct wlp_wss_attribute wss_attr_##_name = __ATTR(_name, _mode, \ 677 _show, _store) 678 679 - extern int wlp_setup(struct wlp *, struct uwb_rc *); 680 extern void wlp_remove(struct wlp *); 681 extern ssize_t wlp_neighborhood_show(struct wlp *, char *); 682 extern int wlp_wss_setup(struct net_device *, struct wlp_wss *);
··· 646 struct wlp { 647 struct mutex mutex; 648 struct uwb_rc *rc; /* UWB radio controller */ 649 + struct net_device *ndev; 650 struct uwb_pal pal; 651 struct wlp_eda eda; 652 struct wlp_uuid uuid; ··· 675 static struct wlp_wss_attribute wss_attr_##_name = __ATTR(_name, _mode, \ 676 _show, _store) 677 678 + extern int wlp_setup(struct wlp *, struct uwb_rc *, struct net_device *ndev); 679 extern void wlp_remove(struct wlp *); 680 extern ssize_t wlp_neighborhood_show(struct wlp *, char *); 681 extern int wlp_wss_setup(struct net_device *, struct wlp_wss *);
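
wlp_setup() now takes the net_device as well, so WLP can bring the radio up and down with the interface. A short sketch of the new three-argument call from a driver's setup path; the my_wlp_setup name and the error message are illustrative.

#include <linux/netdevice.h>
#include <linux/wlp.h>

/* Hypothetical driver setup: hand WLP both the radio controller and
 * the network interface it will track. */
static int my_wlp_setup(struct wlp *wlp, struct uwb_rc *rc,
			struct net_device *ndev)
{
	int ret;

	ret = wlp_setup(wlp, rc, ndev);
	if (ret < 0)
		dev_err(&ndev->dev, "WLP setup failed: %d\n", ret);
	return ret;
}
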