Merge branch 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/dvrabel/uwb

* 'for-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/dvrabel/uwb: (31 commits)
uwb: remove beacon cache entry after calling uwb_notify()
uwb: remove unused include/linux/uwb/debug.h
uwb: use print_hex_dump()
uwb: use dev_dbg() for debug messages
uwb: fix memory leak in uwb_rc_notif()
wusb: fix oops when terminating a non-existent reservation
uwb: fix oops when terminating an already terminated reservation
uwb: improved MAS allocator and reservation conflict handling
wusb: add debug files for ASL, PZL and DI to the whci-hcd driver
uwb: fix oops in debug PAL's reservation callback
uwb: clean up whci_wait_for() timeout error message
wusb: whci-hcd shouldn't do ASL/PZL updates while channel is inactive
uwb: remove unused beacon group join/leave events
wlp: start/stop radio on network interface up/down
uwb: add basic radio manager
uwb: add pal parameter to new reservation callback
uwb: fix races between events and neh timers
uwb: don't unbind the radio controller driver when resetting
uwb: per-radio controller event thread and beacon cache
uwb: add commands to add/remove IEs to the debug interface
...

+3252 -2837
+8 -6
Documentation/ABI/testing/sysfs-class-uwb_rc
··· 32 32 Description: 33 33 Write: 34 34 35 - <channel> [<bpst offset>] 35 + <channel> 36 36 37 - to start beaconing on a specific channel, or stop 38 - beaconing if <channel> is -1. Valid channels depends 39 - on the radio controller's supported band groups. 37 + to force a specific channel to be used when beaconing, 38 + or, if <channel> is -1, to prohibit beaconing. If 39 + <channel> is 0, then the default channel selection 40 + algorithm will be used. Valid channels depends on the 41 + radio controller's supported band groups. 40 42 41 - <bpst offset> may be used to try and join a specific 42 - beacon group if more than one was found during a scan. 43 + Reading returns the currently active channel, or -1 if 44 + the radio controller is not beaconing. 43 45 44 46 What: /sys/class/uwb_rc/uwbN/scan 45 47 Date: July 2008
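Example usage of the updated beacon attribute (uwb0 below is an illustrative radio controller name; substitute the appropriate uwbN):

  echo 13 > /sys/class/uwb_rc/uwb0/beacon   # force beaconing on channel 13
  echo 0  > /sys/class/uwb_rc/uwb0/beacon   # use the default channel selection algorithm
  echo -1 > /sys/class/uwb_rc/uwb0/beacon   # prohibit beaconing
  cat /sys/class/uwb_rc/uwb0/beacon         # currently active channel, or -1 if not beaconing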
-9
Documentation/usb/wusb-cbaf
··· 80 80 start) 81 81 for dev in ${2:-$hdevs} 82 82 do 83 - uwb_rc=$(readlink -f $dev/uwb_rc) 84 - if cat $uwb_rc/beacon | grep -q -- "-1" 85 - then 86 - echo 13 0 > $uwb_rc/beacon 87 - echo I: started beaconing on ch 13 on $(basename $uwb_rc) >&2 88 - fi 89 83 echo $host_CHID > $dev/wusb_chid 90 84 echo I: started host $(basename $dev) >&2 91 85 done ··· 89 95 do 90 96 echo 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 > $dev/wusb_chid 91 97 echo I: stopped host $(basename $dev) >&2 92 - uwb_rc=$(readlink -f $dev/uwb_rc) 93 - echo -1 | cat > $uwb_rc/beacon 94 - echo I: stopped beaconing on $(basename $uwb_rc) >&2 95 98 done 96 99 ;; 97 100 set-chid)
+55 -104
drivers/usb/host/hwa-hc.c
··· 54 54 * DWA). 55 55 */ 56 56 #include <linux/kernel.h> 57 - #include <linux/version.h> 58 57 #include <linux/init.h> 59 58 #include <linux/module.h> 60 59 #include <linux/workqueue.h> ··· 62 63 #include "../wusbcore/wa-hc.h" 63 64 #include "../wusbcore/wusbhc.h" 64 65 65 - #define D_LOCAL 0 66 - #include <linux/uwb/debug.h> 67 - 68 66 struct hwahc { 69 67 struct wusbhc wusbhc; /* has to be 1st */ 70 68 struct wahc wa; 71 - u8 buffer[16]; /* for misc usb transactions */ 72 69 }; 73 70 74 - /** 71 + /* 75 72 * FIXME should be wusbhc 76 73 * 77 74 * NOTE: we need to cache the Cluster ID because later...there is no ··· 121 126 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); 122 127 struct device *dev = &hwahc->wa.usb_iface->dev; 123 128 124 - d_fnstart(4, dev, "(hwahc %p)\n", hwahc); 125 129 mutex_lock(&wusbhc->mutex); 126 130 wa_nep_disarm(&hwahc->wa); 127 131 result = __wa_set_feature(&hwahc->wa, WA_RESET); ··· 128 134 dev_err(dev, "error commanding HC to reset: %d\n", result); 129 135 goto error_unlock; 130 136 } 131 - d_printf(3, dev, "reset: waiting for device to change state\n"); 132 137 result = __wa_wait_status(&hwahc->wa, WA_STATUS_RESETTING, 0); 133 138 if (result < 0) { 134 139 dev_err(dev, "error waiting for HC to reset: %d\n", result); ··· 135 142 } 136 143 error_unlock: 137 144 mutex_unlock(&wusbhc->mutex); 138 - d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); 139 145 return result; 140 146 } 141 147 ··· 147 155 int result; 148 156 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); 149 157 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); 150 - struct device *dev = &hwahc->wa.usb_iface->dev; 151 158 152 - /* Set up a Host Info WUSB Information Element */ 153 - d_fnstart(4, dev, "(hwahc %p)\n", hwahc); 154 159 result = -ENOSPC; 155 160 mutex_lock(&wusbhc->mutex); 156 - /* Start the numbering from the top so that the bottom 157 - * range of the unauth addr space is used for devices, 158 - * the top for HCs; use 0xfe - RC# */ 159 161 addr = wusb_cluster_id_get(); 160 162 if (addr == 0) 161 163 goto error_cluster_id_get; ··· 157 171 if (result < 0) 158 172 goto error_set_cluster_id; 159 173 160 - result = wa_nep_arm(&hwahc->wa, GFP_KERNEL); 161 - if (result < 0) { 162 - dev_err(dev, "cannot listen to notifications: %d\n", result); 163 - goto error_stop; 164 - } 165 174 usb_hcd->uses_new_polling = 1; 166 175 usb_hcd->poll_rh = 1; 167 176 usb_hcd->state = HC_STATE_RUNNING; 168 177 result = 0; 169 178 out: 170 179 mutex_unlock(&wusbhc->mutex); 171 - d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); 172 180 return result; 173 181 174 - error_stop: 175 - __wa_stop(&hwahc->wa); 176 182 error_set_cluster_id: 177 183 wusb_cluster_id_put(wusbhc->cluster_id); 178 184 error_cluster_id_get: 179 185 goto out; 180 186 181 - } 182 - 183 - /* 184 - * FIXME: break this function up 185 - */ 186 - static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc) 187 - { 188 - int result; 189 - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); 190 - struct device *dev = &hwahc->wa.usb_iface->dev; 191 - 192 - /* Set up a Host Info WUSB Information Element */ 193 - d_fnstart(4, dev, "(hwahc %p)\n", hwahc); 194 - result = -ENOSPC; 195 - 196 - result = __wa_set_feature(&hwahc->wa, WA_ENABLE); 197 - if (result < 0) { 198 - dev_err(dev, "error commanding HC to start: %d\n", result); 199 - goto error_stop; 200 - } 201 - result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE); 202 - if (result < 0) { 203 - dev_err(dev, "error waiting for HC to start: 
%d\n", result); 204 - goto error_stop; 205 - } 206 - result = 0; 207 - out: 208 - d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); 209 - return result; 210 - 211 - error_stop: 212 - result = __wa_clear_feature(&hwahc->wa, WA_ENABLE); 213 - goto out; 214 187 } 215 188 216 189 static int hwahc_op_suspend(struct usb_hcd *usb_hcd, pm_message_t msg) ··· 191 246 return -ENOSYS; 192 247 } 193 248 194 - static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc) 195 - { 196 - int result; 197 - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); 198 - struct device *dev = &hwahc->wa.usb_iface->dev; 199 - 200 - d_fnstart(4, dev, "(hwahc %p)\n", hwahc); 201 - /* Nothing for now */ 202 - d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); 203 - return; 204 - } 205 - 206 249 /* 207 250 * No need to abort pipes, as when this is called, all the children 208 251 * has been disconnected and that has done it [through ··· 199 266 */ 200 267 static void hwahc_op_stop(struct usb_hcd *usb_hcd) 201 268 { 202 - int result; 203 269 struct wusbhc *wusbhc = usb_hcd_to_wusbhc(usb_hcd); 204 - struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); 205 - struct wahc *wa = &hwahc->wa; 206 - struct device *dev = &wa->usb_iface->dev; 207 270 208 - d_fnstart(4, dev, "(hwahc %p)\n", hwahc); 209 271 mutex_lock(&wusbhc->mutex); 210 - wusbhc_stop(wusbhc); 211 - wa_nep_disarm(&hwahc->wa); 212 - result = __wa_stop(&hwahc->wa); 213 272 wusb_cluster_id_put(wusbhc->cluster_id); 214 273 mutex_unlock(&wusbhc->mutex); 215 - d_fnend(4, dev, "(hwahc %p) = %d\n", hwahc, result); 216 - return; 217 274 } 218 275 219 276 static int hwahc_op_get_frame_number(struct usb_hcd *usb_hcd) ··· 246 323 struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); 247 324 248 325 rpipe_ep_disable(&hwahc->wa, ep); 326 + } 327 + 328 + static int __hwahc_op_wusbhc_start(struct wusbhc *wusbhc) 329 + { 330 + int result; 331 + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); 332 + struct device *dev = &hwahc->wa.usb_iface->dev; 333 + 334 + result = __wa_set_feature(&hwahc->wa, WA_ENABLE); 335 + if (result < 0) { 336 + dev_err(dev, "error commanding HC to start: %d\n", result); 337 + goto error_stop; 338 + } 339 + result = __wa_wait_status(&hwahc->wa, WA_ENABLE, WA_ENABLE); 340 + if (result < 0) { 341 + dev_err(dev, "error waiting for HC to start: %d\n", result); 342 + goto error_stop; 343 + } 344 + result = wa_nep_arm(&hwahc->wa, GFP_KERNEL); 345 + if (result < 0) { 346 + dev_err(dev, "cannot listen to notifications: %d\n", result); 347 + goto error_stop; 348 + } 349 + return result; 350 + 351 + error_stop: 352 + __wa_clear_feature(&hwahc->wa, WA_ENABLE); 353 + return result; 354 + } 355 + 356 + static void __hwahc_op_wusbhc_stop(struct wusbhc *wusbhc, int delay) 357 + { 358 + struct hwahc *hwahc = container_of(wusbhc, struct hwahc, wusbhc); 359 + struct wahc *wa = &hwahc->wa; 360 + u8 iface_no = wa->usb_iface->cur_altsetting->desc.bInterfaceNumber; 361 + int ret; 362 + 363 + ret = usb_control_msg(wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), 364 + WUSB_REQ_CHAN_STOP, 365 + USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 366 + delay * 1000, 367 + iface_no, 368 + NULL, 0, 1000 /* FIXME: arbitrary */); 369 + if (ret == 0) 370 + msleep(delay); 371 + 372 + wa_nep_disarm(&hwahc->wa); 373 + __wa_stop(&hwahc->wa); 249 374 } 250 375 251 376 /* ··· 552 581 itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); 553 582 while (itr_size >= sizeof(*hdr)) { 554 583 hdr = (struct usb_descriptor_header *) itr; 
555 - d_printf(3, dev, "Extra device descriptor: " 556 - "type %02x/%u bytes @ %zu (%zu left)\n", 557 - hdr->bDescriptorType, hdr->bLength, 558 - (itr - usb_dev->rawdescriptors[actconfig_idx]), 559 - itr_size); 584 + dev_dbg(dev, "Extra device descriptor: " 585 + "type %02x/%u bytes @ %zu (%zu left)\n", 586 + hdr->bDescriptorType, hdr->bLength, 587 + (itr - usb_dev->rawdescriptors[actconfig_idx]), 588 + itr_size); 560 589 if (hdr->bDescriptorType == USB_DT_WIRE_ADAPTER) 561 590 goto found; 562 591 itr += hdr->bLength; ··· 765 794 { 766 795 struct wusbhc *wusbhc = &hwahc->wusbhc; 767 796 768 - d_fnstart(1, NULL, "(hwahc %p)\n", hwahc); 769 797 mutex_lock(&wusbhc->mutex); 770 798 __wa_destroy(&hwahc->wa); 771 799 wusbhc_destroy(&hwahc->wusbhc); ··· 774 804 usb_put_intf(hwahc->wa.usb_iface); 775 805 usb_put_dev(hwahc->wa.usb_dev); 776 806 mutex_unlock(&wusbhc->mutex); 777 - d_fnend(1, NULL, "(hwahc %p) = void\n", hwahc); 778 807 } 779 808 780 809 static void hwahc_init(struct hwahc *hwahc) ··· 790 821 struct hwahc *hwahc; 791 822 struct device *dev = &usb_iface->dev; 792 823 793 - d_fnstart(4, dev, "(%p, %p)\n", usb_iface, id); 794 824 result = -ENOMEM; 795 825 usb_hcd = usb_create_hcd(&hwahc_hc_driver, &usb_iface->dev, "wusb-hwa"); 796 826 if (usb_hcd == NULL) { ··· 816 848 dev_err(dev, "Cannot setup phase B of WUSBHC: %d\n", result); 817 849 goto error_wusbhc_b_create; 818 850 } 819 - d_fnend(4, dev, "(%p, %p) = 0\n", usb_iface, id); 820 851 return 0; 821 852 822 853 error_wusbhc_b_create: ··· 825 858 error_hwahc_create: 826 859 usb_put_hcd(usb_hcd); 827 860 error_alloc: 828 - d_fnend(4, dev, "(%p, %p) = %d\n", usb_iface, id, result); 829 861 return result; 830 862 } 831 863 ··· 838 872 wusbhc = usb_hcd_to_wusbhc(usb_hcd); 839 873 hwahc = container_of(wusbhc, struct hwahc, wusbhc); 840 874 841 - d_fnstart(1, NULL, "(hwahc %p [usb_iface %p])\n", hwahc, usb_iface); 842 875 wusbhc_b_destroy(&hwahc->wusbhc); 843 876 usb_remove_hcd(usb_hcd); 844 877 hwahc_destroy(hwahc); 845 878 usb_put_hcd(usb_hcd); 846 - d_fnend(1, NULL, "(hwahc %p [usb_iface %p]) = void\n", hwahc, 847 - usb_iface); 848 879 } 849 880 850 - /** USB device ID's that we handle */ 851 881 static struct usb_device_id hwahc_id_table[] = { 852 882 /* FIXME: use class labels for this */ 853 883 { USB_INTERFACE_INFO(0xe0, 0x02, 0x01), }, ··· 860 898 861 899 static int __init hwahc_driver_init(void) 862 900 { 863 - int result; 864 - result = usb_register(&hwahc_driver); 865 - if (result < 0) { 866 - printk(KERN_ERR "WA-CDS: Cannot register USB driver: %d\n", 867 - result); 868 - goto error_usb_register; 869 - } 870 - return 0; 871 - 872 - error_usb_register: 873 - return result; 874 - 901 + return usb_register(&hwahc_driver); 875 902 } 876 903 module_init(hwahc_driver_init); 877 904
+1
drivers/usb/host/whci/Kbuild
··· 2 2 3 3 whci-hcd-y := \ 4 4 asl.o \ 5 + debug.o \ 5 6 hcd.o \ 6 7 hw.o \ 7 8 init.o \
+18 -28
drivers/usb/host/whci/asl.c
··· 19 19 #include <linux/dma-mapping.h> 20 20 #include <linux/uwb/umc.h> 21 21 #include <linux/usb.h> 22 - #define D_LOCAL 0 23 - #include <linux/uwb/debug.h> 24 22 25 23 #include "../../wusbcore/wusbhc.h" 26 24 27 25 #include "whcd.h" 28 - 29 - #if D_LOCAL >= 4 30 - static void dump_asl(struct whc *whc, const char *tag) 31 - { 32 - struct device *dev = &whc->umc->dev; 33 - struct whc_qset *qset; 34 - 35 - d_printf(4, dev, "ASL %s\n", tag); 36 - 37 - list_for_each_entry(qset, &whc->async_list, list_node) { 38 - dump_qset(qset, dev); 39 - } 40 - } 41 - #else 42 - static inline void dump_asl(struct whc *whc, const char *tag) 43 - { 44 - } 45 - #endif 46 - 47 26 48 27 static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset, 49 28 struct whc_qset **next, struct whc_qset **prev) ··· 158 179 1000, "stop ASL"); 159 180 } 160 181 182 + /** 183 + * asl_update - request an ASL update and wait for the hardware to be synced 184 + * @whc: the WHCI HC 185 + * @wusbcmd: WUSBCMD value to start the update. 186 + * 187 + * If the WUSB HC is inactive (i.e., the ASL is stopped) then the 188 + * update must be skipped as the hardware may not respond to update 189 + * requests. 190 + */ 161 191 void asl_update(struct whc *whc, uint32_t wusbcmd) 162 192 { 163 - whc_write_wusbcmd(whc, wusbcmd, wusbcmd); 164 - wait_event(whc->async_list_wq, 165 - (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0); 193 + struct wusbhc *wusbhc = &whc->wusbhc; 194 + 195 + mutex_lock(&wusbhc->mutex); 196 + if (wusbhc->active) { 197 + whc_write_wusbcmd(whc, wusbcmd, wusbcmd); 198 + wait_event(whc->async_list_wq, 199 + (le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0); 200 + } 201 + mutex_unlock(&wusbhc->mutex); 166 202 } 167 203 168 204 /** ··· 196 202 197 203 spin_lock_irq(&whc->lock); 198 204 199 - dump_asl(whc, "before processing"); 200 - 201 205 /* 202 206 * Transerve the software list backwards so new qsets can be 203 207 * safely inserted into the ASL without making it non-circular. ··· 208 216 209 217 update |= process_qset(whc, qset); 210 218 } 211 - 212 - dump_asl(whc, "after processing"); 213 219 214 220 spin_unlock_irq(&whc->lock); 215 221
+189
drivers/usb/host/whci/debug.c
··· 1 + /* 2 + * Wireless Host Controller (WHC) debug. 3 + * 4 + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License version 8 + * 2 as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + #include <linux/kernel.h> 19 + #include <linux/debugfs.h> 20 + #include <linux/seq_file.h> 21 + 22 + #include "../../wusbcore/wusbhc.h" 23 + 24 + #include "whcd.h" 25 + 26 + struct whc_dbg { 27 + struct dentry *di_f; 28 + struct dentry *asl_f; 29 + struct dentry *pzl_f; 30 + }; 31 + 32 + void qset_print(struct seq_file *s, struct whc_qset *qset) 33 + { 34 + struct whc_std *std; 35 + struct urb *urb = NULL; 36 + int i; 37 + 38 + seq_printf(s, "qset %08x\n", (u32)qset->qset_dma); 39 + seq_printf(s, " -> %08x\n", (u32)qset->qh.link); 40 + seq_printf(s, " info: %08x %08x %08x\n", 41 + qset->qh.info1, qset->qh.info2, qset->qh.info3); 42 + seq_printf(s, " sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count); 43 + seq_printf(s, " TD: sts: %08x opts: %08x\n", 44 + qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options); 45 + 46 + for (i = 0; i < WHCI_QSET_TD_MAX; i++) { 47 + seq_printf(s, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n", 48 + i == qset->td_start ? 'S' : ' ', 49 + i == qset->td_end ? 'E' : ' ', 50 + i, qset->qtd[i].status, qset->qtd[i].options, 51 + (u32)qset->qtd[i].page_list_ptr); 52 + } 53 + seq_printf(s, " ntds: %d\n", qset->ntds); 54 + list_for_each_entry(std, &qset->stds, list_node) { 55 + if (urb != std->urb) { 56 + urb = std->urb; 57 + seq_printf(s, " urb %p transferred: %d bytes\n", urb, 58 + urb->actual_length); 59 + } 60 + if (std->qtd) 61 + seq_printf(s, " sTD[%td]: %zu bytes @ %08x\n", 62 + std->qtd - &qset->qtd[0], 63 + std->len, std->num_pointers ? 64 + (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr); 65 + else 66 + seq_printf(s, " sTD[-]: %zd bytes @ %08x\n", 67 + std->len, std->num_pointers ? 68 + (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr); 69 + } 70 + } 71 + 72 + static int di_print(struct seq_file *s, void *p) 73 + { 74 + struct whc *whc = s->private; 75 + char buf[72]; 76 + int d; 77 + 78 + for (d = 0; d < whc->n_devices; d++) { 79 + struct di_buf_entry *di = &whc->di_buf[d]; 80 + 81 + bitmap_scnprintf(buf, sizeof(buf), 82 + (unsigned long *)di->availability_info, UWB_NUM_MAS); 83 + 84 + seq_printf(s, "DI[%d]\n", d); 85 + seq_printf(s, " availability: %s\n", buf); 86 + seq_printf(s, " %c%c key idx: %d dev addr: %d\n", 87 + (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ', 88 + (di->addr_sec_info & WHC_DI_DISABLE) ? 
'D' : ' ', 89 + (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8, 90 + (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK)); 91 + } 92 + return 0; 93 + } 94 + 95 + static int asl_print(struct seq_file *s, void *p) 96 + { 97 + struct whc *whc = s->private; 98 + struct whc_qset *qset; 99 + 100 + list_for_each_entry(qset, &whc->async_list, list_node) { 101 + qset_print(s, qset); 102 + } 103 + 104 + return 0; 105 + } 106 + 107 + static int pzl_print(struct seq_file *s, void *p) 108 + { 109 + struct whc *whc = s->private; 110 + struct whc_qset *qset; 111 + int period; 112 + 113 + for (period = 0; period < 5; period++) { 114 + seq_printf(s, "Period %d\n", period); 115 + list_for_each_entry(qset, &whc->periodic_list[period], list_node) { 116 + qset_print(s, qset); 117 + } 118 + } 119 + return 0; 120 + } 121 + 122 + static int di_open(struct inode *inode, struct file *file) 123 + { 124 + return single_open(file, di_print, inode->i_private); 125 + } 126 + 127 + static int asl_open(struct inode *inode, struct file *file) 128 + { 129 + return single_open(file, asl_print, inode->i_private); 130 + } 131 + 132 + static int pzl_open(struct inode *inode, struct file *file) 133 + { 134 + return single_open(file, pzl_print, inode->i_private); 135 + } 136 + 137 + static struct file_operations di_fops = { 138 + .open = di_open, 139 + .read = seq_read, 140 + .llseek = seq_lseek, 141 + .release = single_release, 142 + .owner = THIS_MODULE, 143 + }; 144 + 145 + static struct file_operations asl_fops = { 146 + .open = asl_open, 147 + .read = seq_read, 148 + .llseek = seq_lseek, 149 + .release = single_release, 150 + .owner = THIS_MODULE, 151 + }; 152 + 153 + static struct file_operations pzl_fops = { 154 + .open = pzl_open, 155 + .read = seq_read, 156 + .llseek = seq_lseek, 157 + .release = single_release, 158 + .owner = THIS_MODULE, 159 + }; 160 + 161 + void whc_dbg_init(struct whc *whc) 162 + { 163 + if (whc->wusbhc.pal.debugfs_dir == NULL) 164 + return; 165 + 166 + whc->dbg = kzalloc(sizeof(struct whc_dbg), GFP_KERNEL); 167 + if (whc->dbg == NULL) 168 + return; 169 + 170 + whc->dbg->di_f = debugfs_create_file("di", 0444, 171 + whc->wusbhc.pal.debugfs_dir, whc, 172 + &di_fops); 173 + whc->dbg->asl_f = debugfs_create_file("asl", 0444, 174 + whc->wusbhc.pal.debugfs_dir, whc, 175 + &asl_fops); 176 + whc->dbg->pzl_f = debugfs_create_file("pzl", 0444, 177 + whc->wusbhc.pal.debugfs_dir, whc, 178 + &pzl_fops); 179 + } 180 + 181 + void whc_dbg_clean_up(struct whc *whc) 182 + { 183 + if (whc->dbg) { 184 + debugfs_remove(whc->dbg->pzl_f); 185 + debugfs_remove(whc->dbg->asl_f); 186 + debugfs_remove(whc->dbg->di_f); 187 + kfree(whc->dbg); 188 + } 189 + }
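The new debug files are created under the WUSB HC's PAL debugfs directory (whc->wusbhc.pal.debugfs_dir). Assuming debugfs is mounted at /sys/kernel/debug and the PAL directory ends up as uwb/<rc device>/wusbhc (path shown for illustration only), they can be inspected with:

  cat /sys/kernel/debug/uwb/uwb0/wusbhc/di    # device information buffer entries
  cat /sys/kernel/debug/uwb/uwb0/wusbhc/asl   # asynchronous schedule list qsets
  cat /sys/kernel/debug/uwb/uwb0/wusbhc/pzl   # periodic zone list qsets, per period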
+3 -3
drivers/usb/host/whci/hcd.c
··· 15 15 * You should have received a copy of the GNU General Public License 16 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 17 */ 18 - #include <linux/version.h> 19 18 #include <linux/kernel.h> 20 19 #include <linux/init.h> 21 20 #include <linux/uwb/umc.h> ··· 90 91 struct whc *whc = wusbhc_to_whc(wusbhc); 91 92 92 93 mutex_lock(&wusbhc->mutex); 93 - 94 - wusbhc_stop(wusbhc); 95 94 96 95 /* stop HC */ 97 96 le_writel(0, whc->base + WUSBINTR); ··· 273 276 goto error_wusbhc_b_create; 274 277 } 275 278 279 + whc_dbg_init(whc); 280 + 276 281 return 0; 277 282 278 283 error_wusbhc_b_create: ··· 298 299 struct whc *whc = wusbhc_to_whc(wusbhc); 299 300 300 301 if (usb_hcd) { 302 + whc_dbg_clean_up(whc); 301 303 wusbhc_b_destroy(wusbhc); 302 304 usb_remove_hcd(usb_hcd); 303 305 wusbhc_destroy(wusbhc);
+5 -3
drivers/usb/host/whci/hw.c
··· 50 50 unsigned long flags; 51 51 dma_addr_t dma_addr; 52 52 int t; 53 + int ret = 0; 53 54 54 55 mutex_lock(&whc->mutex); 55 56 ··· 62 61 dev_err(&whc->umc->dev, "generic command timeout (%04x/%04x)\n", 63 62 le_readl(whc->base + WUSBGENCMDSTS), 64 63 le_readl(whc->base + WUSBGENCMDPARAMS)); 65 - return -ETIMEDOUT; 64 + ret = -ETIMEDOUT; 65 + goto out; 66 66 } 67 67 68 68 if (addr) { ··· 82 80 whc->base + WUSBGENCMDSTS); 83 81 84 82 spin_unlock_irqrestore(&whc->lock, flags); 85 - 83 + out: 86 84 mutex_unlock(&whc->mutex); 87 85 88 - return 0; 86 + return ret; 89 87 }
-1
drivers/usb/host/whci/int.c
··· 15 15 * You should have received a copy of the GNU General Public License 16 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 17 */ 18 - #include <linux/version.h> 19 18 #include <linux/kernel.h> 20 19 #include <linux/init.h> 21 20 #include <linux/uwb/umc.h>
+18 -31
drivers/usb/host/whci/pzl.c
··· 19 19 #include <linux/dma-mapping.h> 20 20 #include <linux/uwb/umc.h> 21 21 #include <linux/usb.h> 22 - #define D_LOCAL 0 23 - #include <linux/uwb/debug.h> 24 22 25 23 #include "../../wusbcore/wusbhc.h" 26 24 27 25 #include "whcd.h" 28 - 29 - #if D_LOCAL >= 4 30 - static void dump_pzl(struct whc *whc, const char *tag) 31 - { 32 - struct device *dev = &whc->umc->dev; 33 - struct whc_qset *qset; 34 - int period = 0; 35 - 36 - d_printf(4, dev, "PZL %s\n", tag); 37 - 38 - for (period = 0; period < 5; period++) { 39 - d_printf(4, dev, "Period %d\n", period); 40 - list_for_each_entry(qset, &whc->periodic_list[period], list_node) { 41 - dump_qset(qset, dev); 42 - } 43 - } 44 - } 45 - #else 46 - static inline void dump_pzl(struct whc *whc, const char *tag) 47 - { 48 - } 49 - #endif 50 26 51 27 static void update_pzl_pointers(struct whc *whc, int period, u64 addr) 52 28 { ··· 171 195 1000, "stop PZL"); 172 196 } 173 197 198 + /** 199 + * pzl_update - request a PZL update and wait for the hardware to be synced 200 + * @whc: the WHCI HC 201 + * @wusbcmd: WUSBCMD value to start the update. 202 + * 203 + * If the WUSB HC is inactive (i.e., the PZL is stopped) then the 204 + * update must be skipped as the hardware may not respond to update 205 + * requests. 206 + */ 174 207 void pzl_update(struct whc *whc, uint32_t wusbcmd) 175 208 { 176 - whc_write_wusbcmd(whc, wusbcmd, wusbcmd); 177 - wait_event(whc->periodic_list_wq, 178 - (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0); 209 + struct wusbhc *wusbhc = &whc->wusbhc; 210 + 211 + mutex_lock(&wusbhc->mutex); 212 + if (wusbhc->active) { 213 + whc_write_wusbcmd(whc, wusbcmd, wusbcmd); 214 + wait_event(whc->periodic_list_wq, 215 + (le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0); 216 + } 217 + mutex_unlock(&wusbhc->mutex); 179 218 } 180 219 181 220 static void update_pzl_hw_view(struct whc *whc) ··· 226 235 227 236 spin_lock_irq(&whc->lock); 228 237 229 - dump_pzl(whc, "before processing"); 230 - 231 238 for (period = 4; period >= 0; period--) { 232 239 list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) { 233 240 if (!qset->in_hw_list) ··· 236 247 237 248 if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED)) 238 249 update_pzl_hw_view(whc); 239 - 240 - dump_pzl(whc, "after processing"); 241 250 242 251 spin_unlock_irq(&whc->lock); 243 252
-40
drivers/usb/host/whci/qset.c
··· 24 24 25 25 #include "whcd.h" 26 26 27 - void dump_qset(struct whc_qset *qset, struct device *dev) 28 - { 29 - struct whc_std *std; 30 - struct urb *urb = NULL; 31 - int i; 32 - 33 - dev_dbg(dev, "qset %08x\n", (u32)qset->qset_dma); 34 - dev_dbg(dev, " -> %08x\n", (u32)qset->qh.link); 35 - dev_dbg(dev, " info: %08x %08x %08x\n", 36 - qset->qh.info1, qset->qh.info2, qset->qh.info3); 37 - dev_dbg(dev, " sts: %04x errs: %d\n", qset->qh.status, qset->qh.err_count); 38 - dev_dbg(dev, " TD: sts: %08x opts: %08x\n", 39 - qset->qh.overlay.qtd.status, qset->qh.overlay.qtd.options); 40 - 41 - for (i = 0; i < WHCI_QSET_TD_MAX; i++) { 42 - dev_dbg(dev, " %c%c TD[%d]: sts: %08x opts: %08x ptr: %08x\n", 43 - i == qset->td_start ? 'S' : ' ', 44 - i == qset->td_end ? 'E' : ' ', 45 - i, qset->qtd[i].status, qset->qtd[i].options, 46 - (u32)qset->qtd[i].page_list_ptr); 47 - } 48 - dev_dbg(dev, " ntds: %d\n", qset->ntds); 49 - list_for_each_entry(std, &qset->stds, list_node) { 50 - if (urb != std->urb) { 51 - urb = std->urb; 52 - dev_dbg(dev, " urb %p transferred: %d bytes\n", urb, 53 - urb->actual_length); 54 - } 55 - if (std->qtd) 56 - dev_dbg(dev, " sTD[%td]: %zu bytes @ %08x\n", 57 - std->qtd - &qset->qtd[0], 58 - std->len, std->num_pointers ? 59 - (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr); 60 - else 61 - dev_dbg(dev, " sTD[-]: %zd bytes @ %08x\n", 62 - std->len, std->num_pointers ? 63 - (u32)(std->pl_virt[0].buf_ptr) : (u32)std->dma_addr); 64 - } 65 - } 66 - 67 27 struct whc_qset *qset_alloc(struct whc *whc, gfp_t mem_flags) 68 28 { 69 29 struct whc_qset *qset;
+9 -2
drivers/usb/host/whci/whcd.h
··· 21 21 #define __WHCD_H 22 22 23 23 #include <linux/uwb/whci.h> 24 + #include <linux/uwb/umc.h> 24 25 #include <linux/workqueue.h> 25 26 26 27 #include "whci-hc.h" ··· 29 28 /* Generic command timeout. */ 30 29 #define WHC_GENCMD_TIMEOUT_MS 100 31 30 31 + struct whc_dbg; 32 32 33 33 struct whc { 34 34 struct wusbhc wusbhc; ··· 71 69 struct list_head periodic_removed_list; 72 70 wait_queue_head_t periodic_list_wq; 73 71 struct work_struct periodic_work; 72 + 73 + struct whc_dbg *dbg; 74 74 }; 75 75 76 76 #define wusbhc_to_whc(w) (container_of((w), struct whc, wusbhc)) ··· 140 136 141 137 /* wusb.c */ 142 138 int whc_wusbhc_start(struct wusbhc *wusbhc); 143 - void whc_wusbhc_stop(struct wusbhc *wusbhc); 139 + void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay); 144 140 int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, 145 141 u8 handle, struct wuie_hdr *wuie); 146 142 int whc_mmcie_rm(struct wusbhc *wusbhc, u8 handle); ··· 194 190 struct whc_qtd *qtd); 195 191 enum whc_update qset_add_qtds(struct whc *whc, struct whc_qset *qset); 196 192 void qset_remove_complete(struct whc *whc, struct whc_qset *qset); 197 - void dump_qset(struct whc_qset *qset, struct device *dev); 198 193 void pzl_update(struct whc *whc, uint32_t wusbcmd); 199 194 void asl_update(struct whc *whc, uint32_t wusbcmd); 195 + 196 + /* debug.c */ 197 + void whc_dbg_init(struct whc *whc); 198 + void whc_dbg_clean_up(struct whc *whc); 200 199 201 200 #endif /* #ifndef __WHCD_H */
+2
drivers/usb/host/whci/whci-hc.h
··· 410 410 # define WUSBDNTSCTRL_SLOTS(s) ((s) << 0) 411 411 412 412 #define WUSBTIME 0x68 413 + # define WUSBTIME_CHANNEL_TIME_MASK 0x00ffffff 414 + 413 415 #define WUSBBPST 0x6c 414 416 #define WUSBDIBUPDATED 0x70 415 417
+12 -31
drivers/usb/host/whci/wusb.c
··· 15 15 * You should have received a copy of the GNU General Public License 16 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 17 */ 18 - #include <linux/version.h> 19 18 #include <linux/kernel.h> 20 19 #include <linux/init.h> 21 20 #include <linux/uwb/umc.h> 22 - #define D_LOCAL 1 23 - #include <linux/uwb/debug.h> 24 21 25 22 #include "../../wusbcore/wusbhc.h" 26 23 27 24 #include "whcd.h" 28 25 29 - #if D_LOCAL >= 1 30 - static void dump_di(struct whc *whc, int idx) 31 - { 32 - struct di_buf_entry *di = &whc->di_buf[idx]; 33 - struct device *dev = &whc->umc->dev; 34 - char buf[128]; 35 - 36 - bitmap_scnprintf(buf, sizeof(buf), (unsigned long *)di->availability_info, UWB_NUM_MAS); 37 - 38 - d_printf(1, dev, "DI[%d]\n", idx); 39 - d_printf(1, dev, " availability: %s\n", buf); 40 - d_printf(1, dev, " %c%c key idx: %d dev addr: %d\n", 41 - (di->addr_sec_info & WHC_DI_SECURE) ? 'S' : ' ', 42 - (di->addr_sec_info & WHC_DI_DISABLE) ? 'D' : ' ', 43 - (di->addr_sec_info & WHC_DI_KEY_IDX_MASK) >> 8, 44 - (di->addr_sec_info & WHC_DI_DEV_ADDR_MASK)); 45 - } 46 - #else 47 - static inline void dump_di(struct whc *whc, int idx) 48 - { 49 - } 50 - #endif 51 - 52 26 static int whc_update_di(struct whc *whc, int idx) 53 27 { 54 28 int offset = idx / 32; 55 29 u32 bit = 1 << (idx % 32); 56 - 57 - dump_di(whc, idx); 58 30 59 31 le_writel(bit, whc->base + WUSBDIBUPDATED + offset); 60 32 ··· 36 64 } 37 65 38 66 /* 39 - * WHCI starts and stops MMCs based on there being a valid GTK so 40 - * these need only start/stop the asynchronous and periodic schedules. 67 + * WHCI starts MMCs based on there being a valid GTK so these need 68 + * only start/stop the asynchronous and periodic schedules and send a 69 + * channel stop command. 41 70 */ 42 71 43 72 int whc_wusbhc_start(struct wusbhc *wusbhc) ··· 51 78 return 0; 52 79 } 53 80 54 - void whc_wusbhc_stop(struct wusbhc *wusbhc) 81 + void whc_wusbhc_stop(struct wusbhc *wusbhc, int delay) 55 82 { 56 83 struct whc *whc = wusbhc_to_whc(wusbhc); 84 + u32 stop_time, now_time; 85 + int ret; 57 86 58 87 pzl_stop(whc); 59 88 asl_stop(whc); 89 + 90 + now_time = le_readl(whc->base + WUSBTIME) & WUSBTIME_CHANNEL_TIME_MASK; 91 + stop_time = (now_time + ((delay * 8) << 7)) & 0x00ffffff; 92 + ret = whc_do_gencmd(whc, WUSBGENCMDSTS_CHAN_STOP, stop_time, NULL, 0); 93 + if (ret == 0) 94 + msleep(delay); 60 95 } 61 96 62 97 int whc_mmcie_add(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt,
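A note on the stop-time arithmetic in whc_wusbhc_stop(): (delay * 8) << 7 equals delay * 1024, so for a delay given in milliseconds the WUSBGENCMDSTS_CHAN_STOP command is scheduled roughly delay milliseconds past the current WUSBTIME channel time (assuming the 24-bit channel time counter ticks in microseconds), and the subsequent msleep(delay) waits for that point to be reached. This mirrors the HWA path above, where delay * 1000 is passed as the WUSB_REQ_CHAN_STOP value.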
-1
drivers/usb/wusbcore/cbaf.c
··· 88 88 */ 89 89 #include <linux/module.h> 90 90 #include <linux/ctype.h> 91 - #include <linux/version.h> 92 91 #include <linux/usb.h> 93 92 #include <linux/interrupt.h> 94 93 #include <linux/delay.h>
+29 -50
drivers/usb/wusbcore/crypto.c
··· 51 51 #include <linux/uwb.h> 52 52 #include <linux/usb/wusb.h> 53 53 #include <linux/scatterlist.h> 54 - #define D_LOCAL 0 55 - #include <linux/uwb/debug.h> 56 54 55 + static int debug_crypto_verify = 0; 56 + 57 + module_param(debug_crypto_verify, int, 0); 58 + MODULE_PARM_DESC(debug_crypto_verify, "verify the key generation algorithms"); 59 + 60 + static void wusb_key_dump(const void *buf, size_t len) 61 + { 62 + print_hex_dump(KERN_ERR, " ", DUMP_PREFIX_OFFSET, 16, 1, 63 + buf, len, 0); 64 + } 57 65 58 66 /* 59 67 * Block of data, as understood by AES-CCM ··· 211 203 const u8 bzero[16] = { 0 }; 212 204 size_t zero_padding; 213 205 214 - d_fnstart(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, " 215 - "n %p, a %p, b %p, blen %zu)\n", 216 - tfm_cbc, tfm_aes, mic, n, a, b, blen); 217 206 /* 218 207 * These checks should be compile time optimized out 219 208 * ensure @a fills b1's mac_header and following fields ··· 252 247 b1.la = cpu_to_be16(blen + 14); 253 248 memcpy(&b1.mac_header, a, sizeof(*a)); 254 249 255 - d_printf(4, NULL, "I: B0 (%zu bytes)\n", sizeof(b0)); 256 - d_dump(4, NULL, &b0, sizeof(b0)); 257 - d_printf(4, NULL, "I: B1 (%zu bytes)\n", sizeof(b1)); 258 - d_dump(4, NULL, &b1, sizeof(b1)); 259 - d_printf(4, NULL, "I: B (%zu bytes)\n", blen); 260 - d_dump(4, NULL, b, blen); 261 - d_printf(4, NULL, "I: B 0-padding (%zu bytes)\n", zero_padding); 262 - d_printf(4, NULL, "D: IV before crypto (%zu)\n", ivsize); 263 - d_dump(4, NULL, iv, ivsize); 264 - 265 250 sg_init_table(sg, ARRAY_SIZE(sg)); 266 251 sg_set_buf(&sg[0], &b0, sizeof(b0)); 267 252 sg_set_buf(&sg[1], &b1, sizeof(b1)); ··· 268 273 result); 269 274 goto error_cbc_crypt; 270 275 } 271 - d_printf(4, NULL, "D: MIC tag\n"); 272 - d_dump(4, NULL, iv, ivsize); 273 276 274 277 /* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5] 275 278 * The procedure is to AES crypt the A0 block and XOR the MIC ··· 282 289 ax.counter = 0; 283 290 crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax); 284 291 bytewise_xor(mic, &ax, iv, 8); 285 - d_printf(4, NULL, "D: CTR[MIC]\n"); 286 - d_dump(4, NULL, &ax, 8); 287 - d_printf(4, NULL, "D: CCM-MIC tag\n"); 288 - d_dump(4, NULL, mic, 8); 289 292 result = 8; 290 293 error_cbc_crypt: 291 294 kfree(dst_buf); 292 295 error_dst_buf: 293 - d_fnend(3, NULL, "(tfm_cbc %p, tfm_aes %p, mic %p, " 294 - "n %p, a %p, b %p, blen %zu)\n", 295 - tfm_cbc, tfm_aes, mic, n, a, b, blen); 296 296 return result; 297 297 } 298 298 ··· 306 320 struct crypto_cipher *tfm_aes; 307 321 u64 sfn = 0; 308 322 __le64 sfn_le; 309 - 310 - d_fnstart(3, NULL, "(out %p, out_size %zu, key %p, _n %p, " 311 - "a %p, b %p, blen %zu, len %zu)\n", out, out_size, 312 - key, _n, a, b, blen, len); 313 323 314 324 tfm_cbc = crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC); 315 325 if (IS_ERR(tfm_cbc)) { ··· 348 366 error_setkey_cbc: 349 367 crypto_free_blkcipher(tfm_cbc); 350 368 error_alloc_cbc: 351 - d_fnend(3, NULL, "(out %p, out_size %zu, key %p, _n %p, " 352 - "a %p, b %p, blen %zu, len %zu) = %d\n", out, out_size, 353 - key, _n, a, b, blen, len, (int)bytes); 354 369 return result; 355 370 } 356 371 ··· 401 422 "mismatch between MIC result and WUSB1.0[A2]\n"); 402 423 hs_size = sizeof(stv_hsmic_hs) - sizeof(stv_hsmic_hs.MIC); 403 424 printk(KERN_ERR "E: Handshake2 in: (%zu bytes)\n", hs_size); 404 - dump_bytes(NULL, &stv_hsmic_hs, hs_size); 425 + wusb_key_dump(&stv_hsmic_hs, hs_size); 405 426 printk(KERN_ERR "E: CCM Nonce in: (%zu bytes)\n", 406 427 sizeof(stv_hsmic_n)); 407 - dump_bytes(NULL, 
&stv_hsmic_n, sizeof(stv_hsmic_n)); 428 + wusb_key_dump(&stv_hsmic_n, sizeof(stv_hsmic_n)); 408 429 printk(KERN_ERR "E: MIC out:\n"); 409 - dump_bytes(NULL, mic, sizeof(mic)); 430 + wusb_key_dump(mic, sizeof(mic)); 410 431 printk(KERN_ERR "E: MIC out (from WUSB1.0[A.2]):\n"); 411 - dump_bytes(NULL, stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC)); 432 + wusb_key_dump(stv_hsmic_hs.MIC, sizeof(stv_hsmic_hs.MIC)); 412 433 result = -EINVAL; 413 434 } else 414 435 result = 0; ··· 476 497 printk(KERN_ERR "E: WUSB key derivation test: " 477 498 "mismatch between key derivation result " 478 499 "and WUSB1.0[A1] Errata 2006/12\n"); 479 - printk(KERN_ERR "E: keydvt in: key (%zu bytes)\n", 480 - sizeof(stv_key_a1)); 481 - dump_bytes(NULL, stv_key_a1, sizeof(stv_key_a1)); 482 - printk(KERN_ERR "E: keydvt in: nonce (%zu bytes)\n", 483 - sizeof(stv_keydvt_n_a1)); 484 - dump_bytes(NULL, &stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1)); 485 - printk(KERN_ERR "E: keydvt in: hnonce & dnonce (%zu bytes)\n", 486 - sizeof(stv_keydvt_in_a1)); 487 - dump_bytes(NULL, &stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1)); 500 + printk(KERN_ERR "E: keydvt in: key\n"); 501 + wusb_key_dump(stv_key_a1, sizeof(stv_key_a1)); 502 + printk(KERN_ERR "E: keydvt in: nonce\n"); 503 + wusb_key_dump( &stv_keydvt_n_a1, sizeof(stv_keydvt_n_a1)); 504 + printk(KERN_ERR "E: keydvt in: hnonce & dnonce\n"); 505 + wusb_key_dump(&stv_keydvt_in_a1, sizeof(stv_keydvt_in_a1)); 488 506 printk(KERN_ERR "E: keydvt out: KCK\n"); 489 - dump_bytes(NULL, &keydvt_out.kck, sizeof(keydvt_out.kck)); 507 + wusb_key_dump(&keydvt_out.kck, sizeof(keydvt_out.kck)); 490 508 printk(KERN_ERR "E: keydvt out: PTK\n"); 491 - dump_bytes(NULL, &keydvt_out.ptk, sizeof(keydvt_out.ptk)); 509 + wusb_key_dump(&keydvt_out.ptk, sizeof(keydvt_out.ptk)); 492 510 result = -EINVAL; 493 511 } else 494 512 result = 0; ··· 502 526 { 503 527 int result; 504 528 505 - result = wusb_key_derive_verify(); 506 - if (result < 0) 507 - return result; 508 - return wusb_oob_mic_verify(); 529 + if (debug_crypto_verify) { 530 + result = wusb_key_derive_verify(); 531 + if (result < 0) 532 + return result; 533 + return wusb_oob_mic_verify(); 534 + } 535 + return 0; 509 536 } 510 537 511 538 void wusb_crypto_exit(void)
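The crypto self-tests are now opt-in. Assuming this file is built into the wusbcore module (module name used for illustration), key derivation and MIC verification can still be exercised at load time with:

  modprobe wusbcore debug_crypto_verify=1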
-4
drivers/usb/wusbcore/dev-sysfs.c
··· 28 28 #include <linux/workqueue.h> 29 29 #include "wusbhc.h" 30 30 31 - #undef D_LOCAL 32 - #define D_LOCAL 4 33 - #include <linux/uwb/debug.h> 34 - 35 31 static ssize_t wusb_disconnect_store(struct device *dev, 36 32 struct device_attribute *attr, 37 33 const char *buf, size_t size)
+27 -204
drivers/usb/wusbcore/devconnect.c
··· 57 57 * Called by notif.c:wusb_handle_dn_connect() 58 58 * when a DN_Connect is received. 59 59 * 60 - * wusbhc_devconnect_auth() Called by rh.c:wusbhc_rh_port_reset() when 61 - * doing the device connect sequence. 62 - * 63 60 * wusb_devconnect_acked() Ack done, release resources. 64 61 * 65 62 * wusb_handle_dn_alive() Called by notif.c:wusb_handle_dn() ··· 65 68 * wusb_handle_dn_disconnect()Called by notif.c:wusb_handle_dn() to 66 69 * process a disconenct request from a 67 70 * device. 68 - * 69 - * wusb_dev_reset() Called by rh.c:wusbhc_rh_port_reset() when 70 - * resetting a device. 71 71 * 72 72 * __wusb_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when 73 73 * disabling a port. ··· 90 96 #include <linux/ctype.h> 91 97 #include <linux/workqueue.h> 92 98 #include "wusbhc.h" 93 - 94 - #undef D_LOCAL 95 - #define D_LOCAL 1 96 - #include <linux/uwb/debug.h> 97 99 98 100 static void wusbhc_devconnect_acked_work(struct work_struct *work); 99 101 ··· 230 240 list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list); 231 241 wusbhc->cack_count++; 232 242 wusbhc_fill_cack_ie(wusbhc); 243 + 233 244 return wusb_dev; 234 245 } 235 246 ··· 241 250 */ 242 251 static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) 243 252 { 244 - struct device *dev = wusbhc->dev; 245 - d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev); 246 253 list_del_init(&wusb_dev->cack_node); 247 254 wusbhc->cack_count--; 248 255 wusbhc_fill_cack_ie(wusbhc); 249 - d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev); 250 256 } 251 257 252 258 /* ··· 251 263 static 252 264 void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) 253 265 { 254 - struct device *dev = wusbhc->dev; 255 - d_fnstart(3, dev, "(wusbhc %p wusb_dev %p)\n", wusbhc, wusb_dev); 256 266 wusbhc_cack_rm(wusbhc, wusb_dev); 257 267 if (wusbhc->cack_count) 258 268 wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr); 259 269 else 260 270 wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr); 261 - d_fnend(3, dev, "(wusbhc %p wusb_dev %p) = void\n", wusbhc, wusb_dev); 262 271 } 263 272 264 273 static void wusbhc_devconnect_acked_work(struct work_struct *work) ··· 305 320 struct wusb_port *port; 306 321 unsigned idx, devnum; 307 322 308 - d_fnstart(3, dev, "(%p, %p, %s)\n", wusbhc, dnc, pr_cdid); 309 323 mutex_lock(&wusbhc->mutex); 310 324 311 325 /* Check we are not handling it already */ ··· 350 366 port->wusb_dev = wusb_dev; 351 367 port->status |= USB_PORT_STAT_CONNECTION; 352 368 port->change |= USB_PORT_STAT_C_CONNECTION; 353 - port->reset_count = 0; 354 369 /* Now the port status changed to connected; khubd will 355 370 * pick the change up and try to reset the port to bring it to 356 371 * the enabled state--so this process returns up to the stack 357 - * and it calls back into wusbhc_rh_port_reset() who will call 358 - * devconnect_auth(). 372 + * and it calls back into wusbhc_rh_port_reset(). 
359 373 */ 360 374 error_unlock: 361 375 mutex_unlock(&wusbhc->mutex); 362 - d_fnend(3, dev, "(%p, %p, %s) = void\n", wusbhc, dnc, pr_cdid); 363 376 return; 364 377 365 378 } ··· 379 398 static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc, 380 399 struct wusb_port *port) 381 400 { 382 - struct device *dev = wusbhc->dev; 383 401 struct wusb_dev *wusb_dev = port->wusb_dev; 384 402 385 - d_fnstart(3, dev, "(wusbhc %p, port %p)\n", wusbhc, port); 386 403 port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE 387 404 | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET 388 405 | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED); ··· 392 413 wusb_dev_put(wusb_dev); 393 414 } 394 415 port->wusb_dev = NULL; 395 - /* don't reset the reset_count to zero or wusbhc_rh_port_reset will get 396 - * confused! We only reset to zero when we connect a new device. 397 - */ 398 416 399 417 /* After a device disconnects, change the GTK (see [WUSB] 400 418 * section 6.2.11.2). */ 401 419 wusbhc_gtk_rekey(wusbhc); 402 420 403 - d_fnend(3, dev, "(wusbhc %p, port %p) = void\n", wusbhc, port); 404 421 /* The Wireless USB part has forgotten about the device already; now 405 422 * khubd's timer will pick up the disconnection and remove the USB 406 423 * device from the system 407 424 */ 408 - } 409 - 410 - /* 411 - * Authenticate a device into the WUSB Cluster 412 - * 413 - * Called from the Root Hub code (rh.c:wusbhc_rh_port_reset()) when 414 - * asking for a reset on a port that is not enabled (ie: first connect 415 - * on the port). 416 - * 417 - * Performs the 4way handshake to allow the device to comunicate w/ the 418 - * WUSB Cluster securely; once done, issue a request to the device for 419 - * it to change to address 0. 420 - * 421 - * This mimics the reset step of Wired USB that once resetting a 422 - * device, leaves the port in enabled state and the dev with the 423 - * default address (0). 424 - * 425 - * WUSB1.0[7.1.2] 426 - * 427 - * @port_idx: port where the change happened--This is the index into 428 - * the wusbhc port array, not the USB port number. 
429 - */ 430 - int wusbhc_devconnect_auth(struct wusbhc *wusbhc, u8 port_idx) 431 - { 432 - struct device *dev = wusbhc->dev; 433 - struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx); 434 - 435 - d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx); 436 - port->status &= ~USB_PORT_STAT_RESET; 437 - port->status |= USB_PORT_STAT_ENABLE; 438 - port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE; 439 - d_fnend(3, dev, "(%p, %u) = 0\n", wusbhc, port_idx); 440 - return 0; 441 425 } 442 426 443 427 /* ··· 470 528 */ 471 529 static void wusbhc_keep_alive_run(struct work_struct *ws) 472 530 { 473 - struct delayed_work *dw = 474 - container_of(ws, struct delayed_work, work); 475 - struct wusbhc *wusbhc = 476 - container_of(dw, struct wusbhc, keep_alive_timer); 531 + struct delayed_work *dw = container_of(ws, struct delayed_work, work); 532 + struct wusbhc *wusbhc = container_of(dw, struct wusbhc, keep_alive_timer); 477 533 478 - d_fnstart(5, wusbhc->dev, "(wusbhc %p)\n", wusbhc); 479 - if (wusbhc->active) { 480 - mutex_lock(&wusbhc->mutex); 481 - __wusbhc_keep_alive(wusbhc); 482 - mutex_unlock(&wusbhc->mutex); 483 - queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, 484 - (wusbhc->trust_timeout * CONFIG_HZ)/1000/2); 485 - } 486 - d_fnend(5, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc); 487 - return; 534 + mutex_lock(&wusbhc->mutex); 535 + __wusbhc_keep_alive(wusbhc); 536 + mutex_unlock(&wusbhc->mutex); 537 + 538 + queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, 539 + msecs_to_jiffies(wusbhc->trust_timeout / 2)); 488 540 } 489 541 490 542 /* ··· 521 585 */ 522 586 static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) 523 587 { 524 - struct device *dev = wusbhc->dev; 525 - 526 - d_printf(2, dev, "DN ALIVE: device 0x%02x pong\n", wusb_dev->addr); 527 - 528 588 mutex_lock(&wusbhc->mutex); 529 589 wusb_dev->entry_ts = jiffies; 530 590 __wusbhc_keep_alive(wusbhc); ··· 553 621 "no-beacon" 554 622 }; 555 623 556 - d_fnstart(3, dev, "(%p, %p, %zu)\n", wusbhc, dn_hdr, size); 557 624 if (size < sizeof(*dnc)) { 558 625 dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n", 559 626 size, sizeof(*dnc)); 560 - goto out; 627 + return; 561 628 } 562 629 563 630 dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr); ··· 568 637 wusb_dn_connect_new_connection(dnc) ? "connect" : "reconnect"); 569 638 /* ACK the connect */ 570 639 wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid); 571 - out: 572 - d_fnend(3, dev, "(%p, %p, %zu) = void\n", 573 - wusbhc, dn_hdr, size); 574 - return; 575 640 } 576 641 577 642 /* ··· 586 659 mutex_lock(&wusbhc->mutex); 587 660 __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx)); 588 661 mutex_unlock(&wusbhc->mutex); 589 - } 590 - 591 - /* 592 - * Reset a WUSB device on a HWA 593 - * 594 - * @wusbhc 595 - * @port_idx Index of the port where the device is 596 - * 597 - * In Wireless USB, a reset is more or less equivalent to a full 598 - * disconnect; so we just do a full disconnect and send the device a 599 - * Device Reset IE (WUSB1.0[7.5.11]) giving it a few millisecs (6 MMCs). 
600 - * 601 - * @wusbhc should be refcounted and unlocked 602 - */ 603 - int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port_idx) 604 - { 605 - int result; 606 - struct device *dev = wusbhc->dev; 607 - struct wusb_dev *wusb_dev; 608 - struct wuie_reset *ie; 609 - 610 - d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx); 611 - mutex_lock(&wusbhc->mutex); 612 - result = 0; 613 - wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; 614 - if (wusb_dev == NULL) { 615 - /* reset no device? ignore */ 616 - dev_dbg(dev, "RESET: no device at port %u, ignoring\n", 617 - port_idx); 618 - goto error_unlock; 619 - } 620 - result = -ENOMEM; 621 - ie = kzalloc(sizeof(*ie), GFP_KERNEL); 622 - if (ie == NULL) 623 - goto error_unlock; 624 - ie->hdr.bLength = sizeof(ie->hdr) + sizeof(ie->CDID); 625 - ie->hdr.bIEIdentifier = WUIE_ID_RESET_DEVICE; 626 - ie->CDID = wusb_dev->cdid; 627 - result = wusbhc_mmcie_set(wusbhc, 0xff, 6, &ie->hdr); 628 - if (result < 0) { 629 - dev_err(dev, "RESET: cant's set MMC: %d\n", result); 630 - goto error_kfree; 631 - } 632 - __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); 633 - 634 - /* 120ms, hopefully 6 MMCs (FIXME) */ 635 - msleep(120); 636 - wusbhc_mmcie_rm(wusbhc, &ie->hdr); 637 - error_kfree: 638 - kfree(ie); 639 - error_unlock: 640 - mutex_unlock(&wusbhc->mutex); 641 - d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result); 642 - return result; 643 662 } 644 663 645 664 /* ··· 608 735 struct device *dev = wusbhc->dev; 609 736 struct wusb_dev *wusb_dev; 610 737 611 - d_fnstart(3, dev, "(%p, %p)\n", wusbhc, dn_hdr); 612 - 613 738 if (size < sizeof(struct wusb_dn_hdr)) { 614 739 dev_err(dev, "DN data shorter than DN header (%d < %d)\n", 615 740 (int)size, (int)sizeof(struct wusb_dn_hdr)); 616 - goto out; 741 + return; 617 742 } 618 743 619 744 wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); 620 745 if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) { 621 746 dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n", 622 747 dn_hdr->bType, srcaddr); 623 - goto out; 748 + return; 624 749 } 625 750 626 751 switch (dn_hdr->bType) { ··· 643 772 dev_warn(dev, "unknown DN %u (%d octets) from %u\n", 644 773 dn_hdr->bType, (int)size, srcaddr); 645 774 } 646 - out: 647 - d_fnend(3, dev, "(%p, %p) = void\n", wusbhc, dn_hdr); 648 - return; 649 775 } 650 776 EXPORT_SYMBOL_GPL(wusbhc_handle_dn); 651 777 ··· 672 804 struct wusb_dev *wusb_dev; 673 805 struct wuie_disconnect *ie; 674 806 675 - d_fnstart(3, dev, "(%p, %u)\n", wusbhc, port_idx); 676 - result = 0; 677 807 wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; 678 808 if (wusb_dev == NULL) { 679 809 /* reset no device? ignore */ 680 810 dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n", 681 811 port_idx); 682 - goto error; 812 + return; 683 813 } 684 814 __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); 685 815 686 - result = -ENOMEM; 687 816 ie = kzalloc(sizeof(*ie), GFP_KERNEL); 688 817 if (ie == NULL) 689 - goto error; 818 + return; 690 819 ie->hdr.bLength = sizeof(*ie); 691 820 ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT; 692 821 ie->bDeviceAddress = wusb_dev->addr; 693 822 result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr); 694 - if (result < 0) { 823 + if (result < 0) 695 824 dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result); 696 - goto error_kfree; 825 + else { 826 + /* At least 6 MMCs, assuming at least 1 MMC per zone. 
*/ 827 + msleep(7*4); 828 + wusbhc_mmcie_rm(wusbhc, &ie->hdr); 697 829 } 698 - 699 - /* 120ms, hopefully 6 MMCs */ 700 - msleep(100); 701 - wusbhc_mmcie_rm(wusbhc, &ie->hdr); 702 - error_kfree: 703 830 kfree(ie); 704 - error: 705 - d_fnend(3, dev, "(%p, %u) = %d\n", wusbhc, port_idx, result); 706 - return; 707 - } 708 - 709 - static void wusb_cap_descr_printf(const unsigned level, struct device *dev, 710 - const struct usb_wireless_cap_descriptor *wcd) 711 - { 712 - d_printf(level, dev, 713 - "WUSB Capability Descriptor\n" 714 - " bDevCapabilityType 0x%02x\n" 715 - " bmAttributes 0x%02x\n" 716 - " wPhyRates 0x%04x\n" 717 - " bmTFITXPowerInfo 0x%02x\n" 718 - " bmFFITXPowerInfo 0x%02x\n" 719 - " bmBandGroup 0x%04x\n" 720 - " bReserved 0x%02x\n", 721 - wcd->bDevCapabilityType, 722 - wcd->bmAttributes, 723 - le16_to_cpu(wcd->wPHYRates), 724 - wcd->bmTFITXPowerInfo, 725 - wcd->bmFFITXPowerInfo, 726 - wcd->bmBandGroup, 727 - wcd->bReserved); 728 831 } 729 832 730 833 /* ··· 738 899 } 739 900 cap_size = cap_hdr->bLength; 740 901 cap_type = cap_hdr->bDevCapabilityType; 741 - d_printf(4, dev, "BOS Capability: 0x%02x (%zu bytes)\n", 742 - cap_type, cap_size); 743 902 if (cap_size == 0) 744 903 break; 745 904 if (cap_size > top - itr) { ··· 749 912 result = -EBADF; 750 913 goto error_bad_cap; 751 914 } 752 - d_dump(3, dev, itr, cap_size); 753 915 switch (cap_type) { 754 916 case USB_CAP_TYPE_WIRELESS_USB: 755 917 if (cap_size != sizeof(*wusb_dev->wusb_cap_descr)) ··· 756 920 "descriptor is %zu bytes vs %zu " 757 921 "needed\n", cap_size, 758 922 sizeof(*wusb_dev->wusb_cap_descr)); 759 - else { 923 + else 760 924 wusb_dev->wusb_cap_descr = itr; 761 - wusb_cap_descr_printf(3, dev, itr); 762 - } 763 925 break; 764 926 default: 765 927 dev_err(dev, "BUG? Unknown BOS capability 0x%02x " ··· 822 988 "%zu bytes): %zd\n", desc_size, result); 823 989 goto error_get_descriptor; 824 990 } 825 - d_printf(2, dev, "Got BOS descriptor %zd bytes, %u capabilities\n", 826 - result, bos->bNumDeviceCaps); 827 - d_dump(2, dev, bos, result); 991 + 828 992 result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result); 829 993 if (result < 0) 830 994 goto error_bad_bos; ··· 888 1056 if (usb_dev->wusb == 0 || usb_dev->devnum == 1) 889 1057 return; /* skip non wusb and wusb RHs */ 890 1058 891 - d_fnstart(3, dev, "(usb_dev %p)\n", usb_dev); 892 - 893 1059 wusbhc = wusbhc_get_by_usb_dev(usb_dev); 894 1060 if (wusbhc == NULL) 895 1061 goto error_nodev; ··· 917 1087 wusb_dev_put(wusb_dev); 918 1088 wusbhc_put(wusbhc); 919 1089 error_nodev: 920 - d_fnend(3, dev, "(usb_dev %p) = void\n", usb_dev); 921 1090 return; 922 1091 923 1092 wusb_dev_sysfs_rm(wusb_dev); ··· 1003 1174 1004 1175 void wusb_dev_destroy(struct kref *_wusb_dev) 1005 1176 { 1006 - struct wusb_dev *wusb_dev 1007 - = container_of(_wusb_dev, struct wusb_dev, refcnt); 1177 + struct wusb_dev *wusb_dev = container_of(_wusb_dev, struct wusb_dev, refcnt); 1178 + 1008 1179 list_del_init(&wusb_dev->cack_node); 1009 1180 wusb_dev_free(wusb_dev); 1010 - d_fnend(1, NULL, "%s (wusb_dev %p) = void\n", __func__, wusb_dev); 1011 1181 } 1012 1182 EXPORT_SYMBOL_GPL(wusb_dev_destroy); 1013 1183 ··· 1018 1190 */ 1019 1191 int wusbhc_devconnect_create(struct wusbhc *wusbhc) 1020 1192 { 1021 - d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc); 1022 - 1023 1193 wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE; 1024 1194 wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr); 1025 1195 INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run); ··· 1026 
1200 wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr); 1027 1201 INIT_LIST_HEAD(&wusbhc->cack_list); 1028 1202 1029 - d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc); 1030 1203 return 0; 1031 1204 } 1032 1205 ··· 1034 1209 */ 1035 1210 void wusbhc_devconnect_destroy(struct wusbhc *wusbhc) 1036 1211 { 1037 - d_fnstart(3, wusbhc->dev, "(wusbhc %p)\n", wusbhc); 1038 - d_fnend(3, wusbhc->dev, "(wusbhc %p) = void\n", wusbhc); 1212 + /* no op */ 1039 1213 } 1040 1214 1041 1215 /* ··· 1046 1222 * FIXME: This also enables the keep alives but this is not necessary 1047 1223 * until there are connected and authenticated devices. 1048 1224 */ 1049 - int wusbhc_devconnect_start(struct wusbhc *wusbhc, 1050 - const struct wusb_ckhdid *chid) 1225 + int wusbhc_devconnect_start(struct wusbhc *wusbhc) 1051 1226 { 1052 1227 struct device *dev = wusbhc->dev; 1053 1228 struct wuie_host_info *hi; ··· 1059 1236 hi->hdr.bLength = sizeof(*hi); 1060 1237 hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO; 1061 1238 hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL); 1062 - hi->CHID = *chid; 1239 + hi->CHID = wusbhc->chid; 1063 1240 result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr); 1064 1241 if (result < 0) { 1065 1242 dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result);
+41 -77
drivers/usb/wusbcore/mmc.c
··· 159 159 } 160 160 EXPORT_SYMBOL_GPL(wusbhc_mmcie_rm); 161 161 162 + static int wusbhc_mmc_start(struct wusbhc *wusbhc) 163 + { 164 + int ret; 165 + 166 + mutex_lock(&wusbhc->mutex); 167 + ret = wusbhc->start(wusbhc); 168 + if (ret >= 0) 169 + wusbhc->active = 1; 170 + mutex_unlock(&wusbhc->mutex); 171 + 172 + return ret; 173 + } 174 + 175 + static void wusbhc_mmc_stop(struct wusbhc *wusbhc) 176 + { 177 + mutex_lock(&wusbhc->mutex); 178 + wusbhc->active = 0; 179 + wusbhc->stop(wusbhc, WUSB_CHANNEL_STOP_DELAY_MS); 180 + mutex_unlock(&wusbhc->mutex); 181 + } 182 + 162 183 /* 163 184 * wusbhc_start - start transmitting MMCs and accepting connections 164 185 * @wusbhc: the HC to start 165 - * @chid: the CHID to use for this host 166 186 * 167 187 * Establishes a cluster reservation, enables device connections, and 168 188 * starts MMCs with appropriate DNTS parameters. 169 189 */ 170 - int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid) 190 + int wusbhc_start(struct wusbhc *wusbhc) 171 191 { 172 192 int result; 173 193 struct device *dev = wusbhc->dev; ··· 201 181 goto error_rsv_establish; 202 182 } 203 183 204 - result = wusbhc_devconnect_start(wusbhc, chid); 184 + result = wusbhc_devconnect_start(wusbhc); 205 185 if (result < 0) { 206 186 dev_err(dev, "error enabling device connections: %d\n", result); 207 187 goto error_devconnect_start; ··· 219 199 dev_err(dev, "Cannot set DNTS parameters: %d\n", result); 220 200 goto error_set_num_dnts; 221 201 } 222 - result = wusbhc->start(wusbhc); 202 + result = wusbhc_mmc_start(wusbhc); 223 203 if (result < 0) { 224 204 dev_err(dev, "error starting wusbch: %d\n", result); 225 205 goto error_wusbhc_start; 226 206 } 227 - wusbhc->active = 1; 207 + 228 208 return 0; 229 209 230 210 error_wusbhc_start: ··· 239 219 } 240 220 241 221 /* 242 - * Disconnect all from the WUSB Channel 243 - * 244 - * Send a Host Disconnect IE in the MMC, wait, don't send it any more 245 - */ 246 - static int __wusbhc_host_disconnect_ie(struct wusbhc *wusbhc) 247 - { 248 - int result = -ENOMEM; 249 - struct wuie_host_disconnect *host_disconnect_ie; 250 - might_sleep(); 251 - host_disconnect_ie = kmalloc(sizeof(*host_disconnect_ie), GFP_KERNEL); 252 - if (host_disconnect_ie == NULL) 253 - goto error_alloc; 254 - host_disconnect_ie->hdr.bLength = sizeof(*host_disconnect_ie); 255 - host_disconnect_ie->hdr.bIEIdentifier = WUIE_ID_HOST_DISCONNECT; 256 - result = wusbhc_mmcie_set(wusbhc, 0, 0, &host_disconnect_ie->hdr); 257 - if (result < 0) 258 - goto error_mmcie_set; 259 - 260 - /* WUSB1.0[8.5.3.1 & 7.5.2] */ 261 - msleep(100); 262 - wusbhc_mmcie_rm(wusbhc, &host_disconnect_ie->hdr); 263 - error_mmcie_set: 264 - kfree(host_disconnect_ie); 265 - error_alloc: 266 - return result; 267 - } 268 - 269 - /* 270 222 * wusbhc_stop - stop transmitting MMCs 271 223 * @wusbhc: the HC to stop 272 224 * 273 - * Send a Host Disconnect IE, wait, remove all the MMCs (stop sending MMCs). 274 - * 275 - * If we can't allocate a Host Stop IE, screw it, we don't notify the 276 - * devices we are disconnecting... 225 + * Stops the WUSB channel and removes the cluster reservation. 
277 226 */ 278 227 void wusbhc_stop(struct wusbhc *wusbhc) 279 228 { 280 - if (wusbhc->active) { 281 - wusbhc->active = 0; 282 - wusbhc->stop(wusbhc); 283 - wusbhc_sec_stop(wusbhc); 284 - __wusbhc_host_disconnect_ie(wusbhc); 285 - wusbhc_devconnect_stop(wusbhc); 286 - wusbhc_rsv_terminate(wusbhc); 287 - } 288 - } 289 - EXPORT_SYMBOL_GPL(wusbhc_stop); 290 - 291 - /* 292 - * Change the CHID in a WUSB Channel 293 - * 294 - * If it is just a new CHID, send a Host Disconnect IE and then change 295 - * the CHID IE. 296 - */ 297 - static int __wusbhc_chid_change(struct wusbhc *wusbhc, 298 - const struct wusb_ckhdid *chid) 299 - { 300 - int result = -ENOSYS; 301 - struct device *dev = wusbhc->dev; 302 - dev_err(dev, "%s() not implemented yet\n", __func__); 303 - return result; 304 - 305 - BUG_ON(wusbhc->wuie_host_info == NULL); 306 - __wusbhc_host_disconnect_ie(wusbhc); 307 - wusbhc->wuie_host_info->CHID = *chid; 308 - result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->wuie_host_info->hdr); 309 - if (result < 0) 310 - dev_err(dev, "Can't update Host Info WUSB IE: %d\n", result); 311 - return result; 229 + wusbhc_mmc_stop(wusbhc); 230 + wusbhc_sec_stop(wusbhc); 231 + wusbhc_devconnect_stop(wusbhc); 232 + wusbhc_rsv_terminate(wusbhc); 312 233 } 313 234 314 235 /* ··· 267 306 chid = NULL; 268 307 269 308 mutex_lock(&wusbhc->mutex); 270 - if (wusbhc->active) { 271 - if (chid) 272 - result = __wusbhc_chid_change(wusbhc, chid); 273 - else 274 - wusbhc_stop(wusbhc); 275 - } else { 276 - if (chid) 277 - wusbhc_start(wusbhc, chid); 309 + if (chid) { 310 + if (wusbhc->active) { 311 + mutex_unlock(&wusbhc->mutex); 312 + return -EBUSY; 313 + } 314 + wusbhc->chid = *chid; 278 315 } 279 316 mutex_unlock(&wusbhc->mutex); 317 + 318 + if (chid) 319 + result = uwb_radio_start(&wusbhc->pal); 320 + else 321 + uwb_radio_stop(&wusbhc->pal); 280 322 return result; 281 323 } 282 324 EXPORT_SYMBOL_GPL(wusbhc_chid_set);
+14 -2
drivers/usb/wusbcore/pal.c
··· 18 18 */ 19 19 #include "wusbhc.h" 20 20 21 + static void wusbhc_channel_changed(struct uwb_pal *pal, int channel) 22 + { 23 + struct wusbhc *wusbhc = container_of(pal, struct wusbhc, pal); 24 + 25 + if (channel < 0) 26 + wusbhc_stop(wusbhc); 27 + else 28 + wusbhc_start(wusbhc); 29 + } 30 + 21 31 /** 22 32 * wusbhc_pal_register - register the WUSB HC as a UWB PAL 23 33 * @wusbhc: the WUSB HC ··· 38 28 39 29 wusbhc->pal.name = "wusbhc"; 40 30 wusbhc->pal.device = wusbhc->usb_hcd.self.controller; 31 + wusbhc->pal.rc = wusbhc->uwb_rc; 32 + wusbhc->pal.channel_changed = wusbhc_channel_changed; 41 33 42 - return uwb_pal_register(wusbhc->uwb_rc, &wusbhc->pal); 34 + return uwb_pal_register(&wusbhc->pal); 43 35 } 44 36 45 37 /** ··· 50 38 */ 51 39 void wusbhc_pal_unregister(struct wusbhc *wusbhc) 52 40 { 53 - uwb_pal_unregister(wusbhc->uwb_rc, &wusbhc->pal); 41 + uwb_pal_unregister(&wusbhc->pal); 54 42 }
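The pal.c hunk above is the whole of the new radio-manager contract from a PAL's point of view: fill in a struct uwb_pal (name, device, rc and a channel_changed callback), register it, and ask the radio manager to start or stop the radio on the PAL's behalf; the manager calls back with the channel in use, or a negative value when the radio goes down. A minimal sketch of a hypothetical PAL following that pattern, using only the fields and entry points visible in this series ("mypal" and its messages are illustrative, and any further initialisation the PAL core may require is omitted):

#include <linux/kernel.h>
#include <linux/uwb.h>

static void mypal_channel_changed(struct uwb_pal *pal, int channel)
{
        if (channel < 0)
                pr_info("mypal: radio stopped\n");
        else
                pr_info("mypal: radio up on channel %d\n", channel);
}

static struct uwb_pal mypal;

static int mypal_register(struct uwb_rc *rc, struct device *dev)
{
        mypal.name = "mypal";
        mypal.device = dev;
        mypal.rc = rc;
        mypal.channel_changed = mypal_channel_changed;

        return uwb_pal_register(&mypal);
}

/* Bringing the channel up or down is then just:
 *      uwb_radio_start(&mypal);
 *      uwb_radio_stop(&mypal);
 */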
+12 -9
drivers/usb/wusbcore/reservation.c
··· 48 48 { 49 49 struct wusbhc *wusbhc = rsv->pal_priv; 50 50 struct device *dev = wusbhc->dev; 51 + struct uwb_mas_bm mas; 51 52 char buf[72]; 52 53 53 54 switch (rsv->state) { 54 55 case UWB_RSV_STATE_O_ESTABLISHED: 55 - bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS); 56 + uwb_rsv_get_usable_mas(rsv, &mas); 57 + bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS); 56 58 dev_dbg(dev, "established reservation: %s\n", buf); 57 - wusbhc_bwa_set(wusbhc, rsv->stream, &rsv->mas); 59 + wusbhc_bwa_set(wusbhc, rsv->stream, &mas); 58 60 break; 59 61 case UWB_RSV_STATE_NONE: 60 62 dev_dbg(dev, "removed reservation\n"); 61 63 wusbhc_bwa_set(wusbhc, 0, NULL); 62 - wusbhc->rsv = NULL; 63 64 break; 64 65 default: 65 66 dev_dbg(dev, "unexpected reservation state: %d\n", rsv->state); ··· 87 86 bcid.data[0] = wusbhc->cluster_id; 88 87 bcid.data[1] = 0; 89 88 90 - rsv->owner = &rc->uwb_dev; 91 89 rsv->target.type = UWB_RSV_TARGET_DEVADDR; 92 90 rsv->target.devaddr = bcid; 93 91 rsv->type = UWB_DRP_TYPE_PRIVATE; 94 - rsv->max_mas = 256; 95 - rsv->min_mas = 16; /* one MAS per zone? */ 96 - rsv->sparsity = 16; /* at least one MAS in each zone? */ 92 + rsv->max_mas = 256; /* try to get as much as possible */ 93 + rsv->min_mas = 15; /* one MAS per zone */ 94 + rsv->max_interval = 1; /* max latency is one zone */ 97 95 rsv->is_multicast = true; 98 96 99 97 ret = uwb_rsv_establish(rsv); ··· 105 105 106 106 107 107 /** 108 - * wusbhc_rsv_terminate - terminate any cluster reservation 108 + * wusbhc_rsv_terminate - terminate the cluster reservation 109 109 * @wusbhc: the WUSB host whose reservation is to be terminated 110 110 */ 111 111 void wusbhc_rsv_terminate(struct wusbhc *wusbhc) 112 112 { 113 - if (wusbhc->rsv) 113 + if (wusbhc->rsv) { 114 114 uwb_rsv_terminate(wusbhc->rsv); 115 + uwb_rsv_destroy(wusbhc->rsv); 116 + wusbhc->rsv = NULL; 117 + } 115 118 }
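The new reservation parameters read straight off the MAS grid: an ECMA-368 superframe of 65,536 us is divided into 256 MAS of 256 us each, arranged as 16 zones of 16 MAS, so a zone lasts 4.096 ms. With max_interval = 1 the host asks for a "row" reservation (at least one MAS in every zone), which is why min_mas is 15 (one MAS per zone outside the beacon period) and why the worst-case service latency is a single zone. A standalone check of that arithmetic follows; the constants are the standard ECMA-368 numbers, and reading min_mas = 15 as "one MAS in each non-beacon zone" is an interpretation of the comment in the hunk rather than something the diff states:

#include <stdio.h>

/* ECMA-368 superframe layout used by UWB/WUSB. */
#define MAS_PER_SUPERFRAME      256
#define MAS_DURATION_US         256
#define ZONES_PER_SUPERFRAME    16
#define MAS_PER_ZONE            (MAS_PER_SUPERFRAME / ZONES_PER_SUPERFRAME)

int main(void)
{
        int zone_us = MAS_PER_ZONE * MAS_DURATION_US;

        printf("superframe: %d us\n", MAS_PER_SUPERFRAME * MAS_DURATION_US);
        printf("zone: %d MAS = %d us\n", MAS_PER_ZONE, zone_us);

        /* A row reservation with max_interval == 1: one MAS in every zone
         * except the beacon-period zone, serviced at least once per zone. */
        printf("row reservation: %d MAS min, latency <= %d us\n",
               ZONES_PER_SUPERFRAME - 1, zone_us);
        return 0;
}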
+37 -67
drivers/usb/wusbcore/rh.c
··· 71 71 */ 72 72 #include "wusbhc.h" 73 73 74 - #define D_LOCAL 0 75 - #include <linux/uwb/debug.h> 76 - 77 74 /* 78 75 * Reset a fake port 79 76 * 80 - * This can be called to reset a port from any other state or to reset 81 - * it when connecting. In Wireless USB they are different; when doing 82 - * a new connect that involves going over the authentication. When 83 - * just reseting, its a different story. 77 + * Using a Reset Device IE is too heavyweight as it causes the device 78 + * to enter the UnConnected state and leave the cluster, this can mean 79 + * that when the device reconnects it is connected to a different fake 80 + * port. 84 81 * 85 - * The Linux USB stack resets a port twice before it considers it 86 - * enabled, so we have to detect and ignore that. 82 + * Instead, reset authenticated devices with a SetAddress(0), followed 83 + * by a SetAddresss(AuthAddr). 84 + * 85 + * For unauthenticated devices just pretend to reset but do nothing. 86 + * If the device initialization continues to fail it will eventually 87 + * time out after TrustTimeout and enter the UnConnected state. 87 88 * 88 89 * @wusbhc is assumed referenced and @wusbhc->mutex unlocked. 89 90 * ··· 98 97 { 99 98 int result = 0; 100 99 struct wusb_port *port = wusb_port_by_idx(wusbhc, port_idx); 100 + struct wusb_dev *wusb_dev = port->wusb_dev; 101 101 102 - d_fnstart(3, wusbhc->dev, "(wusbhc %p port_idx %u)\n", 103 - wusbhc, port_idx); 104 - if (port->reset_count == 0) { 105 - wusbhc_devconnect_auth(wusbhc, port_idx); 106 - port->reset_count++; 107 - } else if (port->reset_count == 1) 108 - /* see header */ 109 - d_printf(2, wusbhc->dev, "Ignoring second reset on port_idx " 110 - "%u\n", port_idx); 102 + port->status |= USB_PORT_STAT_RESET; 103 + port->change |= USB_PORT_STAT_C_RESET; 104 + 105 + if (wusb_dev->addr & WUSB_DEV_ADDR_UNAUTH) 106 + result = 0; 111 107 else 112 - result = wusbhc_dev_reset(wusbhc, port_idx); 113 - d_fnend(3, wusbhc->dev, "(wusbhc %p port_idx %u) = %d\n", 114 - wusbhc, port_idx, result); 108 + result = wusb_dev_update_address(wusbhc, wusb_dev); 109 + 110 + port->status &= ~USB_PORT_STAT_RESET; 111 + port->status |= USB_PORT_STAT_ENABLE; 112 + port->change |= USB_PORT_STAT_C_RESET | USB_PORT_STAT_C_ENABLE; 113 + 115 114 return result; 116 115 } 117 116 ··· 139 138 size_t cnt, size; 140 139 unsigned long *buf = (unsigned long *) _buf; 141 140 142 - d_fnstart(1, wusbhc->dev, "(wusbhc %p)\n", wusbhc); 143 141 /* WE DON'T LOCK, see comment */ 144 142 size = wusbhc->ports_max + 1 /* hub bit */; 145 143 size = (size + 8 - 1) / 8; /* round to bytes */ ··· 147 147 set_bit(cnt + 1, buf); 148 148 else 149 149 clear_bit(cnt + 1, buf); 150 - d_fnend(1, wusbhc->dev, "(wusbhc %p) %u, buffer:\n", wusbhc, (int)size); 151 - d_dump(1, wusbhc->dev, _buf, size); 152 150 return size; 153 151 } 154 152 EXPORT_SYMBOL_GPL(wusbhc_rh_status_data); ··· 195 197 static int wusbhc_rh_clear_hub_feat(struct wusbhc *wusbhc, u16 feature) 196 198 { 197 199 int result; 198 - struct device *dev = wusbhc->dev; 199 200 200 - d_fnstart(4, dev, "(%p, feature 0x%04u)\n", wusbhc, feature); 201 201 switch (feature) { 202 202 case C_HUB_LOCAL_POWER: 203 203 /* FIXME: maybe plug bit 0 to the power input status, ··· 207 211 default: 208 212 result = -EPIPE; 209 213 } 210 - d_fnend(4, dev, "(%p, feature 0x%04u), %d\n", wusbhc, feature, result); 211 214 return result; 212 215 } 213 216 ··· 233 238 static int wusbhc_rh_set_port_feat(struct wusbhc *wusbhc, u16 feature, 234 239 u8 selector, u8 port_idx) 235 240 { 236 - int result 
= -EINVAL; 237 241 struct device *dev = wusbhc->dev; 238 242 239 - d_fnstart(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d)\n", 240 - feature, selector, port_idx); 241 - 242 243 if (port_idx > wusbhc->ports_max) 243 - goto error; 244 + return -EINVAL; 244 245 245 246 switch (feature) { 246 247 /* According to USB2.0[11.24.2.13]p2, these features ··· 246 255 case USB_PORT_FEAT_C_SUSPEND: 247 256 case USB_PORT_FEAT_C_CONNECTION: 248 257 case USB_PORT_FEAT_C_RESET: 249 - result = 0; 250 - break; 251 - 258 + return 0; 252 259 case USB_PORT_FEAT_POWER: 253 260 /* No such thing, but we fake it works */ 254 261 mutex_lock(&wusbhc->mutex); 255 262 wusb_port_by_idx(wusbhc, port_idx)->status |= USB_PORT_STAT_POWER; 256 263 mutex_unlock(&wusbhc->mutex); 257 - result = 0; 258 - break; 264 + return 0; 259 265 case USB_PORT_FEAT_RESET: 260 - result = wusbhc_rh_port_reset(wusbhc, port_idx); 261 - break; 266 + return wusbhc_rh_port_reset(wusbhc, port_idx); 262 267 case USB_PORT_FEAT_ENABLE: 263 268 case USB_PORT_FEAT_SUSPEND: 264 269 dev_err(dev, "(port_idx %d) set feat %d/%d UNIMPLEMENTED\n", 265 270 port_idx, feature, selector); 266 - result = -ENOSYS; 267 - break; 271 + return -ENOSYS; 268 272 default: 269 273 dev_err(dev, "(port_idx %d) set feat %d/%d UNKNOWN\n", 270 274 port_idx, feature, selector); 271 - result = -EPIPE; 272 - break; 275 + return -EPIPE; 273 276 } 274 - error: 275 - d_fnend(4, dev, "(feat 0x%04u, selector 0x%u, port_idx %d) = %d\n", 276 - feature, selector, port_idx, result); 277 - return result; 277 + 278 + return 0; 278 279 } 279 280 280 281 /* ··· 277 294 static int wusbhc_rh_clear_port_feat(struct wusbhc *wusbhc, u16 feature, 278 295 u8 selector, u8 port_idx) 279 296 { 280 - int result = -EINVAL; 297 + int result = 0; 281 298 struct device *dev = wusbhc->dev; 282 299 283 - d_fnstart(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d)\n", 284 - wusbhc, feature, selector, port_idx); 285 - 286 300 if (port_idx > wusbhc->ports_max) 287 - goto error; 301 + return -EINVAL; 288 302 289 303 mutex_lock(&wusbhc->mutex); 290 - result = 0; 291 304 switch (feature) { 292 305 case USB_PORT_FEAT_POWER: /* fake port always on */ 293 306 /* According to USB2.0[11.24.2.7.1.4], no need to implement? */ ··· 303 324 break; 304 325 case USB_PORT_FEAT_SUSPEND: 305 326 case USB_PORT_FEAT_C_SUSPEND: 306 - case 0xffff: /* ??? 
FIXME */ 307 327 dev_err(dev, "(port_idx %d) Clear feat %d/%d UNIMPLEMENTED\n", 308 328 port_idx, feature, selector); 309 - /* dump_stack(); */ 310 329 result = -ENOSYS; 311 330 break; 312 331 default: ··· 314 337 break; 315 338 } 316 339 mutex_unlock(&wusbhc->mutex); 317 - error: 318 - d_fnend(4, dev, "(wusbhc %p feat 0x%04x selector %d port_idx %d) = " 319 - "%d\n", wusbhc, feature, selector, port_idx, result); 340 + 320 341 return result; 321 342 } 322 343 ··· 326 351 static int wusbhc_rh_get_port_status(struct wusbhc *wusbhc, u16 port_idx, 327 352 u32 *_buf, u16 wLength) 328 353 { 329 - int result = -EINVAL; 330 354 u16 *buf = (u16 *) _buf; 331 355 332 - d_fnstart(1, wusbhc->dev, "(wusbhc %p port_idx %u wLength %u)\n", 333 - wusbhc, port_idx, wLength); 334 356 if (port_idx > wusbhc->ports_max) 335 - goto error; 357 + return -EINVAL; 358 + 336 359 mutex_lock(&wusbhc->mutex); 337 360 buf[0] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->status); 338 361 buf[1] = cpu_to_le16(wusb_port_by_idx(wusbhc, port_idx)->change); 339 - result = 0; 340 362 mutex_unlock(&wusbhc->mutex); 341 - error: 342 - d_fnend(1, wusbhc->dev, "(wusbhc %p) = %d, buffer:\n", wusbhc, result); 343 - d_dump(1, wusbhc->dev, _buf, wLength); 344 - return result; 363 + 364 + return 0; 345 365 } 346 366 347 367 /*
+6 -72
drivers/usb/wusbcore/security.c
··· 27 27 #include <linux/random.h> 28 28 #include "wusbhc.h" 29 29 30 - /* 31 - * DEBUG & SECURITY WARNING!!!! 32 - * 33 - * If you enable this past 1, the debug code will weaken the 34 - * cryptographic safety of the system (on purpose, for debugging). 35 - * 36 - * Weaken means: 37 - * we print secret keys and intermediate values all the way, 38 - */ 39 - #undef D_LOCAL 40 - #define D_LOCAL 2 41 - #include <linux/uwb/debug.h> 42 - 43 30 static void wusbhc_set_gtk_callback(struct urb *urb); 44 31 static void wusbhc_gtk_rekey_done_work(struct work_struct *work); 45 32 ··· 206 219 const void *itr, *top; 207 220 char buf[64]; 208 221 209 - d_fnstart(3, dev, "(usb_dev %p, wusb_dev %p)\n", usb_dev, wusb_dev); 210 222 result = usb_get_descriptor(usb_dev, USB_DT_SECURITY, 211 223 0, &secd, sizeof(secd)); 212 224 if (result < sizeof(secd)) { ··· 214 228 goto error_secd; 215 229 } 216 230 secd_size = le16_to_cpu(secd.wTotalLength); 217 - d_printf(5, dev, "got %d bytes of sec descriptor, total is %d\n", 218 - result, secd_size); 219 231 secd_buf = kmalloc(secd_size, GFP_KERNEL); 220 232 if (secd_buf == NULL) { 221 233 dev_err(dev, "Can't allocate space for security descriptors\n"); ··· 226 242 "not enough data: %d\n", result); 227 243 goto error_secd_all; 228 244 } 229 - d_printf(5, dev, "got %d bytes of sec descriptors\n", result); 230 245 bytes = 0; 231 246 itr = secd_buf + sizeof(secd); 232 247 top = secd_buf + result; ··· 262 279 goto error_no_ccm1; 263 280 } 264 281 wusb_dev->ccm1_etd = *ccm1_etd; 265 - dev_info(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n", 266 - buf, wusb_et_name(ccm1_etd->bEncryptionType), 267 - ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex); 282 + dev_dbg(dev, "supported encryption: %s; using %s (0x%02x/%02x)\n", 283 + buf, wusb_et_name(ccm1_etd->bEncryptionType), 284 + ccm1_etd->bEncryptionValue, ccm1_etd->bAuthKeyIndex); 268 285 result = 0; 269 286 kfree(secd_buf); 270 287 out: 271 - d_fnend(3, dev, "(usb_dev %p, wusb_dev %p) = %d\n", 272 - usb_dev, wusb_dev, result); 273 288 return result; 274 289 275 290 ··· 284 303 /* Nothing so far */ 285 304 } 286 305 287 - static void hs_printk(unsigned level, struct device *dev, 288 - struct usb_handshake *hs) 289 - { 290 - d_printf(level, dev, 291 - " bMessageNumber: %u\n" 292 - " bStatus: %u\n" 293 - " tTKID: %02x %02x %02x\n" 294 - " CDID: %02x %02x %02x %02x %02x %02x %02x %02x\n" 295 - " %02x %02x %02x %02x %02x %02x %02x %02x\n" 296 - " nonce: %02x %02x %02x %02x %02x %02x %02x %02x\n" 297 - " %02x %02x %02x %02x %02x %02x %02x %02x\n" 298 - " MIC: %02x %02x %02x %02x %02x %02x %02x %02x\n", 299 - hs->bMessageNumber, hs->bStatus, 300 - hs->tTKID[2], hs->tTKID[1], hs->tTKID[0], 301 - hs->CDID[0], hs->CDID[1], hs->CDID[2], hs->CDID[3], 302 - hs->CDID[4], hs->CDID[5], hs->CDID[6], hs->CDID[7], 303 - hs->CDID[8], hs->CDID[9], hs->CDID[10], hs->CDID[11], 304 - hs->CDID[12], hs->CDID[13], hs->CDID[14], hs->CDID[15], 305 - hs->nonce[0], hs->nonce[1], hs->nonce[2], hs->nonce[3], 306 - hs->nonce[4], hs->nonce[5], hs->nonce[6], hs->nonce[7], 307 - hs->nonce[8], hs->nonce[9], hs->nonce[10], hs->nonce[11], 308 - hs->nonce[12], hs->nonce[13], hs->nonce[14], hs->nonce[15], 309 - hs->MIC[0], hs->MIC[1], hs->MIC[2], hs->MIC[3], 310 - hs->MIC[4], hs->MIC[5], hs->MIC[6], hs->MIC[7]); 311 - } 312 - 313 306 /** 314 307 * Update the address of an unauthenticated WUSB device 315 308 * ··· 293 338 * Before the device's address (as known by it) was usb_dev->devnum | 294 339 * 0x80 (unauthenticated address). 
With this we update it to usb_dev->devnum. 295 340 */ 296 - static int wusb_dev_update_address(struct wusbhc *wusbhc, 297 - struct wusb_dev *wusb_dev) 341 + int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) 298 342 { 299 343 int result = -ENOMEM; 300 344 struct usb_device *usb_dev = wusb_dev->usb_dev; ··· 376 422 get_random_bytes(&hs[0].nonce, sizeof(hs[0].nonce)); 377 423 memset(hs[0].MIC, 0, sizeof(hs[0].MIC)); /* Per WUSB1.0[T7-22] */ 378 424 379 - d_printf(1, dev, "I: sending hs1:\n"); 380 - hs_printk(2, dev, &hs[0]); 381 - 382 425 result = usb_control_msg( 383 426 usb_dev, usb_sndctrlpipe(usb_dev, 0), 384 427 USB_REQ_SET_HANDSHAKE, ··· 396 445 dev_err(dev, "Handshake2: request failed: %d\n", result); 397 446 goto error_hs2; 398 447 } 399 - d_printf(1, dev, "got HS2:\n"); 400 - hs_printk(2, dev, &hs[1]); 401 448 402 449 result = -EINVAL; 403 450 if (hs[1].bMessageNumber != 2) { ··· 436 487 result); 437 488 goto error_hs2; 438 489 } 439 - d_printf(2, dev, "KCK:\n"); 440 - d_dump(2, dev, keydvt_out.kck, sizeof(keydvt_out.kck)); 441 - d_printf(2, dev, "PTK:\n"); 442 - d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk)); 443 490 444 491 /* Compute MIC and verify it */ 445 492 result = wusb_oob_mic(mic, keydvt_out.kck, &ccm_n, &hs[1]); ··· 445 500 goto error_hs2; 446 501 } 447 502 448 - d_printf(2, dev, "MIC:\n"); 449 - d_dump(2, dev, mic, sizeof(mic)); 450 503 if (memcmp(hs[1].MIC, mic, sizeof(hs[1].MIC))) { 451 504 dev_err(dev, "Handshake2 failed: MIC mismatch\n"); 452 505 goto error_hs2; ··· 464 521 goto error_hs2; 465 522 } 466 523 467 - d_printf(1, dev, "I: sending hs3:\n"); 468 - hs_printk(2, dev, &hs[2]); 469 - 470 524 result = usb_control_msg( 471 525 usb_dev, usb_sndctrlpipe(usb_dev, 0), 472 526 USB_REQ_SET_HANDSHAKE, ··· 474 534 goto error_hs3; 475 535 } 476 536 477 - d_printf(1, dev, "I: turning on encryption on host for device\n"); 478 - d_dump(2, dev, keydvt_out.ptk, sizeof(keydvt_out.ptk)); 479 537 result = wusbhc->set_ptk(wusbhc, wusb_dev->port_idx, tkid, 480 538 keydvt_out.ptk, sizeof(keydvt_out.ptk)); 481 539 if (result < 0) 482 540 goto error_wusbhc_set_ptk; 483 541 484 - d_printf(1, dev, "I: setting a GTK\n"); 485 542 result = wusb_dev_set_gtk(wusbhc, wusb_dev); 486 543 if (result < 0) { 487 544 dev_err(dev, "Set GTK for device: request failed: %d\n", ··· 488 551 489 552 /* Update the device's address from unauth to auth */ 490 553 if (usb_dev->authenticated == 0) { 491 - d_printf(1, dev, "I: updating addres to auth from non-auth\n"); 492 554 result = wusb_dev_update_address(wusbhc, wusb_dev); 493 555 if (result < 0) 494 556 goto error_dev_update_address; 495 557 } 496 558 result = 0; 497 - d_printf(1, dev, "I: 4way handshke done, device authenticated\n"); 559 + dev_info(dev, "device authenticated\n"); 498 560 499 561 error_dev_update_address: 500 562 error_wusbhc_set_gtk: ··· 506 570 memset(&keydvt_in, 0, sizeof(keydvt_in)); 507 571 memset(&ccm_n, 0, sizeof(ccm_n)); 508 572 memset(mic, 0, sizeof(mic)); 509 - if (result < 0) { 510 - /* error path */ 573 + if (result < 0) 511 574 wusb_dev_set_encryption(usb_dev, 0); 512 - } 513 575 error_dev_set_encryption: 514 576 kfree(hs); 515 577 error_kzalloc:
+5 -11
drivers/usb/wusbcore/wa-nep.c
··· 51 51 */ 52 52 #include <linux/workqueue.h> 53 53 #include <linux/ctype.h> 54 - #include <linux/uwb/debug.h> 54 + 55 55 #include "wa-hc.h" 56 56 #include "wusbhc.h" 57 57 ··· 139 139 /* FIXME: unimplemented WA NOTIFs */ 140 140 /* fallthru */ 141 141 default: 142 - if (printk_ratelimit()) { 143 - dev_err(dev, "HWA: unknown notification 0x%x, " 144 - "%zu bytes; discarding\n", 145 - notif_hdr->bNotifyType, 146 - (size_t)notif_hdr->bLength); 147 - dump_bytes(dev, notif_hdr, 16); 148 - } 142 + dev_err(dev, "HWA: unknown notification 0x%x, " 143 + "%zu bytes; discarding\n", 144 + notif_hdr->bNotifyType, 145 + (size_t)notif_hdr->bLength); 149 146 break; 150 147 } 151 148 } ··· 157 160 * discard the data, as this should not happen. 158 161 */ 159 162 exhausted_buffer: 160 - if (!printk_ratelimit()) 161 - goto out; 162 163 dev_warn(dev, "HWA: device sent short notification, " 163 164 "%d bytes missing; discarding %d bytes.\n", 164 165 missing, (int)size); 165 - dump_bytes(dev, itr, size); 166 166 goto out; 167 167 } 168 168
+17 -51
drivers/usb/wusbcore/wa-rpipe.c
··· 60 60 #include <linux/init.h> 61 61 #include <asm/atomic.h> 62 62 #include <linux/bitmap.h> 63 + 63 64 #include "wusbhc.h" 64 65 #include "wa-hc.h" 65 - 66 - #define D_LOCAL 0 67 - #include <linux/uwb/debug.h> 68 - 69 66 70 67 static int __rpipe_get_descr(struct wahc *wa, 71 68 struct usb_rpipe_descriptor *descr, u16 index) ··· 73 76 /* Get the RPIPE descriptor -- we cannot use the usb_get_descriptor() 74 77 * function because the arguments are different. 75 78 */ 76 - d_printf(1, dev, "rpipe %u: get descr\n", index); 77 79 result = usb_control_msg( 78 80 wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), 79 81 USB_REQ_GET_DESCRIPTOR, ··· 111 115 /* we cannot use the usb_get_descriptor() function because the 112 116 * arguments are different. 113 117 */ 114 - d_printf(1, dev, "rpipe %u: set descr\n", index); 115 118 result = usb_control_msg( 116 119 wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), 117 120 USB_REQ_SET_DESCRIPTOR, ··· 169 174 { 170 175 struct wa_rpipe *rpipe = container_of(_rpipe, struct wa_rpipe, refcnt); 171 176 u8 index = le16_to_cpu(rpipe->descr.wRPipeIndex); 172 - d_fnstart(1, NULL, "(rpipe %p %u)\n", rpipe, index); 177 + 173 178 if (rpipe->ep) 174 179 rpipe->ep->hcpriv = NULL; 175 180 rpipe_put_idx(rpipe->wa, index); 176 181 wa_put(rpipe->wa); 177 182 kfree(rpipe); 178 - d_fnend(1, NULL, "(rpipe %p %u)\n", rpipe, index); 179 183 } 180 184 EXPORT_SYMBOL_GPL(rpipe_destroy); 181 185 ··· 196 202 struct wa_rpipe *rpipe; 197 203 struct device *dev = &wa->usb_iface->dev; 198 204 199 - d_fnstart(3, dev, "(wa %p crs 0x%02x)\n", wa, crs); 200 205 rpipe = kzalloc(sizeof(*rpipe), gfp); 201 206 if (rpipe == NULL) 202 207 return -ENOMEM; ··· 216 223 } 217 224 *prpipe = NULL; 218 225 kfree(rpipe); 219 - d_fnend(3, dev, "(wa %p crs 0x%02x) = -ENXIO\n", wa, crs); 220 226 return -ENXIO; 221 227 222 228 found: 223 229 set_bit(rpipe_idx, wa->rpipe_bm); 224 230 rpipe->wa = wa_get(wa); 225 231 *prpipe = rpipe; 226 - d_fnstart(3, dev, "(wa %p crs 0x%02x) = 0\n", wa, crs); 227 232 return 0; 228 233 } 229 234 ··· 230 239 int result; 231 240 struct device *dev = &wa->usb_iface->dev; 232 241 233 - d_printf(1, dev, "rpipe %u: reset\n", index); 234 242 result = usb_control_msg( 235 243 wa->usb_dev, usb_sndctrlpipe(wa->usb_dev, 0), 236 244 USB_REQ_RPIPE_RESET, ··· 266 276 struct usb_descriptor_header *hdr; 267 277 struct usb_wireless_ep_comp_descriptor *epcd; 268 278 269 - d_fnstart(3, dev, "(ep %p)\n", ep); 270 279 if (ep->desc.bEndpointAddress == 0) { 271 280 epcd = &epc0; 272 281 goto out; ··· 299 310 itr_size -= hdr->bDescriptorType; 300 311 } 301 312 out: 302 - d_fnend(3, dev, "(ep %p) = %p\n", ep, epcd); 303 313 return epcd; 304 314 } 305 315 ··· 317 329 struct usb_wireless_ep_comp_descriptor *epcd; 318 330 u8 unauth; 319 331 320 - d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n", 321 - rpipe, wa, ep, urb); 322 332 epcd = rpipe_epc_find(dev, ep); 323 333 if (epcd == NULL) { 324 334 dev_err(dev, "ep 0x%02x: can't find companion descriptor\n", ··· 336 350 /* FIXME: use maximum speed as supported or recommended by device */ 337 351 rpipe->descr.bSpeed = usb_pipeendpoint(urb->pipe) == 0 ? 
338 352 UWB_PHY_RATE_53 : UWB_PHY_RATE_200; 339 - d_printf(2, dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n", 340 - urb->dev->devnum, urb->dev->devnum | unauth, 341 - le16_to_cpu(rpipe->descr.wRPipeIndex), 342 - usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed); 353 + 354 + dev_dbg(dev, "addr %u (0x%02x) rpipe #%u ep# %u speed %d\n", 355 + urb->dev->devnum, urb->dev->devnum | unauth, 356 + le16_to_cpu(rpipe->descr.wRPipeIndex), 357 + usb_pipeendpoint(urb->pipe), rpipe->descr.bSpeed); 358 + 343 359 /* see security.c:wusb_update_address() */ 344 360 if (unlikely(urb->dev->devnum == 0x80)) 345 361 rpipe->descr.bDeviceAddress = 0; ··· 372 384 } 373 385 result = 0; 374 386 error: 375 - d_fnend(3, dev, "(rpipe %p wa %p ep %p urb %p) = %d\n", 376 - rpipe, wa, ep, urb, result); 377 387 return result; 378 388 } 379 389 ··· 391 405 u8 unauth = (usb_dev->wusb && !usb_dev->authenticated) ? 0x80 : 0; 392 406 u8 portnum = wusb_port_no_to_idx(urb->dev->portnum); 393 407 394 - d_fnstart(3, dev, "(rpipe %p wa %p ep %p, urb %p)\n", 395 - rpipe, wa, ep, urb); 396 408 #define AIM_CHECK(rdf, val, text) \ 397 409 do { \ 398 410 if (rpipe->descr.rdf != (val)) { \ ··· 435 451 struct wa_rpipe *rpipe; 436 452 u8 eptype; 437 453 438 - d_fnstart(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, 439 - gfp); 440 454 mutex_lock(&wa->rpipe_mutex); 441 455 rpipe = ep->hcpriv; 442 456 if (rpipe != NULL) { ··· 444 462 goto error; 445 463 } 446 464 __rpipe_get(rpipe); 447 - d_printf(2, dev, "ep 0x%02x: reusing rpipe %u\n", 448 - ep->desc.bEndpointAddress, 449 - le16_to_cpu(rpipe->descr.wRPipeIndex)); 465 + dev_dbg(dev, "ep 0x%02x: reusing rpipe %u\n", 466 + ep->desc.bEndpointAddress, 467 + le16_to_cpu(rpipe->descr.wRPipeIndex)); 450 468 } else { 451 469 /* hmm, assign idle rpipe, aim it */ 452 470 result = -ENOBUFS; ··· 462 480 ep->hcpriv = rpipe; 463 481 rpipe->ep = ep; 464 482 __rpipe_get(rpipe); /* for caching into ep->hcpriv */ 465 - d_printf(2, dev, "ep 0x%02x: using rpipe %u\n", 466 - ep->desc.bEndpointAddress, 467 - le16_to_cpu(rpipe->descr.wRPipeIndex)); 483 + dev_dbg(dev, "ep 0x%02x: using rpipe %u\n", 484 + ep->desc.bEndpointAddress, 485 + le16_to_cpu(rpipe->descr.wRPipeIndex)); 468 486 } 469 - d_dump(4, dev, &rpipe->descr, sizeof(rpipe->descr)); 470 487 error: 471 488 mutex_unlock(&wa->rpipe_mutex); 472 - d_fnend(3, dev, "(wa %p ep %p urb %p gfp 0x%08x)\n", wa, ep, urb, gfp); 473 489 return result; 474 490 } 475 491 ··· 487 507 void wa_rpipes_destroy(struct wahc *wa) 488 508 { 489 509 struct device *dev = &wa->usb_iface->dev; 490 - d_fnstart(3, dev, "(wa %p)\n", wa); 510 + 491 511 if (!bitmap_empty(wa->rpipe_bm, wa->rpipes)) { 492 512 char buf[256]; 493 513 WARN_ON(1); ··· 495 515 dev_err(dev, "BUG: pipes not released on exit: %s\n", buf); 496 516 } 497 517 kfree(wa->rpipe_bm); 498 - d_fnend(3, dev, "(wa %p)\n", wa); 499 518 } 500 519 501 520 /* ··· 509 530 */ 510 531 void rpipe_ep_disable(struct wahc *wa, struct usb_host_endpoint *ep) 511 532 { 512 - struct device *dev = &wa->usb_iface->dev; 513 533 struct wa_rpipe *rpipe; 514 - d_fnstart(2, dev, "(wa %p ep %p)\n", wa, ep); 534 + 515 535 mutex_lock(&wa->rpipe_mutex); 516 536 rpipe = ep->hcpriv; 517 537 if (rpipe != NULL) { 518 - unsigned rc = atomic_read(&rpipe->refcnt.refcount); 519 - int result; 520 538 u16 index = le16_to_cpu(rpipe->descr.wRPipeIndex); 521 539 522 - if (rc != 1) 523 - d_printf(1, dev, "(wa %p ep %p) rpipe %p refcnt %u\n", 524 - wa, ep, rpipe, rc); 525 - 526 - d_printf(1, dev, "rpipe %u: abort\n", index); 527 - result = 
usb_control_msg( 540 + usb_control_msg( 528 541 wa->usb_dev, usb_rcvctrlpipe(wa->usb_dev, 0), 529 542 USB_REQ_RPIPE_ABORT, 530 543 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_RPIPE, 531 544 0, index, NULL, 0, 1000 /* FIXME: arbitrary */); 532 - if (result < 0 && result != -ENODEV /* dev is gone */) 533 - d_printf(1, dev, "(wa %p rpipe %u): abort failed: %d\n", 534 - wa, index, result); 535 545 rpipe_put(rpipe); 536 546 } 537 547 mutex_unlock(&wa->rpipe_mutex); 538 - d_fnend(2, dev, "(wa %p ep %p)\n", wa, ep); 539 - return; 540 548 } 541 549 EXPORT_SYMBOL_GPL(rpipe_ep_disable);
+43 -137
drivers/usb/wusbcore/wa-xfer.c
··· 82 82 #include <linux/init.h> 83 83 #include <linux/spinlock.h> 84 84 #include <linux/hash.h> 85 + 85 86 #include "wa-hc.h" 86 87 #include "wusbhc.h" 87 - 88 - #undef D_LOCAL 89 - #define D_LOCAL 0 /* 0 disabled, > 0 different levels... */ 90 - #include <linux/uwb/debug.h> 91 88 92 89 enum { 93 90 WA_SEGS_MAX = 255, ··· 177 180 } 178 181 } 179 182 kfree(xfer); 180 - d_printf(2, NULL, "xfer %p destroyed\n", xfer); 181 183 } 182 184 183 185 static void wa_xfer_get(struct wa_xfer *xfer) ··· 186 190 187 191 static void wa_xfer_put(struct wa_xfer *xfer) 188 192 { 189 - d_fnstart(3, NULL, "(xfer %p) -- ref count bef put %d\n", 190 - xfer, atomic_read(&xfer->refcnt.refcount)); 191 193 kref_put(&xfer->refcnt, wa_xfer_destroy); 192 - d_fnend(3, NULL, "(xfer %p) = void\n", xfer); 193 194 } 194 195 195 196 /* ··· 202 209 static void wa_xfer_giveback(struct wa_xfer *xfer) 203 210 { 204 211 unsigned long flags; 205 - d_fnstart(3, NULL, "(xfer %p)\n", xfer); 212 + 206 213 spin_lock_irqsave(&xfer->wa->xfer_list_lock, flags); 207 214 list_del_init(&xfer->list_node); 208 215 spin_unlock_irqrestore(&xfer->wa->xfer_list_lock, flags); ··· 210 217 wusbhc_giveback_urb(xfer->wa->wusb, xfer->urb, xfer->result); 211 218 wa_put(xfer->wa); 212 219 wa_xfer_put(xfer); 213 - d_fnend(3, NULL, "(xfer %p) = void\n", xfer); 214 220 } 215 221 216 222 /* ··· 219 227 */ 220 228 static void wa_xfer_completion(struct wa_xfer *xfer) 221 229 { 222 - d_fnstart(3, NULL, "(xfer %p)\n", xfer); 223 230 if (xfer->wusb_dev) 224 231 wusb_dev_put(xfer->wusb_dev); 225 232 rpipe_put(xfer->ep->hcpriv); 226 233 wa_xfer_giveback(xfer); 227 - d_fnend(3, NULL, "(xfer %p) = void\n", xfer); 228 - return; 229 234 } 230 235 231 236 /* ··· 232 243 */ 233 244 static unsigned __wa_xfer_is_done(struct wa_xfer *xfer) 234 245 { 246 + struct device *dev = &xfer->wa->usb_iface->dev; 235 247 unsigned result, cnt; 236 248 struct wa_seg *seg; 237 249 struct urb *urb = xfer->urb; 238 250 unsigned found_short = 0; 239 251 240 - d_fnstart(3, NULL, "(xfer %p)\n", xfer); 241 252 result = xfer->segs_done == xfer->segs_submitted; 242 253 if (result == 0) 243 254 goto out; ··· 247 258 switch (seg->status) { 248 259 case WA_SEG_DONE: 249 260 if (found_short && seg->result > 0) { 250 - if (printk_ratelimit()) 251 - printk(KERN_ERR "xfer %p#%u: bad short " 252 - "segments (%zu)\n", xfer, cnt, 253 - seg->result); 261 + dev_dbg(dev, "xfer %p#%u: bad short segments (%zu)\n", 262 + xfer, cnt, seg->result); 254 263 urb->status = -EINVAL; 255 264 goto out; 256 265 } ··· 256 269 if (seg->result < xfer->seg_size 257 270 && cnt != xfer->segs-1) 258 271 found_short = 1; 259 - d_printf(2, NULL, "xfer %p#%u: DONE short %d " 260 - "result %zu urb->actual_length %d\n", 261 - xfer, seg->index, found_short, seg->result, 262 - urb->actual_length); 272 + dev_dbg(dev, "xfer %p#%u: DONE short %d " 273 + "result %zu urb->actual_length %d\n", 274 + xfer, seg->index, found_short, seg->result, 275 + urb->actual_length); 263 276 break; 264 277 case WA_SEG_ERROR: 265 278 xfer->result = seg->result; 266 - d_printf(2, NULL, "xfer %p#%u: ERROR result %zu\n", 267 - xfer, seg->index, seg->result); 279 + dev_dbg(dev, "xfer %p#%u: ERROR result %zu\n", 280 + xfer, seg->index, seg->result); 268 281 goto out; 269 282 case WA_SEG_ABORTED: 270 - WARN_ON(urb->status != -ECONNRESET 271 - && urb->status != -ENOENT); 272 - d_printf(2, NULL, "xfer %p#%u ABORTED: result %d\n", 273 - xfer, seg->index, urb->status); 283 + dev_dbg(dev, "xfer %p#%u ABORTED: result %d\n", 284 + xfer, seg->index, urb->status); 274 
285 xfer->result = urb->status; 275 286 goto out; 276 287 default: 277 - /* if (printk_ratelimit()) */ 278 - printk(KERN_ERR "xfer %p#%u: " 279 - "is_done bad state %d\n", 280 - xfer, cnt, seg->status); 288 + dev_warn(dev, "xfer %p#%u: is_done bad state %d\n", 289 + xfer, cnt, seg->status); 281 290 xfer->result = -EINVAL; 282 - WARN_ON(1); 283 291 goto out; 284 292 } 285 293 } 286 294 xfer->result = 0; 287 295 out: 288 - d_fnend(3, NULL, "(xfer %p) = void\n", xfer); 289 296 return result; 290 297 } 291 298 ··· 405 424 struct urb *urb = xfer->urb; 406 425 struct wa_rpipe *rpipe = xfer->ep->hcpriv; 407 426 408 - d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n", 409 - xfer, rpipe, urb); 410 427 switch (rpipe->descr.bmAttribute & 0x3) { 411 428 case USB_ENDPOINT_XFER_CONTROL: 412 429 *pxfer_type = WA_XFER_TYPE_CTL; ··· 451 472 if (xfer->segs == 0 && *pxfer_type == WA_XFER_TYPE_CTL) 452 473 xfer->segs = 1; 453 474 error: 454 - d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n", 455 - xfer, rpipe, urb, (int)result); 456 475 return result; 457 476 } 458 477 459 - /** Fill in the common request header and xfer-type specific data. */ 478 + /* Fill in the common request header and xfer-type specific data. */ 460 479 static void __wa_xfer_setup_hdr0(struct wa_xfer *xfer, 461 480 struct wa_xfer_hdr *xfer_hdr0, 462 481 enum wa_xfer_type xfer_type, ··· 511 534 unsigned rpipe_ready = 0; 512 535 u8 done = 0; 513 536 514 - d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status); 515 537 switch (urb->status) { 516 538 case 0: 517 539 spin_lock_irqsave(&xfer->lock, flags); 518 540 wa = xfer->wa; 519 541 dev = &wa->usb_iface->dev; 520 - d_printf(2, dev, "xfer %p#%u: data out done (%d bytes)\n", 521 - xfer, seg->index, urb->actual_length); 542 + dev_dbg(dev, "xfer %p#%u: data out done (%d bytes)\n", 543 + xfer, seg->index, urb->actual_length); 522 544 if (seg->status < WA_SEG_PENDING) 523 545 seg->status = WA_SEG_PENDING; 524 546 seg->result = urb->actual_length; ··· 531 555 wa = xfer->wa; 532 556 dev = &wa->usb_iface->dev; 533 557 rpipe = xfer->ep->hcpriv; 534 - if (printk_ratelimit()) 535 - dev_err(dev, "xfer %p#%u: data out error %d\n", 536 - xfer, seg->index, urb->status); 558 + dev_dbg(dev, "xfer %p#%u: data out error %d\n", 559 + xfer, seg->index, urb->status); 537 560 if (edc_inc(&wa->nep_edc, EDC_MAX_ERRORS, 538 561 EDC_ERROR_TIMEFRAME)){ 539 562 dev_err(dev, "DTO: URB max acceptable errors " ··· 553 578 if (rpipe_ready) 554 579 wa_xfer_delayed_run(rpipe); 555 580 } 556 - d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status); 557 581 } 558 582 559 583 /* ··· 584 610 unsigned rpipe_ready; 585 611 u8 done = 0; 586 612 587 - d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status); 588 613 switch (urb->status) { 589 614 case 0: 590 615 spin_lock_irqsave(&xfer->lock, flags); 591 616 wa = xfer->wa; 592 617 dev = &wa->usb_iface->dev; 593 - d_printf(2, dev, "xfer %p#%u: request done\n", 594 - xfer, seg->index); 618 + dev_dbg(dev, "xfer %p#%u: request done\n", xfer, seg->index); 595 619 if (xfer->is_inbound && seg->status < WA_SEG_PENDING) 596 620 seg->status = WA_SEG_PENDING; 597 621 spin_unlock_irqrestore(&xfer->lock, flags); ··· 624 652 if (rpipe_ready) 625 653 wa_xfer_delayed_run(rpipe); 626 654 } 627 - d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status); 628 655 } 629 656 630 657 /* ··· 721 750 size_t xfer_hdr_size, cnt, transfer_size; 722 751 struct wa_xfer_hdr *xfer_hdr0, *xfer_hdr; 723 752 724 - d_fnstart(3, dev, "(xfer %p [rpipe %p] urb %p)\n", 725 - xfer, xfer->ep->hcpriv, urb); 726 
- 727 753 result = __wa_xfer_setup_sizes(xfer, &xfer_type); 728 754 if (result < 0) 729 755 goto error_setup_sizes; ··· 756 788 result = 0; 757 789 error_setup_segs: 758 790 error_setup_sizes: 759 - d_fnend(3, dev, "(xfer %p [rpipe %p] urb %p) = %d\n", 760 - xfer, xfer->ep->hcpriv, urb, result); 761 791 return result; 762 792 } 763 793 ··· 809 843 struct wa_xfer *xfer; 810 844 unsigned long flags; 811 845 812 - d_fnstart(1, dev, "(rpipe #%d) %d segments available\n", 813 - le16_to_cpu(rpipe->descr.wRPipeIndex), 814 - atomic_read(&rpipe->segs_available)); 815 846 spin_lock_irqsave(&rpipe->seg_lock, flags); 816 847 while (atomic_read(&rpipe->segs_available) > 0 817 848 && !list_empty(&rpipe->seg_list)) { ··· 817 854 list_del(&seg->list_node); 818 855 xfer = seg->xfer; 819 856 result = __wa_seg_submit(rpipe, xfer, seg); 820 - d_printf(1, dev, "xfer %p#%u submitted from delayed " 821 - "[%d segments available] %d\n", 822 - xfer, seg->index, 823 - atomic_read(&rpipe->segs_available), result); 857 + dev_dbg(dev, "xfer %p#%u submitted from delayed [%d segments available] %d\n", 858 + xfer, seg->index, atomic_read(&rpipe->segs_available), result); 824 859 if (unlikely(result < 0)) { 825 860 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 826 861 spin_lock_irqsave(&xfer->lock, flags); ··· 829 868 } 830 869 } 831 870 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 832 - d_fnend(1, dev, "(rpipe #%d) = void, %d segments available\n", 833 - le16_to_cpu(rpipe->descr.wRPipeIndex), 834 - atomic_read(&rpipe->segs_available)); 835 - 836 871 } 837 872 838 873 /* ··· 851 894 u8 available; 852 895 u8 empty; 853 896 854 - d_fnstart(3, dev, "(xfer %p [rpipe %p])\n", 855 - xfer, xfer->ep->hcpriv); 856 - 857 897 spin_lock_irqsave(&wa->xfer_list_lock, flags); 858 898 list_add_tail(&xfer->list_node, &wa->xfer_list); 859 899 spin_unlock_irqrestore(&wa->xfer_list_lock, flags); ··· 862 908 available = atomic_read(&rpipe->segs_available); 863 909 empty = list_empty(&rpipe->seg_list); 864 910 seg = xfer->seg[cnt]; 865 - d_printf(2, dev, "xfer %p#%u: available %u empty %u (%s)\n", 866 - xfer, cnt, available, empty, 867 - available == 0 || !empty ? "delayed" : "submitted"); 911 + dev_dbg(dev, "xfer %p#%u: available %u empty %u (%s)\n", 912 + xfer, cnt, available, empty, 913 + available == 0 || !empty ? 
"delayed" : "submitted"); 868 914 if (available == 0 || !empty) { 869 - d_printf(1, dev, "xfer %p#%u: delayed\n", xfer, cnt); 915 + dev_dbg(dev, "xfer %p#%u: delayed\n", xfer, cnt); 870 916 seg->status = WA_SEG_DELAYED; 871 917 list_add_tail(&seg->list_node, &rpipe->seg_list); 872 918 } else { 873 919 result = __wa_seg_submit(rpipe, xfer, seg); 874 - if (result < 0) 920 + if (result < 0) { 921 + __wa_xfer_abort(xfer); 875 922 goto error_seg_submit; 923 + } 876 924 } 877 925 xfer->segs_submitted++; 878 926 } 879 - spin_unlock_irqrestore(&rpipe->seg_lock, flags); 880 - d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer, 881 - xfer->ep->hcpriv); 882 - return result; 883 - 884 927 error_seg_submit: 885 - __wa_xfer_abort(xfer); 886 928 spin_unlock_irqrestore(&rpipe->seg_lock, flags); 887 - d_fnend(3, dev, "(xfer %p [rpipe %p]) = void\n", xfer, 888 - xfer->ep->hcpriv); 889 929 return result; 890 930 } 891 931 ··· 912 964 struct urb *urb = xfer->urb; 913 965 struct wahc *wa = xfer->wa; 914 966 struct wusbhc *wusbhc = wa->wusb; 915 - struct device *dev = &wa->usb_iface->dev; 916 967 struct wusb_dev *wusb_dev; 917 968 unsigned done; 918 969 919 - d_fnstart(3, dev, "(wa %p urb %p)\n", wa, urb); 920 970 result = rpipe_get_by_ep(wa, xfer->ep, urb, xfer->gfp); 921 971 if (result < 0) 922 972 goto error_rpipe_get; ··· 943 997 if (result < 0) 944 998 goto error_xfer_submit; 945 999 spin_unlock_irqrestore(&xfer->lock, flags); 946 - d_fnend(3, dev, "(wa %p urb %p) = void\n", wa, urb); 947 1000 return; 948 1001 949 1002 /* this is basically wa_xfer_completion() broken up wa_xfer_giveback() ··· 960 1015 error_rpipe_get: 961 1016 xfer->result = result; 962 1017 wa_xfer_giveback(xfer); 963 - d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result); 964 1018 return; 965 1019 966 1020 error_xfer_submit: ··· 968 1024 spin_unlock_irqrestore(&xfer->lock, flags); 969 1025 if (done) 970 1026 wa_xfer_completion(xfer); 971 - d_fnend(3, dev, "(wa %p urb %p) = (void) %d\n", wa, urb, result); 972 - return; 973 1027 } 974 1028 975 1029 /* ··· 983 1041 void wa_urb_enqueue_run(struct work_struct *ws) 984 1042 { 985 1043 struct wahc *wa = container_of(ws, struct wahc, xfer_work); 986 - struct device *dev = &wa->usb_iface->dev; 987 1044 struct wa_xfer *xfer, *next; 988 1045 struct urb *urb; 989 1046 990 - d_fnstart(3, dev, "(wa %p)\n", wa); 991 1047 spin_lock_irq(&wa->xfer_list_lock); 992 1048 list_for_each_entry_safe(xfer, next, &wa->xfer_delayed_list, 993 1049 list_node) { ··· 999 1059 spin_lock_irq(&wa->xfer_list_lock); 1000 1060 } 1001 1061 spin_unlock_irq(&wa->xfer_list_lock); 1002 - d_fnend(3, dev, "(wa %p) = void\n", wa); 1003 1062 } 1004 1063 EXPORT_SYMBOL_GPL(wa_urb_enqueue_run); 1005 1064 ··· 1023 1084 unsigned long my_flags; 1024 1085 unsigned cant_sleep = irqs_disabled() | in_atomic(); 1025 1086 1026 - d_fnstart(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x)\n", 1027 - wa, ep, urb, urb->transfer_buffer_length, gfp); 1028 - 1029 1087 if (urb->transfer_buffer == NULL 1030 1088 && !(urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP) 1031 1089 && urb->transfer_buffer_length != 0) { ··· 1044 1108 xfer->gfp = gfp; 1045 1109 xfer->ep = ep; 1046 1110 urb->hcpriv = xfer; 1047 - d_printf(2, dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n", 1048 - xfer, urb, urb->pipe, urb->transfer_buffer_length, 1049 - urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma", 1050 - urb->pipe & USB_DIR_IN ? "inbound" : "outbound", 1051 - cant_sleep ? 
"deferred" : "inline"); 1111 + 1112 + dev_dbg(dev, "xfer %p urb %p pipe 0x%02x [%d bytes] %s %s %s\n", 1113 + xfer, urb, urb->pipe, urb->transfer_buffer_length, 1114 + urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP ? "dma" : "nodma", 1115 + urb->pipe & USB_DIR_IN ? "inbound" : "outbound", 1116 + cant_sleep ? "deferred" : "inline"); 1117 + 1052 1118 if (cant_sleep) { 1053 1119 usb_get_urb(urb); 1054 1120 spin_lock_irqsave(&wa->xfer_list_lock, my_flags); ··· 1060 1122 } else { 1061 1123 wa_urb_enqueue_b(xfer); 1062 1124 } 1063 - d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = 0\n", 1064 - wa, ep, urb, urb->transfer_buffer_length, gfp); 1065 1125 return 0; 1066 1126 1067 1127 error_dequeued: 1068 1128 kfree(xfer); 1069 1129 error_kmalloc: 1070 - d_fnend(3, dev, "(wa %p ep %p urb %p [%d] gfp 0x%x) = %d\n", 1071 - wa, ep, urb, urb->transfer_buffer_length, gfp, result); 1072 1130 return result; 1073 1131 } 1074 1132 EXPORT_SYMBOL_GPL(wa_urb_enqueue); ··· 1089 1155 */ 1090 1156 int wa_urb_dequeue(struct wahc *wa, struct urb *urb) 1091 1157 { 1092 - struct device *dev = &wa->usb_iface->dev; 1093 1158 unsigned long flags, flags2; 1094 1159 struct wa_xfer *xfer; 1095 1160 struct wa_seg *seg; ··· 1096 1163 unsigned cnt; 1097 1164 unsigned rpipe_ready = 0; 1098 1165 1099 - d_fnstart(3, dev, "(wa %p, urb %p)\n", wa, urb); 1100 - 1101 - d_printf(1, dev, "xfer %p urb %p: aborting\n", urb->hcpriv, urb); 1102 1166 xfer = urb->hcpriv; 1103 1167 if (xfer == NULL) { 1104 1168 /* NOthing setup yet enqueue will see urb->status != ··· 1164 1234 wa_xfer_completion(xfer); 1165 1235 if (rpipe_ready) 1166 1236 wa_xfer_delayed_run(rpipe); 1167 - d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); 1168 1237 return 0; 1169 1238 1170 1239 out_unlock: 1171 1240 spin_unlock_irqrestore(&xfer->lock, flags); 1172 1241 out: 1173 - d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); 1174 1242 return 0; 1175 1243 1176 1244 dequeue_delayed: ··· 1178 1250 spin_unlock_irqrestore(&xfer->lock, flags); 1179 1251 wa_xfer_giveback(xfer); 1180 1252 usb_put_urb(urb); /* we got a ref in enqueue() */ 1181 - d_fnend(3, dev, "(wa %p, urb %p) = 0\n", wa, urb); 1182 1253 return 0; 1183 1254 } 1184 1255 EXPORT_SYMBOL_GPL(wa_urb_dequeue); ··· 1253 1326 u8 usb_status; 1254 1327 unsigned rpipe_ready = 0; 1255 1328 1256 - d_fnstart(3, dev, "(wa %p xfer %p)\n", wa, xfer); 1257 1329 spin_lock_irqsave(&xfer->lock, flags); 1258 1330 seg_idx = xfer_result->bTransferSegment & 0x7f; 1259 1331 if (unlikely(seg_idx >= xfer->segs)) ··· 1260 1334 seg = xfer->seg[seg_idx]; 1261 1335 rpipe = xfer->ep->hcpriv; 1262 1336 usb_status = xfer_result->bTransferStatus; 1263 - d_printf(2, dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n", 1264 - xfer, seg_idx, usb_status, seg->status); 1337 + dev_dbg(dev, "xfer %p#%u: bTransferStatus 0x%02x (seg %u)\n", 1338 + xfer, seg_idx, usb_status, seg->status); 1265 1339 if (seg->status == WA_SEG_ABORTED 1266 1340 || seg->status == WA_SEG_ERROR) /* already handled */ 1267 1341 goto segment_aborted; ··· 1317 1391 wa_xfer_completion(xfer); 1318 1392 if (rpipe_ready) 1319 1393 wa_xfer_delayed_run(rpipe); 1320 - d_fnend(3, dev, "(wa %p xfer %p) = void\n", wa, xfer); 1321 1394 return; 1322 - 1323 1395 1324 1396 error_submit_buf_in: 1325 1397 if (edc_inc(&wa->dti_edc, EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) { ··· 1340 1416 wa_xfer_completion(xfer); 1341 1417 if (rpipe_ready) 1342 1418 wa_xfer_delayed_run(rpipe); 1343 - d_fnend(3, dev, "(wa %p xfer %p) = void [segment/DTI-submit error]\n", 1344 - wa, xfer); 1345 1419 return; 1346 - 
1347 1420 1348 1421 error_bad_seg: 1349 1422 spin_unlock_irqrestore(&xfer->lock, flags); ··· 1352 1431 "exceeded, resetting device\n"); 1353 1432 wa_reset_all(wa); 1354 1433 } 1355 - d_fnend(3, dev, "(wa %p xfer %p) = void [bad seg]\n", wa, xfer); 1356 1434 return; 1357 - 1358 1435 1359 1436 segment_aborted: 1360 1437 /* nothing to do, as the aborter did the completion */ 1361 1438 spin_unlock_irqrestore(&xfer->lock, flags); 1362 - d_fnend(3, dev, "(wa %p xfer %p) = void [segment aborted]\n", 1363 - wa, xfer); 1364 - return; 1365 - 1366 1439 } 1367 1440 1368 1441 /* ··· 1380 1465 unsigned long flags; 1381 1466 u8 done = 0; 1382 1467 1383 - d_fnstart(3, NULL, "(urb %p [%d])\n", urb, urb->status); 1384 1468 switch (urb->status) { 1385 1469 case 0: 1386 1470 spin_lock_irqsave(&xfer->lock, flags); 1387 1471 wa = xfer->wa; 1388 1472 dev = &wa->usb_iface->dev; 1389 1473 rpipe = xfer->ep->hcpriv; 1390 - d_printf(2, dev, "xfer %p#%u: data in done (%zu bytes)\n", 1391 - xfer, seg->index, (size_t)urb->actual_length); 1474 + dev_dbg(dev, "xfer %p#%u: data in done (%zu bytes)\n", 1475 + xfer, seg->index, (size_t)urb->actual_length); 1392 1476 seg->status = WA_SEG_DONE; 1393 1477 seg->result = urb->actual_length; 1394 1478 xfer->segs_done++; ··· 1428 1514 if (rpipe_ready) 1429 1515 wa_xfer_delayed_run(rpipe); 1430 1516 } 1431 - d_fnend(3, NULL, "(urb %p [%d]) = void\n", urb, urb->status); 1432 1517 } 1433 1518 1434 1519 /* ··· 1466 1553 struct wa_xfer *xfer; 1467 1554 u8 usb_status; 1468 1555 1469 - d_fnstart(3, dev, "(%p)\n", wa); 1470 1556 BUG_ON(wa->dti_urb != urb); 1471 1557 switch (wa->dti_urb->status) { 1472 1558 case 0: 1473 1559 /* We have a xfer result buffer; check it */ 1474 - d_printf(2, dev, "DTI: xfer result %d bytes at %p\n", 1475 - urb->actual_length, urb->transfer_buffer); 1476 - d_dump(3, dev, urb->transfer_buffer, urb->actual_length); 1560 + dev_dbg(dev, "DTI: xfer result %d bytes at %p\n", 1561 + urb->actual_length, urb->transfer_buffer); 1477 1562 if (wa->dti_urb->actual_length != sizeof(*xfer_result)) { 1478 1563 dev_err(dev, "DTI Error: xfer result--bad size " 1479 1564 "xfer result (%d bytes vs %zu needed)\n", ··· 1533 1622 wa_reset_all(wa); 1534 1623 } 1535 1624 out: 1536 - d_fnend(3, dev, "(%p) = void\n", wa); 1537 1625 return; 1538 1626 } 1539 1627 ··· 1563 1653 struct wa_notif_xfer *notif_xfer; 1564 1654 const struct usb_endpoint_descriptor *dti_epd = wa->dti_epd; 1565 1655 1566 - d_fnstart(4, dev, "(%p, %p)\n", wa, notif_hdr); 1567 1656 notif_xfer = container_of(notif_hdr, struct wa_notif_xfer, hdr); 1568 1657 BUG_ON(notif_hdr->bNotifyType != WA_NOTIF_TRANSFER); 1569 1658 ··· 1602 1693 goto error_dti_urb_submit; 1603 1694 } 1604 1695 out: 1605 - d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr); 1606 1696 return; 1607 1697 1608 1698 error_dti_urb_submit: ··· 1612 1704 error_dti_urb_alloc: 1613 1705 error: 1614 1706 wa_reset_all(wa); 1615 - d_fnend(4, dev, "(%p, %p) = void\n", wa, notif_hdr); 1616 - return; 1617 1707 }
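One rule in __wa_xfer_is_done() above deserves spelling out: a transfer split into segments may legitimately finish early, but only the last segment that carried data may be short; if a later segment still returns data, the device's framing is inconsistent and the URB fails with -EINVAL. Below is a standalone toy model of that check over a hypothetical list of per-segment byte counts (the segment size and the counts are made up for illustration, and the error/abort states are ignored):

#include <stdio.h>

int main(void)
{
        int seg_size = 1024;
        /* Hypothetical completed byte counts; segment 2 is short but
         * segment 3 still carries data, which the rule rejects. */
        int results[] = { 1024, 1024, 512, 256 };
        int nsegs = sizeof(results) / sizeof(results[0]);
        int found_short = 0;
        int i;

        for (i = 0; i < nsegs; i++) {
                if (found_short && results[i] > 0) {
                        printf("seg %d: bad short segments -> -EINVAL\n", i);
                        return 1;
                }
                if (results[i] < seg_size && i != nsegs - 1)
                        found_short = 1;
                printf("seg %d: %d bytes%s\n", i, results[i],
                       found_short ? " (short)" : "");
        }
        printf("transfer done\n");
        return 0;
}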
+19 -17
drivers/usb/wusbcore/wusbhc.h
··· 64 64 #include <linux/uwb.h> 65 65 #include <linux/usb/wusb.h> 66 66 67 + /* 68 + * Time from a WUSB channel stop request to the last transmitted MMC. 69 + * 70 + * This needs to be > 4.096 ms in case no MMCs can be transmitted in 71 + * zone 0. 72 + */ 73 + #define WUSB_CHANNEL_STOP_DELAY_MS 8 67 74 68 75 /** 69 76 * Wireless USB device ··· 154 147 u16 status; 155 148 u16 change; 156 149 struct wusb_dev *wusb_dev; /* connected device's info */ 157 - unsigned reset_count; 158 150 u32 ptk_tkid; 159 151 }; 160 152 ··· 204 198 * @mmcies_max Max number of Information Elements this HC can send 205 199 * in its MMC. Read-only. 206 200 * 201 + * @start Start the WUSB channel. 202 + * 203 + * @stop Stop the WUSB channel after the specified number of 204 + * milliseconds. Channel Stop IEs should be transmitted 205 + * as required by [WUSB] 4.16.2.1. 206 + * 207 207 * @mmcie_add HC specific operation (WHCI or HWA) for adding an 208 208 * MMCIE. 209 209 * 210 210 * @mmcie_rm HC specific operation (WHCI or HWA) for removing an 211 211 * MMCIE. 212 - * 213 - * @enc_types Array which describes the encryptions methods 214 - * supported by the host as described in WUSB1.0 -- 215 - * one entry per supported method. As of WUSB1.0 there 216 - * is only four methods, we make space for eight just in 217 - * case they decide to add some more (and pray they do 218 - * it in sequential order). if 'enc_types[enc_method] 219 - * != 0', then it is supported by the host. enc_method 220 - * is USB_ENC_TYPE*. 221 212 * 222 213 * @set_ptk: Set the PTK and enable encryption for a device. Or, if 223 214 * the supplied key is NULL, disable encryption for that ··· 252 249 struct uwb_pal pal; 253 250 254 251 unsigned trust_timeout; /* in jiffies */ 255 - struct wuie_host_info *wuie_host_info; /* Includes CHID */ 252 + struct wusb_ckhdid chid; 253 + struct wuie_host_info *wuie_host_info; 256 254 257 255 struct mutex mutex; /* locks everything else */ 258 256 u16 cluster_id; /* Wireless USB Cluster ID */ ··· 273 269 u8 mmcies_max; 274 270 /* FIXME: make wusbhc_ops? 
*/ 275 271 int (*start)(struct wusbhc *wusbhc); 276 - void (*stop)(struct wusbhc *wusbhc); 272 + void (*stop)(struct wusbhc *wusbhc, int delay); 277 273 int (*mmcie_add)(struct wusbhc *wusbhc, u8 interval, u8 repeat_cnt, 278 274 u8 handle, struct wuie_hdr *wuie); 279 275 int (*mmcie_rm)(struct wusbhc *wusbhc, u8 handle); ··· 377 373 usb_put_hcd(&wusbhc->usb_hcd); 378 374 } 379 375 380 - int wusbhc_start(struct wusbhc *wusbhc, const struct wusb_ckhdid *chid); 376 + int wusbhc_start(struct wusbhc *wusbhc); 381 377 void wusbhc_stop(struct wusbhc *wusbhc); 382 378 extern int wusbhc_chid_set(struct wusbhc *, const struct wusb_ckhdid *); 383 379 384 380 /* Device connect handling */ 385 381 extern int wusbhc_devconnect_create(struct wusbhc *); 386 382 extern void wusbhc_devconnect_destroy(struct wusbhc *); 387 - extern int wusbhc_devconnect_start(struct wusbhc *wusbhc, 388 - const struct wusb_ckhdid *chid); 383 + extern int wusbhc_devconnect_start(struct wusbhc *wusbhc); 389 384 extern void wusbhc_devconnect_stop(struct wusbhc *wusbhc); 390 - extern int wusbhc_devconnect_auth(struct wusbhc *, u8); 391 385 extern void wusbhc_handle_dn(struct wusbhc *, u8 srcaddr, 392 386 struct wusb_dn_hdr *dn_hdr, size_t size); 393 - extern int wusbhc_dev_reset(struct wusbhc *wusbhc, u8 port); 394 387 extern void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port); 395 388 extern int wusb_usb_ncb(struct notifier_block *nb, unsigned long val, 396 389 void *priv); ··· 433 432 extern int wusb_dev_4way_handshake(struct wusbhc *, struct wusb_dev *, 434 433 struct wusb_ckhdid *ck); 435 434 void wusbhc_gtk_rekey(struct wusbhc *wusbhc); 435 + int wusb_dev_update_address(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev); 436 436 437 437 438 438 /* WUSB Cluster ID handling */
+3
drivers/uwb/Makefile
··· 6 6 7 7 uwb-objs := \ 8 8 address.o \ 9 + allocator.o \ 9 10 beacon.o \ 10 11 driver.o \ 11 12 drp.o \ ··· 14 13 drp-ie.o \ 15 14 est.o \ 16 15 ie.o \ 16 + ie-rcv.o \ 17 17 lc-dev.o \ 18 18 lc-rc.o \ 19 19 neh.o \ 20 20 pal.o \ 21 + radio.o \ 21 22 reset.o \ 22 23 rsv.o \ 23 24 scan.o \
+1 -1
drivers/uwb/address.c
··· 28 28 #include <linux/device.h> 29 29 #include <linux/random.h> 30 30 #include <linux/etherdevice.h> 31 - #include <linux/uwb/debug.h> 31 + 32 32 #include "uwb-internal.h" 33 33 34 34
+386
drivers/uwb/allocator.c
··· 1 + /* 2 + * UWB reservation management. 3 + * 4 + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License version 8 + * 2 as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + #include <linux/version.h> 19 + #include <linux/kernel.h> 20 + #include <linux/uwb.h> 21 + 22 + #include "uwb-internal.h" 23 + 24 + static void uwb_rsv_fill_column_alloc(struct uwb_rsv_alloc_info *ai) 25 + { 26 + int col, mas, safe_mas, unsafe_mas; 27 + unsigned char *bm = ai->bm; 28 + struct uwb_rsv_col_info *ci = ai->ci; 29 + unsigned char c; 30 + 31 + for (col = ci->csi.start_col; col < UWB_NUM_ZONES; col += ci->csi.interval) { 32 + 33 + safe_mas = ci->csi.safe_mas_per_col; 34 + unsafe_mas = ci->csi.unsafe_mas_per_col; 35 + 36 + for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++ ) { 37 + if (bm[col * UWB_MAS_PER_ZONE + mas] == 0) { 38 + 39 + if (safe_mas > 0) { 40 + safe_mas--; 41 + c = UWB_RSV_MAS_SAFE; 42 + } else if (unsafe_mas > 0) { 43 + unsafe_mas--; 44 + c = UWB_RSV_MAS_UNSAFE; 45 + } else { 46 + break; 47 + } 48 + bm[col * UWB_MAS_PER_ZONE + mas] = c; 49 + } 50 + } 51 + } 52 + } 53 + 54 + static void uwb_rsv_fill_row_alloc(struct uwb_rsv_alloc_info *ai) 55 + { 56 + int mas, col, rows; 57 + unsigned char *bm = ai->bm; 58 + struct uwb_rsv_row_info *ri = &ai->ri; 59 + unsigned char c; 60 + 61 + rows = 1; 62 + c = UWB_RSV_MAS_SAFE; 63 + for (mas = UWB_MAS_PER_ZONE - 1; mas >= 0; mas--) { 64 + if (ri->avail[mas] == 1) { 65 + 66 + if (rows > ri->used_rows) { 67 + break; 68 + } else if (rows > 7) { 69 + c = UWB_RSV_MAS_UNSAFE; 70 + } 71 + 72 + for (col = 0; col < UWB_NUM_ZONES; col++) { 73 + if (bm[col * UWB_NUM_ZONES + mas] != UWB_RSV_MAS_NOT_AVAIL) { 74 + bm[col * UWB_NUM_ZONES + mas] = c; 75 + if(c == UWB_RSV_MAS_SAFE) 76 + ai->safe_allocated_mases++; 77 + else 78 + ai->unsafe_allocated_mases++; 79 + } 80 + } 81 + rows++; 82 + } 83 + } 84 + ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases; 85 + } 86 + 87 + /* 88 + * Find the best column set for a given availability, interval, num safe mas and 89 + * num unsafe mas. 90 + * 91 + * The different sets are tried in order as shown below, depending on the interval. 
92 + * 93 + * interval = 16 94 + * deep = 0 95 + * set 1 -> { 8 } 96 + * deep = 1 97 + * set 1 -> { 4 } 98 + * set 2 -> { 12 } 99 + * deep = 2 100 + * set 1 -> { 2 } 101 + * set 2 -> { 6 } 102 + * set 3 -> { 10 } 103 + * set 4 -> { 14 } 104 + * deep = 3 105 + * set 1 -> { 1 } 106 + * set 2 -> { 3 } 107 + * set 3 -> { 5 } 108 + * set 4 -> { 7 } 109 + * set 5 -> { 9 } 110 + * set 6 -> { 11 } 111 + * set 7 -> { 13 } 112 + * set 8 -> { 15 } 113 + * 114 + * interval = 8 115 + * deep = 0 116 + * set 1 -> { 4 12 } 117 + * deep = 1 118 + * set 1 -> { 2 10 } 119 + * set 2 -> { 6 14 } 120 + * deep = 2 121 + * set 1 -> { 1 9 } 122 + * set 2 -> { 3 11 } 123 + * set 3 -> { 5 13 } 124 + * set 4 -> { 7 15 } 125 + * 126 + * interval = 4 127 + * deep = 0 128 + * set 1 -> { 2 6 10 14 } 129 + * deep = 1 130 + * set 1 -> { 1 5 9 13 } 131 + * set 2 -> { 3 7 11 15 } 132 + * 133 + * interval = 2 134 + * deep = 0 135 + * set 1 -> { 1 3 5 7 9 11 13 15 } 136 + */ 137 + static int uwb_rsv_find_best_column_set(struct uwb_rsv_alloc_info *ai, int interval, 138 + int num_safe_mas, int num_unsafe_mas) 139 + { 140 + struct uwb_rsv_col_info *ci = ai->ci; 141 + struct uwb_rsv_col_set_info *csi = &ci->csi; 142 + struct uwb_rsv_col_set_info tmp_csi; 143 + int deep, set, col, start_col_deep, col_start_set; 144 + int start_col, max_mas_in_set, lowest_max_mas_in_deep; 145 + int n_mas; 146 + int found = UWB_RSV_ALLOC_NOT_FOUND; 147 + 148 + tmp_csi.start_col = 0; 149 + start_col_deep = interval; 150 + n_mas = num_unsafe_mas + num_safe_mas; 151 + 152 + for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) { 153 + start_col_deep /= 2; 154 + col_start_set = 0; 155 + lowest_max_mas_in_deep = UWB_MAS_PER_ZONE; 156 + 157 + for (set = 1; set <= (1 << deep); set++) { 158 + max_mas_in_set = 0; 159 + start_col = start_col_deep + col_start_set; 160 + for (col = start_col; col < UWB_NUM_ZONES; col += interval) { 161 + 162 + if (ci[col].max_avail_safe >= num_safe_mas && 163 + ci[col].max_avail_unsafe >= n_mas) { 164 + if (ci[col].highest_mas[n_mas] > max_mas_in_set) 165 + max_mas_in_set = ci[col].highest_mas[n_mas]; 166 + } else { 167 + max_mas_in_set = 0; 168 + break; 169 + } 170 + } 171 + if ((lowest_max_mas_in_deep > max_mas_in_set) && max_mas_in_set) { 172 + lowest_max_mas_in_deep = max_mas_in_set; 173 + 174 + tmp_csi.start_col = start_col; 175 + } 176 + col_start_set += (interval >> deep); 177 + } 178 + 179 + if (lowest_max_mas_in_deep < 8) { 180 + csi->start_col = tmp_csi.start_col; 181 + found = UWB_RSV_ALLOC_FOUND; 182 + break; 183 + } else if ((lowest_max_mas_in_deep > 8) && 184 + (lowest_max_mas_in_deep != UWB_MAS_PER_ZONE) && 185 + (found == UWB_RSV_ALLOC_NOT_FOUND)) { 186 + csi->start_col = tmp_csi.start_col; 187 + found = UWB_RSV_ALLOC_FOUND; 188 + } 189 + } 190 + 191 + if (found == UWB_RSV_ALLOC_FOUND) { 192 + csi->interval = interval; 193 + csi->safe_mas_per_col = num_safe_mas; 194 + csi->unsafe_mas_per_col = num_unsafe_mas; 195 + 196 + ai->safe_allocated_mases = (UWB_NUM_ZONES / interval) * num_safe_mas; 197 + ai->unsafe_allocated_mases = (UWB_NUM_ZONES / interval) * num_unsafe_mas; 198 + ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases; 199 + ai->interval = interval; 200 + } 201 + return found; 202 + } 203 + 204 + static void get_row_descriptors(struct uwb_rsv_alloc_info *ai) 205 + { 206 + unsigned char *bm = ai->bm; 207 + struct uwb_rsv_row_info *ri = &ai->ri; 208 + int col, mas; 209 + 210 + ri->free_rows = 16; 211 + for (mas = 0; mas < UWB_MAS_PER_ZONE; mas ++) { 212 + ri->avail[mas] = 1; 
213 + for (col = 1; col < UWB_NUM_ZONES; col++) { 214 + if (bm[col * UWB_NUM_ZONES + mas] == UWB_RSV_MAS_NOT_AVAIL) { 215 + ri->free_rows--; 216 + ri->avail[mas]=0; 217 + break; 218 + } 219 + } 220 + } 221 + } 222 + 223 + static void uwb_rsv_fill_column_info(unsigned char *bm, int column, struct uwb_rsv_col_info *rci) 224 + { 225 + int mas; 226 + int block_count = 0, start_block = 0; 227 + int previous_avail = 0; 228 + int available = 0; 229 + int safe_mas_in_row[UWB_MAS_PER_ZONE] = { 230 + 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, 231 + }; 232 + 233 + rci->max_avail_safe = 0; 234 + 235 + for (mas = 0; mas < UWB_MAS_PER_ZONE; mas ++) { 236 + if (!bm[column * UWB_NUM_ZONES + mas]) { 237 + available++; 238 + rci->max_avail_unsafe = available; 239 + 240 + rci->highest_mas[available] = mas; 241 + 242 + if (previous_avail) { 243 + block_count++; 244 + if ((block_count > safe_mas_in_row[start_block]) && 245 + (!rci->max_avail_safe)) 246 + rci->max_avail_safe = available - 1; 247 + } else { 248 + previous_avail = 1; 249 + start_block = mas; 250 + block_count = 1; 251 + } 252 + } else { 253 + previous_avail = 0; 254 + } 255 + } 256 + if (!rci->max_avail_safe) 257 + rci->max_avail_safe = rci->max_avail_unsafe; 258 + } 259 + 260 + static void get_column_descriptors(struct uwb_rsv_alloc_info *ai) 261 + { 262 + unsigned char *bm = ai->bm; 263 + struct uwb_rsv_col_info *ci = ai->ci; 264 + int col; 265 + 266 + for (col = 1; col < UWB_NUM_ZONES; col++) { 267 + uwb_rsv_fill_column_info(bm, col, &ci[col]); 268 + } 269 + } 270 + 271 + static int uwb_rsv_find_best_row_alloc(struct uwb_rsv_alloc_info *ai) 272 + { 273 + int n_rows; 274 + int max_rows = ai->max_mas / UWB_USABLE_MAS_PER_ROW; 275 + int min_rows = ai->min_mas / UWB_USABLE_MAS_PER_ROW; 276 + if (ai->min_mas % UWB_USABLE_MAS_PER_ROW) 277 + min_rows++; 278 + for (n_rows = max_rows; n_rows >= min_rows; n_rows--) { 279 + if (n_rows <= ai->ri.free_rows) { 280 + ai->ri.used_rows = n_rows; 281 + ai->interval = 1; /* row reservation */ 282 + uwb_rsv_fill_row_alloc(ai); 283 + return UWB_RSV_ALLOC_FOUND; 284 + } 285 + } 286 + return UWB_RSV_ALLOC_NOT_FOUND; 287 + } 288 + 289 + static int uwb_rsv_find_best_col_alloc(struct uwb_rsv_alloc_info *ai, int interval) 290 + { 291 + int n_safe, n_unsafe, n_mas; 292 + int n_column = UWB_NUM_ZONES / interval; 293 + int max_per_zone = ai->max_mas / n_column; 294 + int min_per_zone = ai->min_mas / n_column; 295 + 296 + if (ai->min_mas % n_column) 297 + min_per_zone++; 298 + 299 + if (min_per_zone > UWB_MAS_PER_ZONE) { 300 + return UWB_RSV_ALLOC_NOT_FOUND; 301 + } 302 + 303 + if (max_per_zone > UWB_MAS_PER_ZONE) { 304 + max_per_zone = UWB_MAS_PER_ZONE; 305 + } 306 + 307 + for (n_mas = max_per_zone; n_mas >= min_per_zone; n_mas--) { 308 + if (uwb_rsv_find_best_column_set(ai, interval, 0, n_mas) == UWB_RSV_ALLOC_NOT_FOUND) 309 + continue; 310 + for (n_safe = n_mas; n_safe >= 0; n_safe--) { 311 + n_unsafe = n_mas - n_safe; 312 + if (uwb_rsv_find_best_column_set(ai, interval, n_safe, n_unsafe) == UWB_RSV_ALLOC_FOUND) { 313 + uwb_rsv_fill_column_alloc(ai); 314 + return UWB_RSV_ALLOC_FOUND; 315 + } 316 + } 317 + } 318 + return UWB_RSV_ALLOC_NOT_FOUND; 319 + } 320 + 321 + int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available, 322 + struct uwb_mas_bm *result) 323 + { 324 + struct uwb_rsv_alloc_info *ai; 325 + int interval; 326 + int bit_index; 327 + 328 + ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL); 329 + 330 + ai->min_mas = rsv->min_mas; 331 + ai->max_mas = rsv->max_mas; 332 + 
ai->max_interval = rsv->max_interval; 333 + 334 + 335 + /* fill the not available vector from the available bm */ 336 + for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) { 337 + if (!test_bit(bit_index, available->bm)) 338 + ai->bm[bit_index] = UWB_RSV_MAS_NOT_AVAIL; 339 + } 340 + 341 + if (ai->max_interval == 1) { 342 + get_row_descriptors(ai); 343 + if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND) 344 + goto alloc_found; 345 + else 346 + goto alloc_not_found; 347 + } 348 + 349 + get_column_descriptors(ai); 350 + 351 + for (interval = 16; interval >= 2; interval>>=1) { 352 + if (interval > ai->max_interval) 353 + continue; 354 + if (uwb_rsv_find_best_col_alloc(ai, interval) == UWB_RSV_ALLOC_FOUND) 355 + goto alloc_found; 356 + } 357 + 358 + /* try row reservation if no column is found */ 359 + get_row_descriptors(ai); 360 + if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND) 361 + goto alloc_found; 362 + else 363 + goto alloc_not_found; 364 + 365 + alloc_found: 366 + bitmap_zero(result->bm, UWB_NUM_MAS); 367 + bitmap_zero(result->unsafe_bm, UWB_NUM_MAS); 368 + /* fill the safe and unsafe bitmaps */ 369 + for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) { 370 + if (ai->bm[bit_index] == UWB_RSV_MAS_SAFE) 371 + set_bit(bit_index, result->bm); 372 + else if (ai->bm[bit_index] == UWB_RSV_MAS_UNSAFE) 373 + set_bit(bit_index, result->unsafe_bm); 374 + } 375 + bitmap_or(result->bm, result->bm, result->unsafe_bm, UWB_NUM_MAS); 376 + 377 + result->safe = ai->safe_allocated_mases; 378 + result->unsafe = ai->unsafe_allocated_mases; 379 + 380 + kfree(ai); 381 + return UWB_RSV_ALLOC_FOUND; 382 + 383 + alloc_not_found: 384 + kfree(ai); 385 + return UWB_RSV_ALLOC_NOT_FOUND; 386 + }
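The allocator above tries a row allocation when max_interval is 1; otherwise it walks the column allocations from interval 16 down to 2 (skipping intervals larger than max_interval) and falls back to a row allocation if no column set fits. The stand-alone sketch below is illustration only, not part of the patch: it enumerates the same column sets as uwb_rsv_find_best_column_set() and, when run, reproduces the interval/deep/set table from the comment block above.

/* Illustrative only: print the column sets tried for each interval,
 * mirroring the search order in uwb_rsv_find_best_column_set().
 * Assumes the 16 zones per superframe used by the driver.
 */
#include <stdio.h>

#define NUM_ZONES 16

static void print_column_sets(int interval)
{
	int deep, set, col, start_col_deep, col_start_set;

	printf("interval = %d\n", interval);
	start_col_deep = interval;
	for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) {
		start_col_deep /= 2;
		col_start_set = 0;
		printf("  deep = %d\n", deep);
		for (set = 1; set <= (1 << deep); set++) {
			printf("    set %d -> {", set);
			for (col = start_col_deep + col_start_set;
			     col < NUM_ZONES; col += interval)
				printf(" %d", col);
			printf(" }\n");
			col_start_set += interval >> deep;
		}
	}
}

int main(void)
{
	int interval;

	for (interval = 16; interval >= 2; interval >>= 1)
		print_column_sets(interval);
	return 0;
}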
+50 -84
drivers/uwb/beacon.c
··· 22 22 * 23 23 * FIXME: docs 24 24 */ 25 - 26 25 #include <linux/kernel.h> 27 26 #include <linux/init.h> 28 27 #include <linux/module.h> 29 28 #include <linux/device.h> 30 29 #include <linux/err.h> 31 30 #include <linux/kdev_t.h> 31 + 32 32 #include "uwb-internal.h" 33 33 34 - #define D_LOCAL 0 35 - #include <linux/uwb/debug.h> 36 - 37 - /** Start Beaconing command structure */ 34 + /* Start Beaconing command structure */ 38 35 struct uwb_rc_cmd_start_beacon { 39 36 struct uwb_rccb rccb; 40 37 __le16 wBPSTOffset; ··· 116 119 int result; 117 120 struct device *dev = &rc->uwb_dev.dev; 118 121 119 - mutex_lock(&rc->uwb_dev.mutex); 120 122 if (channel < 0) 121 123 channel = -1; 122 124 if (channel == -1) ··· 124 128 /* channel >= 0...dah */ 125 129 result = uwb_rc_start_beacon(rc, bpst_offset, channel); 126 130 if (result < 0) 127 - goto out_up; 131 + return result; 128 132 if (le16_to_cpu(rc->ies->wIELength) > 0) { 129 133 result = uwb_rc_set_ie(rc, rc->ies); 130 134 if (result < 0) { ··· 133 137 result = uwb_rc_stop_beacon(rc); 134 138 channel = -1; 135 139 bpst_offset = 0; 136 - } else 137 - result = 0; 140 + } 138 141 } 139 142 } 140 143 141 - if (result < 0) 142 - goto out_up; 143 - rc->beaconing = channel; 144 - 145 - uwb_notify(rc, NULL, uwb_bg_joined(rc) ? UWB_NOTIF_BG_JOIN : UWB_NOTIF_BG_LEAVE); 146 - 147 - out_up: 148 - mutex_unlock(&rc->uwb_dev.mutex); 144 + if (result >= 0) 145 + rc->beaconing = channel; 149 146 return result; 150 147 } 151 148 ··· 157 168 * FIXME: use something faster for search than a list 158 169 */ 159 170 160 - struct uwb_beca uwb_beca = { 161 - .list = LIST_HEAD_INIT(uwb_beca.list), 162 - .mutex = __MUTEX_INITIALIZER(uwb_beca.mutex) 163 - }; 164 - 165 - 166 171 void uwb_bce_kfree(struct kref *_bce) 167 172 { 168 173 struct uwb_beca_e *bce = container_of(_bce, struct uwb_beca_e, refcnt); ··· 168 185 169 186 /* Find a beacon by dev addr in the cache */ 170 187 static 171 - struct uwb_beca_e *__uwb_beca_find_bydev(const struct uwb_dev_addr *dev_addr) 188 + struct uwb_beca_e *__uwb_beca_find_bydev(struct uwb_rc *rc, 189 + const struct uwb_dev_addr *dev_addr) 172 190 { 173 191 struct uwb_beca_e *bce, *next; 174 - list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { 175 - d_printf(6, NULL, "looking for addr %02x:%02x in %02x:%02x\n", 176 - dev_addr->data[0], dev_addr->data[1], 177 - bce->dev_addr.data[0], bce->dev_addr.data[1]); 192 + list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { 178 193 if (!memcmp(&bce->dev_addr, dev_addr, sizeof(bce->dev_addr))) 179 194 goto out; 180 195 } ··· 183 202 184 203 /* Find a beacon by dev addr in the cache */ 185 204 static 186 - struct uwb_beca_e *__uwb_beca_find_bymac(const struct uwb_mac_addr *mac_addr) 205 + struct uwb_beca_e *__uwb_beca_find_bymac(struct uwb_rc *rc, 206 + const struct uwb_mac_addr *mac_addr) 187 207 { 188 208 struct uwb_beca_e *bce, *next; 189 - list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { 209 + list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { 190 210 if (!memcmp(bce->mac_addr, mac_addr->data, 191 211 sizeof(struct uwb_mac_addr))) 192 212 goto out; ··· 211 229 struct uwb_dev *found = NULL; 212 230 struct uwb_beca_e *bce; 213 231 214 - mutex_lock(&uwb_beca.mutex); 215 - bce = __uwb_beca_find_bydev(devaddr); 232 + mutex_lock(&rc->uwb_beca.mutex); 233 + bce = __uwb_beca_find_bydev(rc, devaddr); 216 234 if (bce) 217 235 found = uwb_dev_try_get(rc, bce->uwb_dev); 218 - mutex_unlock(&uwb_beca.mutex); 236 + mutex_unlock(&rc->uwb_beca.mutex); 219 237 220 238 
return found; 221 239 } ··· 231 249 struct uwb_dev *found = NULL; 232 250 struct uwb_beca_e *bce; 233 251 234 - mutex_lock(&uwb_beca.mutex); 235 - bce = __uwb_beca_find_bymac(macaddr); 252 + mutex_lock(&rc->uwb_beca.mutex); 253 + bce = __uwb_beca_find_bymac(rc, macaddr); 236 254 if (bce) 237 255 found = uwb_dev_try_get(rc, bce->uwb_dev); 238 - mutex_unlock(&uwb_beca.mutex); 256 + mutex_unlock(&rc->uwb_beca.mutex); 239 257 240 258 return found; 241 259 } ··· 256 274 * @bf: Beacon frame (part of b, really) 257 275 * @ts_jiffies: Timestamp (in jiffies) when the beacon was received 258 276 */ 259 - struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *be, 277 + static 278 + struct uwb_beca_e *__uwb_beca_add(struct uwb_rc *rc, 279 + struct uwb_rc_evt_beacon *be, 260 280 struct uwb_beacon_frame *bf, 261 281 unsigned long ts_jiffies) 262 282 { ··· 270 286 uwb_beca_e_init(bce); 271 287 bce->ts_jiffies = ts_jiffies; 272 288 bce->uwb_dev = NULL; 273 - list_add(&bce->node, &uwb_beca.list); 289 + list_add(&bce->node, &rc->uwb_beca.list); 274 290 return bce; 275 291 } 276 292 ··· 279 295 * 280 296 * Remove associated devicest too. 281 297 */ 282 - void uwb_beca_purge(void) 298 + void uwb_beca_purge(struct uwb_rc *rc) 283 299 { 284 300 struct uwb_beca_e *bce, *next; 285 301 unsigned long expires; 286 302 287 - mutex_lock(&uwb_beca.mutex); 288 - list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { 303 + mutex_lock(&rc->uwb_beca.mutex); 304 + list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { 289 305 expires = bce->ts_jiffies + msecs_to_jiffies(beacon_timeout_ms); 290 306 if (time_after(jiffies, expires)) { 291 307 uwbd_dev_offair(bce); 292 - list_del(&bce->node); 293 - uwb_bce_put(bce); 294 308 } 295 309 } 296 - mutex_unlock(&uwb_beca.mutex); 310 + mutex_unlock(&rc->uwb_beca.mutex); 297 311 } 298 312 299 313 /* Clean up the whole beacon cache. 
Called on shutdown */ 300 - void uwb_beca_release(void) 314 + void uwb_beca_release(struct uwb_rc *rc) 301 315 { 302 316 struct uwb_beca_e *bce, *next; 303 - mutex_lock(&uwb_beca.mutex); 304 - list_for_each_entry_safe(bce, next, &uwb_beca.list, node) { 317 + 318 + mutex_lock(&rc->uwb_beca.mutex); 319 + list_for_each_entry_safe(bce, next, &rc->uwb_beca.list, node) { 305 320 list_del(&bce->node); 306 321 uwb_bce_put(bce); 307 322 } 308 - mutex_unlock(&uwb_beca.mutex); 323 + mutex_unlock(&rc->uwb_beca.mutex); 309 324 } 310 325 311 326 static void uwb_beacon_print(struct uwb_rc *rc, struct uwb_rc_evt_beacon *be, ··· 332 349 ssize_t result = 0; 333 350 struct uwb_rc_evt_beacon *be; 334 351 struct uwb_beacon_frame *bf; 335 - struct uwb_buf_ctx ctx = { 336 - .buf = buf, 337 - .bytes = 0, 338 - .size = size 339 - }; 352 + int ies_len; 353 + struct uwb_ie_hdr *ies; 340 354 341 355 mutex_lock(&bce->mutex); 356 + 342 357 be = bce->be; 343 - if (be == NULL) 344 - goto out; 345 - bf = (void *) be->BeaconInfo; 346 - uwb_ie_for_each(uwb_dev, uwb_ie_dump_hex, &ctx, 347 - bf->IEData, be->wBeaconInfoLength - sizeof(*bf)); 348 - result = ctx.bytes; 349 - out: 358 + if (be) { 359 + bf = (struct uwb_beacon_frame *)bce->be->BeaconInfo; 360 + ies_len = be->wBeaconInfoLength - sizeof(struct uwb_beacon_frame); 361 + ies = (struct uwb_ie_hdr *)bf->IEData; 362 + 363 + result = uwb_ie_dump_hex(ies, ies_len, buf, size); 364 + } 365 + 350 366 mutex_unlock(&bce->mutex); 367 + 351 368 return result; 352 369 } 353 370 ··· 420 437 if (uwb_mac_addr_bcast(&bf->Device_Identifier)) 421 438 return 0; 422 439 423 - mutex_lock(&uwb_beca.mutex); 424 - bce = __uwb_beca_find_bymac(&bf->Device_Identifier); 440 + mutex_lock(&rc->uwb_beca.mutex); 441 + bce = __uwb_beca_find_bymac(rc, &bf->Device_Identifier); 425 442 if (bce == NULL) { 426 443 /* Not in there, a new device is pinging */ 427 444 uwb_beacon_print(evt->rc, be, bf); 428 - bce = __uwb_beca_add(be, bf, evt->ts_jiffies); 445 + bce = __uwb_beca_add(rc, be, bf, evt->ts_jiffies); 429 446 if (bce == NULL) { 430 - mutex_unlock(&uwb_beca.mutex); 447 + mutex_unlock(&rc->uwb_beca.mutex); 431 448 return -ENOMEM; 432 449 } 433 450 } 434 - mutex_unlock(&uwb_beca.mutex); 451 + mutex_unlock(&rc->uwb_beca.mutex); 435 452 436 453 mutex_lock(&bce->mutex); 437 454 /* purge old beacon data */ ··· 571 588 return result; 572 589 } 573 590 574 - /** 575 - * uwb_bg_joined - is the RC in a beacon group? 576 - * @rc: the radio controller 577 - * 578 - * Returns true if the radio controller is in a beacon group (even if 579 - * it's the sole member). 580 - */ 581 - int uwb_bg_joined(struct uwb_rc *rc) 582 - { 583 - return rc->beaconing != -1; 584 - } 585 - EXPORT_SYMBOL_GPL(uwb_bg_joined); 586 - 587 591 /* 588 592 * Print beaconing state. 589 593 */ ··· 589 619 590 620 /* 591 621 * Start beaconing on the specified channel, or stop beaconing. 592 - * 593 - * The BPST offset of when to start searching for a beacon group to 594 - * join may be specified. 
595 622 */ 596 623 static ssize_t uwb_rc_beacon_store(struct device *dev, 597 624 struct device_attribute *attr, ··· 597 630 struct uwb_dev *uwb_dev = to_uwb_dev(dev); 598 631 struct uwb_rc *rc = uwb_dev->rc; 599 632 int channel; 600 - unsigned bpst_offset = 0; 601 633 ssize_t result = -EINVAL; 602 634 603 - result = sscanf(buf, "%d %u\n", &channel, &bpst_offset); 635 + result = sscanf(buf, "%d", &channel); 604 636 if (result >= 1) 605 - result = uwb_rc_beacon(rc, channel, bpst_offset); 637 + result = uwb_radio_force_channel(rc, channel); 606 638 607 639 return result < 0 ? result : size; 608 640 }
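With this change the beacon cache is kept per radio controller (rc->uwb_beca) instead of in a single global list, so every lookup, add and purge takes that controller's own mutex. The helper below is hypothetical and not part of the patch; it only assumes the uwb_beca fields visible in the hunks above and shows the resulting locking pattern.

/* Hypothetical helper: count the entries in one radio controller's
 * beacon cache.  rc->uwb_beca.mutex protects rc->uwb_beca.list, as in
 * the lookups above.
 */
static int uwb_beca_count(struct uwb_rc *rc)
{
	struct uwb_beca_e *bce;
	int n = 0;

	mutex_lock(&rc->uwb_beca.mutex);
	list_for_each_entry(bce, &rc->uwb_beca.list, node)
		n++;
	mutex_unlock(&rc->uwb_beca.mutex);

	return n;
}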
+1 -3
drivers/uwb/driver.c
··· 53 53 #include <linux/err.h> 54 54 #include <linux/kdev_t.h> 55 55 #include <linux/random.h> 56 - #include <linux/uwb/debug.h> 56 + 57 57 #include "uwb-internal.h" 58 58 59 59 ··· 118 118 result = class_register(&uwb_rc_class); 119 119 if (result < 0) 120 120 goto error_uwb_rc_class_register; 121 - uwbd_start(); 122 121 uwb_dbg_init(); 123 122 return 0; 124 123 ··· 131 132 static void __exit uwb_subsys_exit(void) 132 133 { 133 134 uwb_dbg_exit(); 134 - uwbd_stop(); 135 135 class_unregister(&uwb_rc_class); 136 136 uwb_est_destroy(); 137 137 return;
+3 -1
drivers/uwb/drp-avail.c
··· 58 58 * 59 59 * avail = global & local & pending 60 60 */ 61 - static void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail) 61 + void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail) 62 62 { 63 63 bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS); 64 64 bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS); ··· 105 105 bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS); 106 106 bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS); 107 107 rc->drp_avail.ie_valid = false; 108 + uwb_rsv_handle_drp_avail_change(rc); 108 109 } 109 110 110 111 /** ··· 281 280 mutex_lock(&rc->rsvs_mutex); 282 281 bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS); 283 282 rc->drp_avail.ie_valid = false; 283 + uwb_rsv_handle_drp_avail_change(rc); 284 284 mutex_unlock(&rc->rsvs_mutex); 285 285 286 286 uwb_rsv_sched_update(rc);
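The comment above gives the availability rule (avail = global & local & pending); the new uwb_rsv_handle_drp_avail_change() calls make sure the reservation code is told whenever the local or global component changes. Below is a small user-space model of that calculation, illustration only: plain uint64_t arrays stand in for the kernel bitmap helpers, and the 256-MAS superframe of ECMA-368 is assumed.

/* Illustrative only: avail = global & local & pending over a 256-MAS
 * bitmap, modelled with plain arrays instead of bitmap_and().
 */
#include <stdint.h>
#include <stdio.h>

#define NUM_MAS	256
#define WORDS	(NUM_MAS / 64)

static void mas_and(uint64_t *dst, const uint64_t *a, const uint64_t *b)
{
	int i;

	for (i = 0; i < WORDS; i++)
		dst[i] = a[i] & b[i];
}

int main(void)
{
	uint64_t global[WORDS], local[WORDS], pending[WORDS], avail[WORDS];
	int i, free_mas = 0;

	for (i = 0; i < WORDS; i++)
		global[i] = local[i] = pending[i] = ~0ULL;
	global[0] &= ~0xffULL;		/* pretend MAS 0-7 are taken elsewhere */

	mas_and(avail, global, local);
	mas_and(avail, avail, pending);

	for (i = 0; i < NUM_MAS; i++)
		if (avail[i / 64] & (1ULL << (i % 64)))
			free_mas++;
	printf("%d of %d MAS available\n", free_mas, NUM_MAS);
	return 0;
}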
+123 -38
drivers/uwb/drp-ie.c
··· 16 16 * You should have received a copy of the GNU General Public License 17 17 * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 18 */ 19 - #include <linux/version.h> 20 19 #include <linux/kernel.h> 21 20 #include <linux/random.h> 22 21 #include <linux/uwb.h> 23 22 24 23 #include "uwb-internal.h" 24 + 25 + 26 + /* 27 + * Return the reason code for a reservations's DRP IE. 28 + */ 29 + int uwb_rsv_reason_code(struct uwb_rsv *rsv) 30 + { 31 + static const int reason_codes[] = { 32 + [UWB_RSV_STATE_O_INITIATED] = UWB_DRP_REASON_ACCEPTED, 33 + [UWB_RSV_STATE_O_PENDING] = UWB_DRP_REASON_ACCEPTED, 34 + [UWB_RSV_STATE_O_MODIFIED] = UWB_DRP_REASON_MODIFIED, 35 + [UWB_RSV_STATE_O_ESTABLISHED] = UWB_DRP_REASON_ACCEPTED, 36 + [UWB_RSV_STATE_O_TO_BE_MOVED] = UWB_DRP_REASON_ACCEPTED, 37 + [UWB_RSV_STATE_O_MOVE_COMBINING] = UWB_DRP_REASON_MODIFIED, 38 + [UWB_RSV_STATE_O_MOVE_REDUCING] = UWB_DRP_REASON_MODIFIED, 39 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, 40 + [UWB_RSV_STATE_T_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, 41 + [UWB_RSV_STATE_T_CONFLICT] = UWB_DRP_REASON_CONFLICT, 42 + [UWB_RSV_STATE_T_PENDING] = UWB_DRP_REASON_PENDING, 43 + [UWB_RSV_STATE_T_DENIED] = UWB_DRP_REASON_DENIED, 44 + [UWB_RSV_STATE_T_RESIZED] = UWB_DRP_REASON_ACCEPTED, 45 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, 46 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, 47 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, 48 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, 49 + }; 50 + 51 + return reason_codes[rsv->state]; 52 + } 53 + 54 + /* 55 + * Return the reason code for a reservations's companion DRP IE . 56 + */ 57 + int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv) 58 + { 59 + static const int companion_reason_codes[] = { 60 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, 61 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, 62 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, 63 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, 64 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, 65 + }; 66 + 67 + return companion_reason_codes[rsv->state]; 68 + } 69 + 70 + /* 71 + * Return the status bit for a reservations's DRP IE. 72 + */ 73 + int uwb_rsv_status(struct uwb_rsv *rsv) 74 + { 75 + static const int statuses[] = { 76 + [UWB_RSV_STATE_O_INITIATED] = 0, 77 + [UWB_RSV_STATE_O_PENDING] = 0, 78 + [UWB_RSV_STATE_O_MODIFIED] = 1, 79 + [UWB_RSV_STATE_O_ESTABLISHED] = 1, 80 + [UWB_RSV_STATE_O_TO_BE_MOVED] = 0, 81 + [UWB_RSV_STATE_O_MOVE_COMBINING] = 1, 82 + [UWB_RSV_STATE_O_MOVE_REDUCING] = 1, 83 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = 1, 84 + [UWB_RSV_STATE_T_ACCEPTED] = 1, 85 + [UWB_RSV_STATE_T_CONFLICT] = 0, 86 + [UWB_RSV_STATE_T_PENDING] = 0, 87 + [UWB_RSV_STATE_T_DENIED] = 0, 88 + [UWB_RSV_STATE_T_RESIZED] = 1, 89 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, 90 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1, 91 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = 1, 92 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = 1, 93 + 94 + }; 95 + 96 + return statuses[rsv->state]; 97 + } 98 + 99 + /* 100 + * Return the status bit for a reservations's companion DRP IE . 
101 + */ 102 + int uwb_rsv_companion_status(struct uwb_rsv *rsv) 103 + { 104 + static const int companion_statuses[] = { 105 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = 0, 106 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, 107 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0, 108 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = 0, 109 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = 0, 110 + }; 111 + 112 + return companion_statuses[rsv->state]; 113 + } 25 114 26 115 /* 27 116 * Allocate a DRP IE. ··· 123 34 static struct uwb_ie_drp *uwb_drp_ie_alloc(void) 124 35 { 125 36 struct uwb_ie_drp *drp_ie; 126 - unsigned tiebreaker; 127 37 128 38 drp_ie = kzalloc(sizeof(struct uwb_ie_drp) + 129 39 UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc), 130 40 GFP_KERNEL); 131 41 if (drp_ie) { 132 42 drp_ie->hdr.element_id = UWB_IE_DRP; 133 - 134 - get_random_bytes(&tiebreaker, sizeof(unsigned)); 135 - uwb_ie_drp_set_tiebreaker(drp_ie, tiebreaker & 1); 136 43 } 137 44 return drp_ie; 138 45 } ··· 189 104 */ 190 105 int uwb_drp_ie_update(struct uwb_rsv *rsv) 191 106 { 192 - struct device *dev = &rsv->rc->uwb_dev.dev; 193 107 struct uwb_ie_drp *drp_ie; 194 - int reason_code, status; 108 + struct uwb_rsv_move *mv; 109 + int unsafe; 195 110 196 - switch (rsv->state) { 197 - case UWB_RSV_STATE_NONE: 111 + if (rsv->state == UWB_RSV_STATE_NONE) { 198 112 kfree(rsv->drp_ie); 199 113 rsv->drp_ie = NULL; 200 114 return 0; 201 - case UWB_RSV_STATE_O_INITIATED: 202 - reason_code = UWB_DRP_REASON_ACCEPTED; 203 - status = 0; 204 - break; 205 - case UWB_RSV_STATE_O_PENDING: 206 - reason_code = UWB_DRP_REASON_ACCEPTED; 207 - status = 0; 208 - break; 209 - case UWB_RSV_STATE_O_MODIFIED: 210 - reason_code = UWB_DRP_REASON_MODIFIED; 211 - status = 1; 212 - break; 213 - case UWB_RSV_STATE_O_ESTABLISHED: 214 - reason_code = UWB_DRP_REASON_ACCEPTED; 215 - status = 1; 216 - break; 217 - case UWB_RSV_STATE_T_ACCEPTED: 218 - reason_code = UWB_DRP_REASON_ACCEPTED; 219 - status = 1; 220 - break; 221 - case UWB_RSV_STATE_T_DENIED: 222 - reason_code = UWB_DRP_REASON_DENIED; 223 - status = 0; 224 - break; 225 - default: 226 - dev_dbg(dev, "rsv with unhandled state (%d)\n", rsv->state); 227 - return -EINVAL; 228 115 } 116 + 117 + unsafe = rsv->mas.unsafe ? 
1 : 0; 229 118 230 119 if (rsv->drp_ie == NULL) { 231 120 rsv->drp_ie = uwb_drp_ie_alloc(); ··· 208 149 } 209 150 drp_ie = rsv->drp_ie; 210 151 152 + uwb_ie_drp_set_unsafe(drp_ie, unsafe); 153 + uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker); 211 154 uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv)); 212 - uwb_ie_drp_set_status(drp_ie, status); 213 - uwb_ie_drp_set_reason_code(drp_ie, reason_code); 155 + uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv)); 156 + uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv)); 214 157 uwb_ie_drp_set_stream_index(drp_ie, rsv->stream); 215 158 uwb_ie_drp_set_type(drp_ie, rsv->type); 216 159 ··· 229 168 drp_ie->dev_addr = rsv->owner->dev_addr; 230 169 231 170 uwb_drp_ie_from_bm(drp_ie, &rsv->mas); 171 + 172 + if (uwb_rsv_has_two_drp_ies(rsv)) { 173 + mv = &rsv->mv; 174 + if (mv->companion_drp_ie == NULL) { 175 + mv->companion_drp_ie = uwb_drp_ie_alloc(); 176 + if (mv->companion_drp_ie == NULL) 177 + return -ENOMEM; 178 + } 179 + drp_ie = mv->companion_drp_ie; 180 + 181 + /* keep all the same configuration of the main drp_ie */ 182 + memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp)); 183 + 184 + 185 + /* FIXME: handle properly the unsafe bit */ 186 + uwb_ie_drp_set_unsafe(drp_ie, 1); 187 + uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv)); 188 + uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv)); 189 + 190 + uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas); 191 + } 232 192 233 193 rsv->ie_valid = true; 234 194 return 0; ··· 301 219 u8 zone; 302 220 u16 zone_mask; 303 221 222 + bitmap_zero(bm->bm, UWB_NUM_MAS); 223 + 304 224 for (cnt = 0; cnt < numallocs; cnt++) { 305 225 alloc = &drp_ie->allocs[cnt]; 306 226 zone_bm = le16_to_cpu(alloc->zone_bm); ··· 314 230 } 315 231 } 316 232 } 233 +
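The four lookup tables added above pick the status bit and reason code that go into a reservation's DRP IE and, for the move/expand states, into its companion DRP IE; uwb_drp_ie_update() then copies the main IE into the companion and overrides its unsafe bit, status and reason code. As a cross-reference, the stand-alone sketch below (illustration only; the strings stand in for the UWB_DRP_REASON_* values) prints the main/companion pairs for the expanding states.

/* Illustrative only: main vs companion DRP IE contents per expanding
 * state, as encoded by the lookup tables above.
 */
#include <stdio.h>

struct drp_ie_pair {
	const char *state;
	const char *main_reason;
	int main_status;
	const char *comp_reason;
	int comp_status;
};

static const struct drp_ie_pair expanding_states[] = {
	{ "O_MOVE_EXPANDING",     "ACCEPTED", 1, "ACCEPTED", 0 },
	{ "T_EXPANDING_ACCEPTED", "ACCEPTED", 1, "ACCEPTED", 1 },
	{ "T_EXPANDING_CONFLICT", "CONFLICT", 1, "CONFLICT", 0 },
	{ "T_EXPANDING_PENDING",  "PENDING",  1, "PENDING",  0 },
	{ "T_EXPANDING_DENIED",   "DENIED",   1, "DENIED",   0 },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(expanding_states) / sizeof(expanding_states[0]); i++)
		printf("%-22s main: %s/%d  companion: %s/%d\n",
		       expanding_states[i].state,
		       expanding_states[i].main_reason,
		       expanding_states[i].main_status,
		       expanding_states[i].comp_reason,
		       expanding_states[i].comp_status);
	return 0;
}

For example, an owner in UWB_RSV_STATE_O_MOVE_EXPANDING keeps beaconing its established MAS with status 1 while requesting the extra MAS in a companion IE with status 0 and the unsafe bit set.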
+530 -177
drivers/uwb/drp.c
··· 23 23 #include <linux/delay.h> 24 24 #include "uwb-internal.h" 25 25 26 + 27 + /* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */ 28 + enum uwb_drp_conflict_action { 29 + /* Reservation is mantained, no action needed */ 30 + UWB_DRP_CONFLICT_MANTAIN = 0, 31 + 32 + /* the device shall not transmit frames in conflicting MASs in 33 + * the following superframe. If the device is the reservation 34 + * target, it shall also set the Reason Code in its DRP IE to 35 + * Conflict in its beacon in the following superframe. 36 + */ 37 + UWB_DRP_CONFLICT_ACT1, 38 + 39 + /* the device shall not set the Reservation Status bit to ONE 40 + * and shall not transmit frames in conflicting MASs. If the 41 + * device is the reservation target, it shall also set the 42 + * Reason Code in its DRP IE to Conflict. 43 + */ 44 + UWB_DRP_CONFLICT_ACT2, 45 + 46 + /* the device shall not transmit frames in conflicting MASs in 47 + * the following superframe. It shall remove the conflicting 48 + * MASs from the reservation or set the Reservation Status to 49 + * ZERO in its beacon in the following superframe. If the 50 + * device is the reservation target, it shall also set the 51 + * Reason Code in its DRP IE to Conflict. 52 + */ 53 + UWB_DRP_CONFLICT_ACT3, 54 + }; 55 + 56 + 57 + static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg, 58 + struct uwb_rceb *reply, ssize_t reply_size) 59 + { 60 + struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply; 61 + 62 + if (r != NULL) { 63 + if (r->bResultCode != UWB_RC_RES_SUCCESS) 64 + dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n", 65 + uwb_rc_strerror(r->bResultCode), r->bResultCode); 66 + } else 67 + dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n"); 68 + 69 + spin_lock(&rc->rsvs_lock); 70 + if (rc->set_drp_ie_pending > 1) { 71 + rc->set_drp_ie_pending = 0; 72 + uwb_rsv_queue_update(rc); 73 + } else { 74 + rc->set_drp_ie_pending = 0; 75 + } 76 + spin_unlock(&rc->rsvs_lock); 77 + } 78 + 26 79 /** 27 80 * Construct and send the SET DRP IE 28 81 * ··· 90 37 * 91 38 * A DRP Availability IE is appended. 92 39 * 93 - * rc->uwb_dev.mutex is held 40 + * rc->rsvs_mutex is held 94 41 * 95 42 * FIXME We currently ignore the returned value indicating the remaining space 96 43 * in beacon. This could be used to deny reservation requests earlier if 97 44 * determined that they would cause the beacon space to be exceeded. 98 45 */ 99 - static 100 - int uwb_rc_gen_send_drp_ie(struct uwb_rc *rc) 46 + int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) 101 47 { 102 48 int result; 103 - struct device *dev = &rc->uwb_dev.dev; 104 49 struct uwb_rc_cmd_set_drp_ie *cmd; 105 - struct uwb_rc_evt_set_drp_ie reply; 106 50 struct uwb_rsv *rsv; 51 + struct uwb_rsv_move *mv; 107 52 int num_bytes = 0; 108 53 u8 *IEDataptr; 109 54 110 55 result = -ENOMEM; 111 56 /* First traverse all reservations to determine memory needed. 
*/ 112 57 list_for_each_entry(rsv, &rc->reservations, rc_node) { 113 - if (rsv->drp_ie != NULL) 58 + if (rsv->drp_ie != NULL) { 114 59 num_bytes += rsv->drp_ie->hdr.length + 2; 60 + if (uwb_rsv_has_two_drp_ies(rsv) && 61 + (rsv->mv.companion_drp_ie != NULL)) { 62 + mv = &rsv->mv; 63 + num_bytes += mv->companion_drp_ie->hdr.length + 2; 64 + } 65 + } 115 66 } 116 67 num_bytes += sizeof(rc->drp_avail.ie); 117 68 cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL); ··· 126 69 cmd->wIELength = num_bytes; 127 70 IEDataptr = (u8 *)&cmd->IEData[0]; 128 71 72 + /* FIXME: DRV avail IE is not always needed */ 73 + /* put DRP avail IE first */ 74 + memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); 75 + IEDataptr += sizeof(struct uwb_ie_drp_avail); 76 + 129 77 /* Next traverse all reservations to place IEs in allocated memory. */ 130 78 list_for_each_entry(rsv, &rc->reservations, rc_node) { 131 79 if (rsv->drp_ie != NULL) { 132 80 memcpy(IEDataptr, rsv->drp_ie, 133 81 rsv->drp_ie->hdr.length + 2); 134 82 IEDataptr += rsv->drp_ie->hdr.length + 2; 83 + 84 + if (uwb_rsv_has_two_drp_ies(rsv) && 85 + (rsv->mv.companion_drp_ie != NULL)) { 86 + mv = &rsv->mv; 87 + memcpy(IEDataptr, mv->companion_drp_ie, 88 + mv->companion_drp_ie->hdr.length + 2); 89 + IEDataptr += mv->companion_drp_ie->hdr.length + 2; 90 + } 135 91 } 136 92 } 137 - memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); 138 93 139 - reply.rceb.bEventType = UWB_RC_CET_GENERAL; 140 - reply.rceb.wEvent = UWB_RC_CMD_SET_DRP_IE; 141 - result = uwb_rc_cmd(rc, "SET-DRP-IE", &cmd->rccb, 142 - sizeof(*cmd) + num_bytes, &reply.rceb, 143 - sizeof(reply)); 144 - if (result < 0) 145 - goto error_cmd; 146 - result = le16_to_cpu(reply.wRemainingSpace); 147 - if (reply.bResultCode != UWB_RC_RES_SUCCESS) { 148 - dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: command execution " 149 - "failed: %s (%d). RemainingSpace in beacon " 150 - "= %d\n", uwb_rc_strerror(reply.bResultCode), 151 - reply.bResultCode, result); 152 - result = -EIO; 153 - } else { 154 - dev_dbg(dev, "SET-DRP-IE sent. RemainingSpace in beacon " 155 - "= %d.\n", result); 156 - result = 0; 157 - } 158 - error_cmd: 94 + result = uwb_rc_cmd_async(rc, "SET-DRP-IE", &cmd->rccb, sizeof(*cmd) + num_bytes, 95 + UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE, 96 + uwb_rc_set_drp_cmd_done, NULL); 97 + 98 + rc->set_drp_ie_pending = 1; 99 + 159 100 kfree(cmd); 160 101 error: 161 102 return result; 162 - 163 103 } 164 - /** 165 - * Send all DRP IEs associated with this host 104 + 105 + /* 106 + * Evaluate the action to perform using conflict resolution rules 166 107 * 167 - * @returns: >= 0 number of bytes still available in the beacon 168 - * < 0 errno code on error. 169 - * 170 - * As per the protocol we obtain the host controller device lock to access 171 - * bandwidth structures. 108 + * Return a uwb_drp_conflict_action. 
172 109 */ 173 - int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) 110 + static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot, 111 + struct uwb_rsv *rsv, int our_status) 174 112 { 175 - int result; 113 + int our_tie_breaker = rsv->tiebreaker; 114 + int our_type = rsv->type; 115 + int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot; 176 116 177 - mutex_lock(&rc->uwb_dev.mutex); 178 - result = uwb_rc_gen_send_drp_ie(rc); 179 - mutex_unlock(&rc->uwb_dev.mutex); 180 - return result; 181 - } 182 - 183 - void uwb_drp_handle_timeout(struct uwb_rsv *rsv) 184 - { 185 - struct device *dev = &rsv->rc->uwb_dev.dev; 186 - 187 - dev_dbg(dev, "reservation timeout in state %s (%d)\n", 188 - uwb_rsv_state_str(rsv->state), rsv->state); 189 - 190 - switch (rsv->state) { 191 - case UWB_RSV_STATE_O_INITIATED: 192 - if (rsv->is_multicast) { 193 - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 194 - return; 195 - } 196 - break; 197 - case UWB_RSV_STATE_O_ESTABLISHED: 198 - if (rsv->is_multicast) 199 - return; 200 - break; 201 - default: 202 - break; 117 + int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie); 118 + int ext_status = uwb_ie_drp_status(ext_drp_ie); 119 + int ext_type = uwb_ie_drp_type(ext_drp_ie); 120 + 121 + 122 + /* [ECMA-368 2nd Edition] 17.4.6 */ 123 + if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) { 124 + return UWB_DRP_CONFLICT_MANTAIN; 203 125 } 204 - uwb_rsv_remove(rsv); 126 + 127 + /* [ECMA-368 2nd Edition] 17.4.6-1 */ 128 + if (our_type == UWB_DRP_TYPE_ALIEN_BP) { 129 + return UWB_DRP_CONFLICT_MANTAIN; 130 + } 131 + 132 + /* [ECMA-368 2nd Edition] 17.4.6-2 */ 133 + if (ext_type == UWB_DRP_TYPE_ALIEN_BP) { 134 + /* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */ 135 + return UWB_DRP_CONFLICT_ACT1; 136 + } 137 + 138 + /* [ECMA-368 2nd Edition] 17.4.6-3 */ 139 + if (our_status == 0 && ext_status == 1) { 140 + return UWB_DRP_CONFLICT_ACT2; 141 + } 142 + 143 + /* [ECMA-368 2nd Edition] 17.4.6-4 */ 144 + if (our_status == 1 && ext_status == 0) { 145 + return UWB_DRP_CONFLICT_MANTAIN; 146 + } 147 + 148 + /* [ECMA-368 2nd Edition] 17.4.6-5a */ 149 + if (our_tie_breaker == ext_tie_breaker && 150 + our_beacon_slot < ext_beacon_slot) { 151 + return UWB_DRP_CONFLICT_MANTAIN; 152 + } 153 + 154 + /* [ECMA-368 2nd Edition] 17.4.6-5b */ 155 + if (our_tie_breaker != ext_tie_breaker && 156 + our_beacon_slot > ext_beacon_slot) { 157 + return UWB_DRP_CONFLICT_MANTAIN; 158 + } 159 + 160 + if (our_status == 0) { 161 + if (our_tie_breaker == ext_tie_breaker) { 162 + /* [ECMA-368 2nd Edition] 17.4.6-6a */ 163 + if (our_beacon_slot > ext_beacon_slot) { 164 + return UWB_DRP_CONFLICT_ACT2; 165 + } 166 + } else { 167 + /* [ECMA-368 2nd Edition] 17.4.6-6b */ 168 + if (our_beacon_slot < ext_beacon_slot) { 169 + return UWB_DRP_CONFLICT_ACT2; 170 + } 171 + } 172 + } else { 173 + if (our_tie_breaker == ext_tie_breaker) { 174 + /* [ECMA-368 2nd Edition] 17.4.6-7a */ 175 + if (our_beacon_slot > ext_beacon_slot) { 176 + return UWB_DRP_CONFLICT_ACT3; 177 + } 178 + } else { 179 + /* [ECMA-368 2nd Edition] 17.4.6-7b */ 180 + if (our_beacon_slot < ext_beacon_slot) { 181 + return UWB_DRP_CONFLICT_ACT3; 182 + } 183 + } 184 + } 185 + return UWB_DRP_CONFLICT_MANTAIN; 205 186 } 206 187 188 + static void handle_conflict_normal(struct uwb_ie_drp *drp_ie, 189 + int ext_beacon_slot, 190 + struct uwb_rsv *rsv, 191 + struct uwb_mas_bm *conflicting_mas) 192 + { 193 + struct uwb_rc *rc = rsv->rc; 194 + struct uwb_rsv_move *mv = &rsv->mv; 195 + struct uwb_drp_backoff_win *bow = 
&rc->bow; 196 + int action; 197 + 198 + action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv)); 199 + 200 + if (uwb_rsv_is_owner(rsv)) { 201 + switch(action) { 202 + case UWB_DRP_CONFLICT_ACT2: 203 + /* try move */ 204 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED); 205 + if (bow->can_reserve_extra_mases == false) 206 + uwb_rsv_backoff_win_increment(rc); 207 + 208 + break; 209 + case UWB_DRP_CONFLICT_ACT3: 210 + uwb_rsv_backoff_win_increment(rc); 211 + /* drop some mases with reason modified */ 212 + /* put in the companion the mases to be dropped */ 213 + bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); 214 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); 215 + default: 216 + break; 217 + } 218 + } else { 219 + switch(action) { 220 + case UWB_DRP_CONFLICT_ACT2: 221 + case UWB_DRP_CONFLICT_ACT3: 222 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); 223 + default: 224 + break; 225 + } 226 + 227 + } 228 + 229 + } 230 + 231 + static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot, 232 + struct uwb_rsv *rsv, bool companion_only, 233 + struct uwb_mas_bm *conflicting_mas) 234 + { 235 + struct uwb_rc *rc = rsv->rc; 236 + struct uwb_drp_backoff_win *bow = &rc->bow; 237 + struct uwb_rsv_move *mv = &rsv->mv; 238 + int action; 239 + 240 + if (companion_only) { 241 + /* status of companion is 0 at this point */ 242 + action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0); 243 + if (uwb_rsv_is_owner(rsv)) { 244 + switch(action) { 245 + case UWB_DRP_CONFLICT_ACT2: 246 + case UWB_DRP_CONFLICT_ACT3: 247 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 248 + rsv->needs_release_companion_mas = false; 249 + if (bow->can_reserve_extra_mases == false) 250 + uwb_rsv_backoff_win_increment(rc); 251 + uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); 252 + } 253 + } else { /* rsv is target */ 254 + switch(action) { 255 + case UWB_DRP_CONFLICT_ACT2: 256 + case UWB_DRP_CONFLICT_ACT3: 257 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_CONFLICT); 258 + /* send_drp_avail_ie = true; */ 259 + } 260 + } 261 + } else { /* also base part of the reservation is conflicting */ 262 + if (uwb_rsv_is_owner(rsv)) { 263 + uwb_rsv_backoff_win_increment(rc); 264 + /* remove companion part */ 265 + uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); 266 + 267 + /* drop some mases with reason modified */ 268 + 269 + /* put in the companion the mases to be dropped */ 270 + bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); 271 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); 272 + } else { /* it is a target rsv */ 273 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); 274 + /* send_drp_avail_ie = true; */ 275 + } 276 + } 277 + } 278 + 279 + static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv, 280 + struct uwb_rc_evt_drp *drp_evt, 281 + struct uwb_ie_drp *drp_ie, 282 + struct uwb_mas_bm *conflicting_mas) 283 + { 284 + struct uwb_rsv_move *mv; 285 + 286 + /* check if the conflicting reservation has two drp_ies */ 287 + if (uwb_rsv_has_two_drp_ies(rsv)) { 288 + mv = &rsv->mv; 289 + if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { 290 + handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number, 291 + rsv, false, conflicting_mas); 292 + } else { 293 + if (bitmap_intersects(mv->companion_mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { 294 + handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number, 295 + rsv, true, 
conflicting_mas); 296 + } 297 + } 298 + } else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { 299 + handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, rsv, conflicting_mas); 300 + } 301 + } 302 + 303 + static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc, 304 + struct uwb_rc_evt_drp *drp_evt, 305 + struct uwb_ie_drp *drp_ie, 306 + struct uwb_mas_bm *conflicting_mas) 307 + { 308 + struct uwb_rsv *rsv; 309 + 310 + list_for_each_entry(rsv, &rc->reservations, rc_node) { 311 + uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, conflicting_mas); 312 + } 313 + } 314 + 207 315 /* 208 316 * Based on the DRP IE, transition a target reservation to a new 209 317 * state. 210 318 */ 211 319 static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, 212 - struct uwb_ie_drp *drp_ie) 320 + struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt) 213 321 { 214 322 struct device *dev = &rc->uwb_dev.dev; 323 + struct uwb_rsv_move *mv = &rsv->mv; 215 324 int status; 216 325 enum uwb_drp_reason reason_code; 217 - 326 + struct uwb_mas_bm mas; 327 + 218 328 status = uwb_ie_drp_status(drp_ie); 219 329 reason_code = uwb_ie_drp_reason_code(drp_ie); 330 + uwb_drp_ie_to_bm(&mas, drp_ie); 220 331 221 - if (status) { 222 - switch (reason_code) { 223 - case UWB_DRP_REASON_ACCEPTED: 332 + switch (reason_code) { 333 + case UWB_DRP_REASON_ACCEPTED: 334 + 335 + if (rsv->state == UWB_RSV_STATE_T_CONFLICT) { 336 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); 337 + break; 338 + } 339 + 340 + if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) { 341 + /* drp_ie is companion */ 342 + if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) 343 + /* stroke companion */ 344 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); 345 + } else { 346 + if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { 347 + if (uwb_drp_avail_reserve_pending(rc, &mas) == -EBUSY) { 348 + /* FIXME: there is a conflict, find 349 + * the conflicting reservations and 350 + * take a sensible action. Consider 351 + * that in drp_ie there is the 352 + * "neighbour" */ 353 + uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); 354 + } else { 355 + /* accept the extra reservation */ 356 + bitmap_copy(mv->companion_mas.bm, mas.bm, UWB_NUM_MAS); 357 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); 358 + } 359 + } else { 360 + if (status) { 361 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); 362 + } 363 + } 364 + 365 + } 366 + break; 367 + 368 + case UWB_DRP_REASON_MODIFIED: 369 + /* check to see if we have already modified the reservation */ 370 + if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { 224 371 uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); 225 372 break; 226 - case UWB_DRP_REASON_MODIFIED: 227 - dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", 228 - reason_code, status); 229 - break; 230 - default: 231 - dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", 232 - reason_code, status); 233 373 } 234 - } else { 235 - switch (reason_code) { 236 - case UWB_DRP_REASON_ACCEPTED: 237 - /* New reservations are handled in uwb_rsv_find(). 
*/ 238 - break; 239 - case UWB_DRP_REASON_DENIED: 240 - uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 241 - break; 242 - case UWB_DRP_REASON_CONFLICT: 243 - case UWB_DRP_REASON_MODIFIED: 244 - dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", 245 - reason_code, status); 246 - break; 247 - default: 248 - dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", 249 - reason_code, status); 374 + 375 + /* find if the owner wants to expand or reduce */ 376 + if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { 377 + /* owner is reducing */ 378 + bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, UWB_NUM_MAS); 379 + uwb_drp_avail_release(rsv->rc, &mv->companion_mas); 250 380 } 381 + 382 + bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); 383 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED); 384 + break; 385 + default: 386 + dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", 387 + reason_code, status); 251 388 } 252 389 } 253 390 ··· 450 199 * state. 451 200 */ 452 201 static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, 453 - struct uwb_ie_drp *drp_ie) 202 + struct uwb_dev *src, struct uwb_ie_drp *drp_ie, 203 + struct uwb_rc_evt_drp *drp_evt) 454 204 { 455 205 struct device *dev = &rc->uwb_dev.dev; 206 + struct uwb_rsv_move *mv = &rsv->mv; 456 207 int status; 457 208 enum uwb_drp_reason reason_code; 209 + struct uwb_mas_bm mas; 458 210 459 211 status = uwb_ie_drp_status(drp_ie); 460 212 reason_code = uwb_ie_drp_reason_code(drp_ie); 213 + uwb_drp_ie_to_bm(&mas, drp_ie); 461 214 462 215 if (status) { 463 216 switch (reason_code) { 464 217 case UWB_DRP_REASON_ACCEPTED: 465 - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 466 - break; 467 - case UWB_DRP_REASON_MODIFIED: 468 - dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", 469 - reason_code, status); 218 + switch (rsv->state) { 219 + case UWB_RSV_STATE_O_PENDING: 220 + case UWB_RSV_STATE_O_INITIATED: 221 + case UWB_RSV_STATE_O_ESTABLISHED: 222 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 223 + break; 224 + case UWB_RSV_STATE_O_MODIFIED: 225 + if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { 226 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 227 + } else { 228 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); 229 + } 230 + break; 231 + 232 + case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn' t be a problem */ 233 + if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { 234 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 235 + } else { 236 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); 237 + } 238 + break; 239 + case UWB_RSV_STATE_O_MOVE_EXPANDING: 240 + if (bitmap_equal(mas.bm, mv->companion_mas.bm, UWB_NUM_MAS)) { 241 + /* Companion reservation accepted */ 242 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); 243 + } else { 244 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); 245 + } 246 + break; 247 + case UWB_RSV_STATE_O_MOVE_COMBINING: 248 + if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) 249 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); 250 + else 251 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); 252 + break; 253 + default: 254 + break; 255 + } 470 256 break; 471 257 default: 472 258 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", ··· 518 230 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 519 231 break; 520 232 case UWB_DRP_REASON_CONFLICT: 521 - case UWB_DRP_REASON_MODIFIED: 522 - dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", 523 - reason_code, status); 233 + /* resolve the conflict */ 234 + 
bitmap_complement(mas.bm, src->last_availability_bm, 235 + UWB_NUM_MAS); 236 + uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas); 524 237 break; 525 238 default: 526 239 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", ··· 530 241 } 531 242 } 532 243 244 + static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt) 245 + { 246 + unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US; 247 + mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us)); 248 + } 249 + 250 + static void uwb_cnflt_update_work(struct work_struct *work) 251 + { 252 + struct uwb_cnflt_alien *cnflt = container_of(work, 253 + struct uwb_cnflt_alien, 254 + cnflt_update_work); 255 + struct uwb_cnflt_alien *c; 256 + struct uwb_rc *rc = cnflt->rc; 257 + 258 + unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; 259 + 260 + mutex_lock(&rc->rsvs_mutex); 261 + 262 + list_del(&cnflt->rc_node); 263 + 264 + /* update rc global conflicting alien bitmap */ 265 + bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); 266 + 267 + list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) { 268 + bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, c->mas.bm, UWB_NUM_MAS); 269 + } 270 + 271 + queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); 272 + 273 + kfree(cnflt); 274 + mutex_unlock(&rc->rsvs_mutex); 275 + } 276 + 277 + static void uwb_cnflt_timer(unsigned long arg) 278 + { 279 + struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg; 280 + 281 + queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work); 282 + } 283 + 533 284 /* 534 - * Process a received DRP IE, it's either for a reservation owned by 535 - * the RC or targeted at it (or it's for a WUSB cluster reservation). 285 + * We have received an DRP_IE of type Alien BP and we need to make 286 + * sure we do not transmit in conflicting MASs. 
536 287 */ 537 - static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src, 538 - struct uwb_ie_drp *drp_ie) 288 + static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) 289 + { 290 + struct device *dev = &rc->uwb_dev.dev; 291 + struct uwb_mas_bm mas; 292 + struct uwb_cnflt_alien *cnflt; 293 + char buf[72]; 294 + unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; 295 + 296 + uwb_drp_ie_to_bm(&mas, drp_ie); 297 + bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS); 298 + 299 + list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) { 300 + if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) { 301 + /* Existing alien BP reservation conflicting 302 + * bitmap, just reset the timer */ 303 + uwb_cnflt_alien_stroke_timer(cnflt); 304 + return; 305 + } 306 + } 307 + 308 + /* New alien BP reservation conflicting bitmap */ 309 + 310 + /* alloc and initialize new uwb_cnflt_alien */ 311 + cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL); 312 + if (!cnflt) 313 + dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n"); 314 + INIT_LIST_HEAD(&cnflt->rc_node); 315 + init_timer(&cnflt->timer); 316 + cnflt->timer.function = uwb_cnflt_timer; 317 + cnflt->timer.data = (unsigned long)cnflt; 318 + 319 + cnflt->rc = rc; 320 + INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work); 321 + 322 + bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS); 323 + 324 + list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list); 325 + 326 + /* update rc global conflicting alien bitmap */ 327 + bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS); 328 + 329 + queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); 330 + 331 + /* start the timer */ 332 + uwb_cnflt_alien_stroke_timer(cnflt); 333 + } 334 + 335 + static void uwb_drp_process_not_involved(struct uwb_rc *rc, 336 + struct uwb_rc_evt_drp *drp_evt, 337 + struct uwb_ie_drp *drp_ie) 338 + { 339 + struct uwb_mas_bm mas; 340 + 341 + uwb_drp_ie_to_bm(&mas, drp_ie); 342 + uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); 343 + } 344 + 345 + static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src, 346 + struct uwb_rc_evt_drp *drp_evt, 347 + struct uwb_ie_drp *drp_ie) 539 348 { 540 349 struct uwb_rsv *rsv; 541 350 ··· 646 259 */ 647 260 return; 648 261 } 649 - 262 + 650 263 /* 651 264 * Do nothing with DRP IEs for reservations that have been 652 265 * terminated. ··· 655 268 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 656 269 return; 657 270 } 658 - 271 + 659 272 if (uwb_ie_drp_owner(drp_ie)) 660 - uwb_drp_process_target(rc, rsv, drp_ie); 273 + uwb_drp_process_target(rc, rsv, drp_ie, drp_evt); 661 274 else 662 - uwb_drp_process_owner(rc, rsv, drp_ie); 275 + uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt); 276 + 663 277 } 664 278 279 + 280 + static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) 281 + { 282 + return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0; 283 + } 284 + 285 + /* 286 + * Process a received DRP IE. 
287 + */ 288 + static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, 289 + struct uwb_dev *src, struct uwb_ie_drp *drp_ie) 290 + { 291 + if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP) 292 + uwb_drp_handle_alien_drp(rc, drp_ie); 293 + else if (uwb_drp_involves_us(rc, drp_ie)) 294 + uwb_drp_process_involved(rc, src, drp_evt, drp_ie); 295 + else 296 + uwb_drp_process_not_involved(rc, drp_evt, drp_ie); 297 + } 298 + 299 + /* 300 + * Process a received DRP Availability IE 301 + */ 302 + static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src, 303 + struct uwb_ie_drp_avail *drp_availability_ie) 304 + { 305 + bitmap_copy(src->last_availability_bm, 306 + drp_availability_ie->bmp, UWB_NUM_MAS); 307 + } 665 308 666 309 /* 667 310 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE) ··· 713 296 714 297 switch (ie_hdr->element_id) { 715 298 case UWB_IE_DRP_AVAILABILITY: 716 - /* FIXME: does something need to be done with this? */ 299 + uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr); 717 300 break; 718 301 case UWB_IE_DRP: 719 - uwb_drp_process(rc, src_dev, (struct uwb_ie_drp *)ie_hdr); 302 + uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr); 720 303 break; 721 304 default: 722 305 dev_warn(dev, "unexpected IE in DRP notification\n"); ··· 728 311 dev_warn(dev, "%d octets remaining in DRP notification\n", 729 312 (int)ielen); 730 313 } 731 - 732 - 733 - /* 734 - * Go through all the DRP IEs and find the ones that conflict with our 735 - * reservations. 736 - * 737 - * FIXME: must resolve the conflict according the the rules in 738 - * [ECMA-368]. 739 - */ 740 - static 741 - void uwb_drp_process_conflict_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, 742 - size_t ielen, struct uwb_dev *src_dev) 743 - { 744 - struct device *dev = &rc->uwb_dev.dev; 745 - struct uwb_ie_hdr *ie_hdr; 746 - struct uwb_ie_drp *drp_ie; 747 - void *ptr; 748 - 749 - ptr = drp_evt->ie_data; 750 - for (;;) { 751 - ie_hdr = uwb_ie_next(&ptr, &ielen); 752 - if (!ie_hdr) 753 - break; 754 - 755 - drp_ie = container_of(ie_hdr, struct uwb_ie_drp, hdr); 756 - 757 - /* FIXME: check if this DRP IE conflicts. */ 758 - } 759 - 760 - if (ielen > 0) 761 - dev_warn(dev, "%d octets remaining in DRP notification\n", 762 - (int)ielen); 763 - } 764 - 765 - 766 - /* 767 - * Terminate all reservations owned by, or targeted at, 'uwb_dev'. 768 - */ 769 - static void uwb_drp_terminate_all(struct uwb_rc *rc, struct uwb_dev *uwb_dev) 770 - { 771 - struct uwb_rsv *rsv; 772 - 773 - list_for_each_entry(rsv, &rc->reservations, rc_node) { 774 - if (rsv->owner == uwb_dev 775 - || (rsv->target.type == UWB_RSV_TARGET_DEV && rsv->target.dev == uwb_dev)) 776 - uwb_rsv_remove(rsv); 777 - } 778 - } 779 - 780 314 781 315 /** 782 316 * uwbd_evt_handle_rc_drp - handle a DRP_IE event ··· 769 401 size_t ielength, bytes_left; 770 402 struct uwb_dev_addr src_addr; 771 403 struct uwb_dev *src_dev; 772 - int reason; 773 404 774 405 /* Is there enough data to decode the event (and any IEs in 775 406 its payload)? 
*/ ··· 804 437 805 438 mutex_lock(&rc->rsvs_mutex); 806 439 807 - reason = uwb_rc_evt_drp_reason(drp_evt); 808 - 809 - switch (reason) { 810 - case UWB_DRP_NOTIF_DRP_IE_RCVD: 811 - uwb_drp_process_all(rc, drp_evt, ielength, src_dev); 812 - break; 813 - case UWB_DRP_NOTIF_CONFLICT: 814 - uwb_drp_process_conflict_all(rc, drp_evt, ielength, src_dev); 815 - break; 816 - case UWB_DRP_NOTIF_TERMINATE: 817 - uwb_drp_terminate_all(rc, src_dev); 818 - break; 819 - default: 820 - dev_warn(dev, "ignored DRP event with reason code: %d\n", reason); 821 - break; 822 - } 440 + /* We do not distinguish from the reason */ 441 + uwb_drp_process_all(rc, drp_evt, ielength, src_dev); 823 442 824 443 mutex_unlock(&rc->rsvs_mutex); 825 444
+1 -13
drivers/uwb/est.c
··· 40 40 * uwb_est_get_size() 41 41 */ 42 42 #include <linux/spinlock.h> 43 - #define D_LOCAL 0 44 - #include <linux/uwb/debug.h> 45 - #include "uwb-internal.h" 46 43 44 + #include "uwb-internal.h" 47 45 48 46 struct uwb_est { 49 47 u16 type_event_high; ··· 49 51 u8 entries; 50 52 const struct uwb_est_entry *entry; 51 53 }; 52 - 53 54 54 55 static struct uwb_est *uwb_est; 55 56 static u8 uwb_est_size; ··· 437 440 u8 *ptr = (u8 *) rceb; 438 441 439 442 read_lock_irqsave(&uwb_est_lock, flags); 440 - d_printf(2, dev, "Size query for event 0x%02x/%04x/%02x," 441 - " buffer size %ld\n", 442 - (unsigned) rceb->bEventType, 443 - (unsigned) le16_to_cpu(rceb->wEvent), 444 - (unsigned) rceb->bEventContext, 445 - (long) rceb_size); 446 443 size = -ENOSPC; 447 444 if (rceb_size < sizeof(*rceb)) 448 445 goto out; 449 446 event = le16_to_cpu(rceb->wEvent); 450 447 type_event_high = rceb->bEventType << 8 | (event & 0xff00) >> 8; 451 448 for (itr = 0; itr < uwb_est_used; itr++) { 452 - d_printf(3, dev, "Checking EST 0x%04x/%04x/%04x\n", 453 - uwb_est[itr].type_event_high, uwb_est[itr].vendor, 454 - uwb_est[itr].product); 455 449 if (uwb_est[itr].type_event_high != type_event_high) 456 450 continue; 457 451 size = uwb_est_get_size(rc, &uwb_est[itr],
+29 -24
drivers/uwb/hwa-rc.c
··· 51 51 * 52 52 * 53 53 */ 54 - #include <linux/version.h> 55 54 #include <linux/init.h> 56 55 #include <linux/module.h> 57 56 #include <linux/usb.h> 58 57 #include <linux/usb/wusb.h> 59 58 #include <linux/usb/wusb-wa.h> 60 59 #include <linux/uwb.h> 60 + 61 61 #include "uwb-internal.h" 62 - #define D_LOCAL 1 63 - #include <linux/uwb/debug.h> 64 62 65 63 /* The device uses commands and events from the WHCI specification, although 66 64 * reporting itself as WUSB compliant. */ ··· 629 631 630 632 switch (result = urb->status) { 631 633 case 0: 632 - d_printf(3, dev, "NEEP: receive stat %d, %zu bytes\n", 633 - urb->status, (size_t)urb->actual_length); 634 634 uwb_rc_neh_grok(hwarc->uwb_rc, urb->transfer_buffer, 635 635 urb->actual_length); 636 636 break; 637 637 case -ECONNRESET: /* Not an error, but a controlled situation; */ 638 638 case -ENOENT: /* (we killed the URB)...so, no broadcast */ 639 - d_printf(2, dev, "NEEP: URB reset/noent %d\n", urb->status); 640 639 goto out; 641 640 case -ESHUTDOWN: /* going away! */ 642 - d_printf(2, dev, "NEEP: URB down %d\n", urb->status); 643 641 goto out; 644 642 default: /* On general errors, retry unless it gets ugly */ 645 643 if (edc_inc(&hwarc->neep_edc, EDC_MAX_ERRORS, ··· 644 650 dev_err(dev, "NEEP: URB error %d\n", urb->status); 645 651 } 646 652 result = usb_submit_urb(urb, GFP_ATOMIC); 647 - d_printf(3, dev, "NEEP: submit %d\n", result); 648 653 if (result < 0) { 649 654 dev_err(dev, "NEEP: Can't resubmit URB (%d) resetting device\n", 650 655 result); ··· 752 759 itr_size = le16_to_cpu(usb_dev->actconfig->desc.wTotalLength); 753 760 while (itr_size >= sizeof(*hdr)) { 754 761 hdr = (struct usb_descriptor_header *) itr; 755 - d_printf(3, dev, "Extra device descriptor: " 756 - "type %02x/%u bytes @ %zu (%zu left)\n", 757 - hdr->bDescriptorType, hdr->bLength, 758 - (itr - usb_dev->rawdescriptors[actconfig_idx]), 759 - itr_size); 762 + dev_dbg(dev, "Extra device descriptor: " 763 + "type %02x/%u bytes @ %zu (%zu left)\n", 764 + hdr->bDescriptorType, hdr->bLength, 765 + (itr - usb_dev->rawdescriptors[actconfig_idx]), 766 + itr_size); 760 767 if (hdr->bDescriptorType == USB_DT_CS_RADIO_CONTROL) 761 768 goto found; 762 769 itr += hdr->bLength; ··· 788 795 goto error; 789 796 } 790 797 rc->version = version; 791 - d_printf(3, dev, "Device supports WUSB protocol version 0x%04x \n", 792 - rc->version); 798 + dev_dbg(dev, "Device supports WUSB protocol version 0x%04x \n", rc->version); 793 799 result = 0; 794 800 error: 795 801 return result; ··· 869 877 uwb_rc_rm(uwb_rc); 870 878 usb_put_intf(hwarc->usb_iface); 871 879 usb_put_dev(hwarc->usb_dev); 872 - d_printf(1, &hwarc->usb_iface->dev, "freed hwarc %p\n", hwarc); 873 880 kfree(hwarc); 874 881 uwb_rc_put(uwb_rc); /* when creating the device, refcount = 1 */ 882 + } 883 + 884 + static int hwarc_pre_reset(struct usb_interface *iface) 885 + { 886 + struct hwarc *hwarc = usb_get_intfdata(iface); 887 + struct uwb_rc *uwb_rc = hwarc->uwb_rc; 888 + 889 + uwb_rc_pre_reset(uwb_rc); 890 + return 0; 891 + } 892 + 893 + static int hwarc_post_reset(struct usb_interface *iface) 894 + { 895 + struct hwarc *hwarc = usb_get_intfdata(iface); 896 + struct uwb_rc *uwb_rc = hwarc->uwb_rc; 897 + 898 + uwb_rc_post_reset(uwb_rc); 899 + return 0; 875 900 } 876 901 877 902 /** USB device ID's that we handle */ ··· 907 898 908 899 static struct usb_driver hwarc_driver = { 909 900 .name = "hwa-rc", 901 + .id_table = hwarc_id_table, 910 902 .probe = hwarc_probe, 911 903 .disconnect = hwarc_disconnect, 912 - .id_table = 
hwarc_id_table, 904 + .pre_reset = hwarc_pre_reset, 905 + .post_reset = hwarc_post_reset, 913 906 }; 914 907 915 908 static int __init hwarc_driver_init(void) 916 909 { 917 - int result; 918 - result = usb_register(&hwarc_driver); 919 - if (result < 0) 920 - printk(KERN_ERR "HWA-RC: Cannot register USB driver: %d\n", 921 - result); 922 - return result; 923 - 910 + return usb_register(&hwarc_driver); 924 911 } 925 912 module_init(hwarc_driver_init); 926 913
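The new pre_reset/post_reset hooks let usbcore call back into the driver around a USB reset instead of unbinding and rebinding the interface; here they just forward to uwb_rc_pre_reset() and uwb_rc_post_reset(). Below is a minimal sketch of the same pattern for a generic driver, illustration only: struct example_dev and its fields are hypothetical.

#include <linux/usb.h>

struct example_dev {			/* hypothetical per-device state */
	struct usb_interface	*iface;
	bool			running;
};

static int example_pre_reset(struct usb_interface *iface)
{
	struct example_dev *d = usb_get_intfdata(iface);

	d->running = false;	/* quiesce: stop queuing I/O before the reset */
	return 0;
}

static int example_post_reset(struct usb_interface *iface)
{
	struct example_dev *d = usb_get_intfdata(iface);

	d->running = true;	/* reprogram/restart the device afterwards */
	return 0;
}

static struct usb_driver example_driver = {
	.name		= "example",
	/* .probe, .disconnect and .id_table as usual */
	.pre_reset	= example_pre_reset,
	.post_reset	= example_post_reset,
};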
+2 -8
drivers/uwb/i1480/dfu/dfu.c
··· 34 34 #include <linux/uwb.h> 35 35 #include <linux/random.h> 36 36 37 - #define D_LOCAL 0 38 - #include <linux/uwb/debug.h> 39 - 40 - /** 37 + /* 41 38 * i1480_rceb_check - Check RCEB for expected field values 42 39 * @i1480: pointer to device for which RCEB is being checked 43 40 * @rceb: RCEB being checked ··· 80 83 EXPORT_SYMBOL_GPL(i1480_rceb_check); 81 84 82 85 83 - /** 86 + /* 84 87 * Execute a Radio Control Command 85 88 * 86 89 * Command data has to be in i1480->cmd_buf. ··· 98 101 u8 expected_type = reply->bEventType; 99 102 u8 context; 100 103 101 - d_fnstart(3, i1480->dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size); 102 104 init_completion(&i1480->evt_complete); 103 105 i1480->evt_result = -EINPROGRESS; 104 106 do { ··· 146 150 result = i1480_rceb_check(i1480, i1480->evt_buf, cmd_name, context, 147 151 expected_type, expected_event); 148 152 error: 149 - d_fnend(3, i1480->dev, "(%p, %s, %zu) = %zd\n", 150 - i1480, cmd_name, cmd_size, result); 151 153 return result; 152 154 } 153 155 EXPORT_SYMBOL_GPL(i1480_cmd);
-18
drivers/uwb/i1480/dfu/mac.c
··· 31 31 #include <linux/uwb.h> 32 32 #include "i1480-dfu.h" 33 33 34 - #define D_LOCAL 0 35 - #include <linux/uwb/debug.h> 36 - 37 34 /* 38 35 * Descriptor for a continuous segment of MAC fw data 39 36 */ ··· 181 184 } 182 185 if (memcmp(i1480->cmd_buf, bin + src_itr, result)) { 183 186 u8 *buf = i1480->cmd_buf; 184 - d_printf(2, i1480->dev, 185 - "original data @ %p + %u, %zu bytes\n", 186 - bin, src_itr, result); 187 - d_dump(4, i1480->dev, bin + src_itr, result); 188 187 for (cnt = 0; cnt < result; cnt++) 189 188 if (bin[src_itr + cnt] != buf[cnt]) { 190 189 dev_err(i1480->dev, "byte failed at " ··· 217 224 struct fw_hdr *hdr_itr; 218 225 int verif_retry_count; 219 226 220 - d_fnstart(3, dev, "(%p, %p)\n", i1480, hdr); 221 227 /* Now, header by header, push them to the hw */ 222 228 for (hdr_itr = hdr; hdr_itr != NULL; hdr_itr = hdr_itr->next) { 223 229 verif_retry_count = 0; ··· 256 264 break; 257 265 } 258 266 } 259 - d_fnend(3, dev, "(%zd)\n", result); 260 267 return result; 261 268 } 262 269 ··· 328 337 const struct firmware *fw; 329 338 struct fw_hdr *fw_hdrs; 330 339 331 - d_fnstart(3, i1480->dev, "(%p, %s, %s)\n", i1480, fw_name, fw_tag); 332 340 result = request_firmware(&fw, fw_name, i1480->dev); 333 341 if (result < 0) /* Up to caller to complain on -ENOENT */ 334 342 goto out; 335 - d_printf(3, i1480->dev, "%s fw '%s': uploading\n", fw_tag, fw_name); 336 343 result = fw_hdrs_load(i1480, &fw_hdrs, fw->data, fw->size); 337 344 if (result < 0) { 338 345 dev_err(i1480->dev, "%s fw '%s': failed to parse firmware " ··· 352 363 out_release: 353 364 release_firmware(fw); 354 365 out: 355 - d_fnend(3, i1480->dev, "(%p, %s, %s) = %d\n", i1480, fw_name, fw_tag, 356 - result); 357 366 return result; 358 367 } 359 368 ··· 420 433 int result; 421 434 u32 *val = (u32 *) i1480->cmd_buf; 422 435 423 - d_fnstart(3, i1480->dev, "(i1480 %p)\n", i1480); 424 436 for (cnt = 0; cnt < 10; cnt++) { 425 437 msleep(100); 426 438 result = i1480->read(i1480, 0x80080000, 4); ··· 433 447 dev_err(i1480->dev, "Timed out waiting for fw to start\n"); 434 448 result = -ETIMEDOUT; 435 449 out: 436 - d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result); 437 450 return result; 438 451 439 452 } ··· 452 467 int result = 0, deprecated_name = 0; 453 468 struct i1480_rceb *rcebe = (void *) i1480->evt_buf; 454 469 455 - d_fnstart(3, i1480->dev, "(%p)\n", i1480); 456 470 result = __mac_fw_upload(i1480, i1480->mac_fw_name, "MAC"); 457 471 if (result == -ENOENT) { 458 472 result = __mac_fw_upload(i1480, i1480->mac_fw_name_deprecate, ··· 485 501 dev_err(i1480->dev, "MAC fw '%s': initialization event returns " 486 502 "wrong size (%zu bytes vs %zu needed)\n", 487 503 i1480->mac_fw_name, i1480->evt_result, sizeof(*rcebe)); 488 - dump_bytes(i1480->dev, rcebe, min(i1480->evt_result, (ssize_t)32)); 489 504 goto error_size; 490 505 } 491 506 result = -EIO; ··· 505 522 error_init_timeout: 506 523 error_size: 507 524 error_setup: 508 - d_fnend(3, i1480->dev, "(i1480 %p) = %d\n", i1480, result); 509 525 return result; 510 526 }
-27
drivers/uwb/i1480/dfu/usb.c
··· 35 35 * the functions are i1480_usb_NAME(). 36 36 */ 37 37 #include <linux/module.h> 38 - #include <linux/version.h> 39 38 #include <linux/usb.h> 40 39 #include <linux/interrupt.h> 41 40 #include <linux/delay.h> ··· 42 43 #include <linux/usb/wusb.h> 43 44 #include <linux/usb/wusb-wa.h> 44 45 #include "i1480-dfu.h" 45 - 46 - #define D_LOCAL 0 47 - #include <linux/uwb/debug.h> 48 - 49 46 50 47 struct i1480_usb { 51 48 struct i1480 i1480; ··· 113 118 struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); 114 119 size_t buffer_size, itr = 0; 115 120 116 - d_fnstart(3, i1480->dev, "(%p, 0x%08x, %p, %zu)\n", 117 - i1480, memory_address, buffer, size); 118 121 BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ 119 122 while (size > 0) { 120 123 buffer_size = size < i1480->buf_size ? size : i1480->buf_size; ··· 125 132 i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */); 126 133 if (result < 0) 127 134 break; 128 - d_printf(3, i1480->dev, 129 - "wrote @ 0x%08x %u bytes (of %zu bytes requested)\n", 130 - memory_address, result, buffer_size); 131 - d_dump(4, i1480->dev, i1480->cmd_buf, result); 132 135 itr += result; 133 136 memory_address += result; 134 137 size -= result; 135 138 } 136 - d_fnend(3, i1480->dev, "(%p, 0x%08x, %p, %zu) = %d\n", 137 - i1480, memory_address, buffer, size, result); 138 139 return result; 139 140 } 140 141 ··· 153 166 size_t itr, read_size = i1480->buf_size; 154 167 struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); 155 168 156 - d_fnstart(3, i1480->dev, "(%p, 0x%08x, %zu)\n", 157 - i1480, addr, size); 158 169 BUG_ON(size > i1480->buf_size); 159 170 BUG_ON(size & 0x3); /* Needs to be a multiple of 4 */ 160 171 BUG_ON(read_size > 512); ··· 186 201 } 187 202 result = bytes; 188 203 out: 189 - d_fnend(3, i1480->dev, "(%p, 0x%08x, %zu) = %zd\n", 190 - i1480, addr, size, result); 191 - if (result > 0) 192 - d_dump(4, i1480->dev, i1480->cmd_buf, result); 193 204 return result; 194 205 } 195 206 ··· 241 260 struct i1480_usb *i1480_usb = container_of(i1480, struct i1480_usb, i1480); 242 261 struct usb_endpoint_descriptor *epd; 243 262 244 - d_fnstart(3, dev, "(%p)\n", i1480); 245 263 init_completion(&i1480->evt_complete); 246 264 i1480->evt_result = -EINPROGRESS; 247 265 epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; ··· 262 282 goto error_wait; 263 283 } 264 284 usb_kill_urb(i1480_usb->neep_urb); 265 - d_fnend(3, dev, "(%p) = 0\n", i1480); 266 285 return 0; 267 286 268 287 error_wait: 269 288 usb_kill_urb(i1480_usb->neep_urb); 270 289 error_submit: 271 290 i1480->evt_result = result; 272 - d_fnend(3, dev, "(%p) = %d\n", i1480, result); 273 291 return result; 274 292 } 275 293 ··· 298 320 struct uwb_rccb *cmd = i1480->cmd_buf; 299 321 u8 iface_no; 300 322 301 - d_fnstart(3, dev, "(%p, %s, %zu)\n", i1480, cmd_name, cmd_size); 302 323 /* Post a read on the notification & event endpoint */ 303 324 iface_no = i1480_usb->usb_iface->cur_altsetting->desc.bInterfaceNumber; 304 325 epd = &i1480_usb->usb_iface->cur_altsetting->endpoint[0].desc; ··· 325 348 cmd_name, result); 326 349 goto error_submit_ep0; 327 350 } 328 - d_fnend(3, dev, "(%p, %s, %zu) = %d\n", 329 - i1480, cmd_name, cmd_size, result); 330 351 return result; 331 352 332 353 error_submit_ep0: 333 354 usb_kill_urb(i1480_usb->neep_urb); 334 355 error_submit_ep1: 335 - d_fnend(3, dev, "(%p, %s, %zu) = %d\n", 336 - i1480, cmd_name, cmd_size, result); 337 356 return result; 338 357 } 339 358
+2 -3
drivers/uwb/i1480/i1480u-wlp/lc.c
··· 55 55 * is being removed. 56 56 * i1480u_rm() 57 57 */ 58 - #include <linux/version.h> 59 58 #include <linux/if_arp.h> 60 59 #include <linux/etherdevice.h> 61 - #include <linux/uwb/debug.h> 60 + 62 61 #include "i1480u-wlp.h" 63 62 64 63 ··· 206 207 wlp->fill_device_info = i1480u_fill_device_info; 207 208 wlp->stop_queue = i1480u_stop_queue; 208 209 wlp->start_queue = i1480u_start_queue; 209 - result = wlp_setup(wlp, rc); 210 + result = wlp_setup(wlp, rc, net_dev); 210 211 if (result < 0) { 211 212 dev_err(&iface->dev, "Cannot setup WLP\n"); 212 213 goto error_wlp_setup;
+12 -41
drivers/uwb/i1480/i1480u-wlp/netdev.c
··· 41 41 42 42 #include <linux/if_arp.h> 43 43 #include <linux/etherdevice.h> 44 - #include <linux/uwb/debug.h> 44 + 45 45 #include "i1480u-wlp.h" 46 46 47 47 struct i1480u_cmd_set_ip_mas { ··· 207 207 result = i1480u_rx_setup(i1480u); /* Alloc RX stuff */ 208 208 if (result < 0) 209 209 goto error_rx_setup; 210 + 211 + result = uwb_radio_start(&wlp->pal); 212 + if (result < 0) 213 + goto error_radio_start; 214 + 210 215 netif_wake_queue(net_dev); 211 216 #ifdef i1480u_FLOW_CONTROL 212 217 result = usb_submit_urb(i1480u->notif_urb, GFP_KERNEL);; ··· 220 215 goto error_notif_urb_submit; 221 216 } 222 217 #endif 223 - i1480u->uwb_notifs_handler.cb = i1480u_uwb_notifs_cb; 224 - i1480u->uwb_notifs_handler.data = i1480u; 225 - if (uwb_bg_joined(rc)) 226 - netif_carrier_on(net_dev); 227 - else 228 - netif_carrier_off(net_dev); 229 - uwb_notifs_register(rc, &i1480u->uwb_notifs_handler); 230 218 /* Interface is up with an address, now we can create WSS */ 231 219 result = wlp_wss_setup(net_dev, &wlp->wss); 232 220 if (result < 0) { 233 221 dev_err(dev, "Can't create WSS: %d. \n", result); 234 - goto error_notif_deregister; 222 + goto error_wss_setup; 235 223 } 236 224 return 0; 237 - error_notif_deregister: 238 - uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler); 225 + error_wss_setup: 239 226 #ifdef i1480u_FLOW_CONTROL 227 + usb_kill_urb(i1480u->notif_urb); 240 228 error_notif_urb_submit: 241 229 #endif 230 + uwb_radio_stop(&wlp->pal); 231 + error_radio_start: 242 232 netif_stop_queue(net_dev); 243 233 i1480u_rx_release(i1480u); 244 234 error_rx_setup: ··· 248 248 { 249 249 struct i1480u *i1480u = netdev_priv(net_dev); 250 250 struct wlp *wlp = &i1480u->wlp; 251 - struct uwb_rc *rc = wlp->rc; 252 251 253 252 BUG_ON(wlp->rc == NULL); 254 253 wlp_wss_remove(&wlp->wss); 255 - uwb_notifs_deregister(rc, &i1480u->uwb_notifs_handler); 256 254 netif_carrier_off(net_dev); 257 255 #ifdef i1480u_FLOW_CONTROL 258 256 usb_kill_urb(i1480u->notif_urb); 259 257 #endif 260 258 netif_stop_queue(net_dev); 259 + uwb_radio_stop(&wlp->pal); 261 260 i1480u_rx_release(i1480u); 262 261 i1480u_tx_release(i1480u); 263 262 return 0; ··· 300 301 return -ERANGE; 301 302 net_dev->mtu = mtu; 302 303 return 0; 303 - } 304 - 305 - 306 - /** 307 - * Callback function to handle events from UWB 308 - * When we see other devices we know the carrier is ok, 309 - * if we are the only device in the beacon group we set the carrier 310 - * state to off. 311 - * */ 312 - void i1480u_uwb_notifs_cb(void *data, struct uwb_dev *uwb_dev, 313 - enum uwb_notifs event) 314 - { 315 - struct i1480u *i1480u = data; 316 - struct net_device *net_dev = i1480u->net_dev; 317 - struct device *dev = &i1480u->usb_iface->dev; 318 - switch (event) { 319 - case UWB_NOTIF_BG_JOIN: 320 - netif_carrier_on(net_dev); 321 - dev_info(dev, "Link is up\n"); 322 - break; 323 - case UWB_NOTIF_BG_LEAVE: 324 - netif_carrier_off(net_dev); 325 - dev_info(dev, "Link is down\n"); 326 - break; 327 - default: 328 - dev_err(dev, "don't know how to handle event %d from uwb\n", 329 - event); 330 - } 331 304 } 332 305 333 306 /**
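Note: with the BG_JOIN/BG_LEAVE notifications gone, this hunk no longer shows where the link carrier gets toggled; wlp_setup() now receives the net_device, which suggests the WLP substack takes that over. Purely as an illustration of the new PAL interface (not code from this merge), a channel_changed hook could drive the carrier as below; the function name and the container_of() layout are assumptions.

static void i1480u_channel_changed(struct uwb_pal *pal, int channel)
{
	struct i1480u *i1480u = container_of(pal, struct i1480u, wlp.pal);

	/* channel == -1 means the radio manager stopped beaconing */
	if (channel == -1)
		netif_carrier_off(i1480u->net_dev);
	else
		netif_carrier_on(i1480u->net_dev);
}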
+7 -18
drivers/uwb/i1480/i1480u-wlp/rx.c
··· 68 68 #include <linux/etherdevice.h> 69 69 #include "i1480u-wlp.h" 70 70 71 - #define D_LOCAL 0 72 - #include <linux/uwb/debug.h> 73 - 74 - 75 - /** 71 + /* 76 72 * Setup the RX context 77 73 * 78 74 * Each URB is provided with a transfer_buffer that is the data field ··· 125 129 } 126 130 127 131 128 - /** Release resources associated to the rx context */ 132 + /* Release resources associated to the rx context */ 129 133 void i1480u_rx_release(struct i1480u *i1480u) 130 134 { 131 135 int cnt; ··· 151 155 } 152 156 } 153 157 154 - /** Fix an out-of-sequence packet */ 158 + /* Fix an out-of-sequence packet */ 155 159 #define i1480u_fix(i1480u, msg...) \ 156 160 do { \ 157 161 if (printk_ratelimit()) \ ··· 162 166 } while (0) 163 167 164 168 165 - /** Drop an out-of-sequence packet */ 169 + /* Drop an out-of-sequence packet */ 166 170 #define i1480u_drop(i1480u, msg...) \ 167 171 do { \ 168 172 if (printk_ratelimit()) \ ··· 173 177 174 178 175 179 176 - /** Finalizes setting up the SKB and delivers it 180 + /* Finalizes setting up the SKB and delivers it 177 181 * 178 182 * We first pass the incoming frame to WLP substack for verification. It 179 183 * may also be a WLP association frame in which case WLP will take over the ··· 188 192 struct net_device *net_dev = i1480u->net_dev; 189 193 struct device *dev = &i1480u->usb_iface->dev; 190 194 191 - d_printf(6, dev, "RX delivered pre skb(%p), %u bytes\n", 192 - i1480u->rx_skb, i1480u->rx_skb->len); 193 - d_dump(7, dev, i1480u->rx_skb->data, i1480u->rx_skb->len); 194 195 should_parse = wlp_receive_frame(dev, &i1480u->wlp, i1480u->rx_skb, 195 196 &i1480u->rx_srcaddr); 196 197 if (!should_parse) 197 198 goto out; 198 199 i1480u->rx_skb->protocol = eth_type_trans(i1480u->rx_skb, net_dev); 199 - d_printf(5, dev, "RX delivered skb(%p), %u bytes\n", 200 - i1480u->rx_skb, i1480u->rx_skb->len); 201 - d_dump(7, dev, i1480u->rx_skb->data, 202 - i1480u->rx_skb->len > 72 ? 72 : i1480u->rx_skb->len); 203 200 i1480u->stats.rx_packets++; 204 201 i1480u->stats.rx_bytes += i1480u->rx_untd_pkt_size; 205 202 net_dev->last_rx = jiffies; ··· 205 216 } 206 217 207 218 208 - /** 219 + /* 209 220 * Process a buffer of data received from the USB RX endpoint 210 221 * 211 222 * First fragment arrives with next or last fragment. All other fragments ··· 393 404 } 394 405 395 406 396 - /** 407 + /* 397 408 * Called when an RX URB has finished receiving or has found some kind 398 409 * of error condition. 399 410 *
+1 -2
drivers/uwb/i1480/i1480u-wlp/sysfs.c
··· 25 25 26 26 #include <linux/netdevice.h> 27 27 #include <linux/etherdevice.h> 28 - #include <linux/uwb/debug.h> 29 28 #include <linux/device.h> 29 + 30 30 #include "i1480u-wlp.h" 31 31 32 32 ··· 226 226 * (CLASS_DEVICE_ATTR or DEVICE_ATTR) and i1480u_ATTR_NAME produces a 227 227 * class_device_attr_NAME or device_attr_NAME (for group registration). 228 228 */ 229 - #include <linux/version.h> 230 229 231 230 #define i1480u_SHOW(name, fn, param) \ 232 231 static ssize_t i1480u_show_##name(struct device *dev, \
+8 -58
drivers/uwb/i1480/i1480u-wlp/tx.c
··· 55 55 */ 56 56 57 57 #include "i1480u-wlp.h" 58 - #define D_LOCAL 5 59 - #include <linux/uwb/debug.h> 60 58 61 59 enum { 62 60 /* This is only for Next and Last TX packets */ ··· 62 64 - sizeof(struct untd_hdr_rst), 63 65 }; 64 66 65 - /** Free resources allocated to a i1480u tx context. */ 67 + /* Free resources allocated to a i1480u tx context. */ 66 68 static 67 69 void i1480u_tx_free(struct i1480u_tx *wtx) 68 70 { ··· 97 99 } 98 100 99 101 100 - /** 102 + /* 101 103 * Callback for a completed tx USB URB. 102 104 * 103 105 * TODO: ··· 147 149 <= i1480u->tx_inflight.threshold 148 150 && netif_queue_stopped(net_dev) 149 151 && i1480u->tx_inflight.threshold != 0) { 150 - if (d_test(2) && printk_ratelimit()) 151 - d_printf(2, dev, "Restart queue. \n"); 152 152 netif_start_queue(net_dev); 153 153 atomic_inc(&i1480u->tx_inflight.restart_count); 154 154 } ··· 154 158 } 155 159 156 160 157 - /** 161 + /* 158 162 * Given a buffer that doesn't fit in a single fragment, create an 159 163 * scatter/gather structure for delivery to the USB pipe. 160 164 * ··· 249 253 /* Now do each remaining fragment */ 250 254 result = -EINVAL; 251 255 while (pl_size_left > 0) { 252 - d_printf(5, NULL, "ITR HDR: pl_size_left %zu buf_itr %zu\n", 253 - pl_size_left, buf_itr - wtx->buf); 254 256 if (buf_itr + sizeof(*untd_hdr_rst) - wtx->buf 255 257 > wtx->buf_size) { 256 258 printk(KERN_ERR "BUG: no space for header\n"); 257 259 goto error_bug; 258 260 } 259 - d_printf(5, NULL, "ITR HDR 2: pl_size_left %zu buf_itr %zu\n", 260 - pl_size_left, buf_itr - wtx->buf); 261 261 untd_hdr_rst = buf_itr; 262 262 buf_itr += sizeof(*untd_hdr_rst); 263 263 if (pl_size_left > i1480u_MAX_PL_SIZE) { ··· 263 271 frg_pl_size = pl_size_left; 264 272 untd_hdr_set_type(&untd_hdr_rst->hdr, i1480u_PKT_FRAG_LST); 265 273 } 266 - d_printf(5, NULL, 267 - "ITR PL: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n", 268 - pl_size_left, buf_itr - wtx->buf, frg_pl_size); 269 274 untd_hdr_set_rx_tx(&untd_hdr_rst->hdr, 0); 270 275 untd_hdr_rst->hdr.len = cpu_to_le16(frg_pl_size); 271 276 untd_hdr_rst->padding = 0; ··· 275 286 buf_itr += frg_pl_size; 276 287 pl_itr += frg_pl_size; 277 288 pl_size_left -= frg_pl_size; 278 - d_printf(5, NULL, 279 - "ITR PL 2: pl_size_left %zu buf_itr %zu frg_pl_size %zu\n", 280 - pl_size_left, buf_itr - wtx->buf, frg_pl_size); 281 289 } 282 290 dev_kfree_skb_irq(skb); 283 291 return 0; ··· 294 308 } 295 309 296 310 297 - /** 311 + /* 298 312 * Given a buffer that fits in a single fragment, fill out a @wtx 299 313 * struct for transmitting it down the USB pipe. 
300 314 * ··· 332 346 } 333 347 334 348 335 - /** 349 + /* 336 350 * Given a skb to transmit, massage it to become palatable for the TX pipe 337 351 * 338 352 * This will break the buffer in chunks smaller than ··· 411 425 return NULL; 412 426 } 413 427 414 - /** 428 + /* 415 429 * Actual fragmentation and transmission of frame 416 430 * 417 431 * @wlp: WLP substack data structure ··· 433 447 struct i1480u_tx *wtx; 434 448 struct wlp_tx_hdr *wlp_tx_hdr; 435 449 static unsigned char dev_bcast[2] = { 0xff, 0xff }; 436 - #if 0 437 - int lockup = 50; 438 - #endif 439 450 440 - d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len, 441 - net_dev); 442 451 BUG_ON(i1480u->wlp.rc == NULL); 443 452 if ((net_dev->flags & IFF_UP) == 0) 444 453 goto out; 445 454 result = -EBUSY; 446 455 if (atomic_read(&i1480u->tx_inflight.count) >= i1480u->tx_inflight.max) { 447 - if (d_test(2) && printk_ratelimit()) 448 - d_printf(2, dev, "Max frames in flight " 449 - "stopping queue.\n"); 450 456 netif_stop_queue(net_dev); 451 457 goto error_max_inflight; 452 458 } ··· 467 489 wlp_tx_hdr_set_delivery_id_type(wlp_tx_hdr, i1480u->options.pca_base_priority); 468 490 } 469 491 470 - #if 0 471 - dev_info(dev, "TX delivering skb -> USB, %zu bytes\n", skb->len); 472 - dump_bytes(dev, skb->data, skb->len > 72 ? 72 : skb->len); 473 - #endif 474 - #if 0 475 - /* simulates a device lockup after every lockup# packets */ 476 - if (lockup && ((i1480u->stats.tx_packets + 1) % lockup) == 0) { 477 - /* Simulate a dropped transmit interrupt */ 478 - net_dev->trans_start = jiffies; 479 - netif_stop_queue(net_dev); 480 - dev_err(dev, "Simulate lockup at %ld\n", jiffies); 481 - return result; 482 - } 483 - #endif 484 - 485 492 result = usb_submit_urb(wtx->urb, GFP_ATOMIC); /* Go baby */ 486 493 if (result < 0) { 487 494 dev_err(dev, "TX: cannot submit URB: %d\n", result); ··· 476 513 } 477 514 atomic_inc(&i1480u->tx_inflight.count); 478 515 net_dev->trans_start = jiffies; 479 - d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, 480 - net_dev, result); 481 516 return result; 482 517 483 518 error_tx_urb_submit: ··· 483 522 error_wtx_alloc: 484 523 error_max_inflight: 485 524 out: 486 - d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, 487 - net_dev, result); 488 525 return result; 489 526 } 490 527 491 528 492 - /** 529 + /* 493 530 * Transmit an skb Called when an skbuf has to be transmitted 494 531 * 495 532 * The skb is first passed to WLP substack to ensure this is a valid ··· 510 551 struct device *dev = &i1480u->usb_iface->dev; 511 552 struct uwb_dev_addr dst; 512 553 513 - d_fnstart(6, dev, "(skb %p (%u), net_dev %p)\n", skb, skb->len, 514 - net_dev); 515 - BUG_ON(i1480u->wlp.rc == NULL); 516 554 if ((net_dev->flags & IFF_UP) == 0) 517 555 goto error; 518 556 result = wlp_prepare_tx_frame(dev, &i1480u->wlp, skb, &dst); ··· 518 562 "Dropping packet.\n", result); 519 563 goto error; 520 564 } else if (result == 1) { 521 - d_printf(6, dev, "WLP will transmit frame. \n"); 522 565 /* trans_start time will be set when WLP actually transmits 523 566 * the frame */ 524 567 goto out; 525 568 } 526 - d_printf(6, dev, "Transmitting frame. 
\n"); 527 569 result = i1480u_xmit_frame(&i1480u->wlp, skb, &dst); 528 570 if (result < 0) { 529 571 dev_err(dev, "Frame TX failed (%d).\n", result); 530 572 goto error; 531 573 } 532 - d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, 533 - net_dev, result); 534 574 return NETDEV_TX_OK; 535 575 error: 536 576 dev_kfree_skb_any(skb); 537 577 i1480u->stats.tx_dropped++; 538 578 out: 539 - d_fnend(6, dev, "(skb %p (%u), net_dev %p) = %d\n", skb, skb->len, 540 - net_dev, result); 541 579 return NETDEV_TX_OK; 542 580 } 543 581 544 582 545 - /** 583 + /* 546 584 * Called when a pkt transmission doesn't complete in a reasonable period 547 585 * Device reset may sleep - do it outside of interrupt context (delayed) 548 586 */
+55
drivers/uwb/ie-rcv.c
··· 1 + /* 2 + * Ultra Wide Band 3 + * IE Received notification handling. 4 + * 5 + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 6 + * 7 + * This program is free software; you can redistribute it and/or 8 + * modify it under the terms of the GNU General Public License version 9 + * 2 as published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope that it will be useful, 12 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 + * GNU General Public License for more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 + */ 19 + 20 + #include <linux/errno.h> 21 + #include <linux/module.h> 22 + #include <linux/device.h> 23 + #include <linux/bitmap.h> 24 + #include "uwb-internal.h" 25 + 26 + /* 27 + * Process an incoming IE Received notification. 28 + */ 29 + int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *evt) 30 + { 31 + int result = -EINVAL; 32 + struct device *dev = &evt->rc->uwb_dev.dev; 33 + struct uwb_rc_evt_ie_rcv *iercv; 34 + size_t iesize; 35 + 36 + /* Is there enough data to decode it? */ 37 + if (evt->notif.size < sizeof(*iercv)) { 38 + dev_err(dev, "IE Received notification: Not enough data to " 39 + "decode (%zu vs %zu bytes needed)\n", 40 + evt->notif.size, sizeof(*iercv)); 41 + goto error; 42 + } 43 + iercv = container_of(evt->notif.rceb, struct uwb_rc_evt_ie_rcv, rceb); 44 + iesize = le16_to_cpu(iercv->wIELength); 45 + 46 + dev_dbg(dev, "IE received, element ID=%d\n", iercv->IEData[0]); 47 + 48 + if (iercv->IEData[0] == UWB_RELINQUISH_REQUEST_IE) { 49 + dev_warn(dev, "unhandled Relinquish Request IE\n"); 50 + } 51 + 52 + return 0; 53 + error: 54 + return result; 55 + }
+147 -310
drivers/uwb/ie.c
··· 25 25 */ 26 26 27 27 #include "uwb-internal.h" 28 - #define D_LOCAL 0 29 - #include <linux/uwb/debug.h> 30 28 31 29 /** 32 30 * uwb_ie_next - get the next IE in a buffer ··· 59 61 EXPORT_SYMBOL_GPL(uwb_ie_next); 60 62 61 63 /** 64 + * uwb_ie_dump_hex - print IEs to a character buffer 65 + * @ies: the IEs to print. 66 + * @len: length of all the IEs. 67 + * @buf: the destination buffer. 68 + * @size: size of @buf. 69 + * 70 + * Returns the number of characters written. 71 + */ 72 + int uwb_ie_dump_hex(const struct uwb_ie_hdr *ies, size_t len, 73 + char *buf, size_t size) 74 + { 75 + void *ptr; 76 + const struct uwb_ie_hdr *ie; 77 + int r = 0; 78 + u8 *d; 79 + 80 + ptr = (void *)ies; 81 + for (;;) { 82 + ie = uwb_ie_next(&ptr, &len); 83 + if (!ie) 84 + break; 85 + 86 + r += scnprintf(buf + r, size - r, "%02x %02x", 87 + (unsigned)ie->element_id, 88 + (unsigned)ie->length); 89 + d = (uint8_t *)ie + sizeof(struct uwb_ie_hdr); 90 + while (d != ptr && r < size) 91 + r += scnprintf(buf + r, size - r, " %02x", (unsigned)*d++); 92 + if (r < size) 93 + buf[r++] = '\n'; 94 + }; 95 + 96 + return r; 97 + } 98 + 99 + /** 62 100 * Get the IEs that a radio controller is sending in its beacon 63 101 * 64 102 * @uwb_rc: UWB Radio Controller ··· 104 70 * anything. Once done with the iedata buffer, call 105 71 * uwb_rc_ie_release(iedata). Don't call kfree on it. 106 72 */ 73 + static 107 74 ssize_t uwb_rc_get_ie(struct uwb_rc *uwb_rc, struct uwb_rc_evt_get_ie **pget_ie) 108 75 { 109 76 ssize_t result; ··· 113 78 struct uwb_rceb *reply = NULL; 114 79 struct uwb_rc_evt_get_ie *get_ie; 115 80 116 - d_fnstart(3, dev, "(%p, %p)\n", uwb_rc, pget_ie); 117 - result = -ENOMEM; 118 81 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 119 82 if (cmd == NULL) 120 - goto error_kzalloc; 83 + return -ENOMEM; 84 + 121 85 cmd->bCommandType = UWB_RC_CET_GENERAL; 122 86 cmd->wCommand = cpu_to_le16(UWB_RC_CMD_GET_IE); 123 87 result = uwb_rc_vcmd(uwb_rc, "GET_IE", cmd, sizeof(*cmd), 124 88 UWB_RC_CET_GENERAL, UWB_RC_CMD_GET_IE, 125 89 &reply); 90 + kfree(cmd); 126 91 if (result < 0) 127 - goto error_cmd; 92 + return result; 93 + 128 94 get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb); 129 95 if (result < sizeof(*get_ie)) { 130 96 dev_err(dev, "not enough data returned for decoding GET IE " 131 97 "(%zu bytes received vs %zu needed)\n", 132 98 result, sizeof(*get_ie)); 133 - result = -EINVAL; 99 + return -EINVAL; 134 100 } else if (result < sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)) { 135 101 dev_err(dev, "not enough data returned for decoding GET IE " 136 102 "payload (%zu bytes received vs %zu needed)\n", result, 137 103 sizeof(*get_ie) + le16_to_cpu(get_ie->wIELength)); 138 - result = -EINVAL; 139 - } else 140 - *pget_ie = get_ie; 141 - error_cmd: 142 - kfree(cmd); 143 - error_kzalloc: 144 - d_fnend(3, dev, "(%p, %p) = %d\n", uwb_rc, pget_ie, (int)result); 145 - return result; 146 - } 147 - EXPORT_SYMBOL_GPL(uwb_rc_get_ie); 148 - 149 - 150 - /* 151 - * Given a pointer to an IE, print it in ASCII/hex followed by a new line 152 - * 153 - * @ie_hdr: pointer to the IE header. Length is in there, and it is 154 - * guaranteed that the ie_hdr->length bytes following it are 155 - * safely accesible. 
156 - * 157 - * @_data: context data passed from uwb_ie_for_each(), an struct output_ctx 158 - */ 159 - int uwb_ie_dump_hex(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr, 160 - size_t offset, void *_ctx) 161 - { 162 - struct uwb_buf_ctx *ctx = _ctx; 163 - const u8 *pl = (void *)(ie_hdr + 1); 164 - u8 pl_itr; 165 - 166 - ctx->bytes += scnprintf(ctx->buf + ctx->bytes, ctx->size - ctx->bytes, 167 - "%02x %02x ", (unsigned) ie_hdr->element_id, 168 - (unsigned) ie_hdr->length); 169 - pl_itr = 0; 170 - while (pl_itr < ie_hdr->length && ctx->bytes < ctx->size) 171 - ctx->bytes += scnprintf(ctx->buf + ctx->bytes, 172 - ctx->size - ctx->bytes, 173 - "%02x ", (unsigned) pl[pl_itr++]); 174 - if (ctx->bytes < ctx->size) 175 - ctx->buf[ctx->bytes++] = '\n'; 176 - return 0; 177 - } 178 - EXPORT_SYMBOL_GPL(uwb_ie_dump_hex); 179 - 180 - 181 - /** 182 - * Verify that a pointer in a buffer points to valid IE 183 - * 184 - * @start: pointer to start of buffer in which IE appears 185 - * @itr: pointer to IE inside buffer that will be verified 186 - * @top: pointer to end of buffer 187 - * 188 - * @returns: 0 if IE is valid, <0 otherwise 189 - * 190 - * Verification involves checking that the buffer can contain a 191 - * header and the amount of data reported in the IE header can be found in 192 - * the buffer. 193 - */ 194 - static 195 - int uwb_rc_ie_verify(struct uwb_dev *uwb_dev, const void *start, 196 - const void *itr, const void *top) 197 - { 198 - struct device *dev = &uwb_dev->dev; 199 - const struct uwb_ie_hdr *ie_hdr; 200 - 201 - if (top - itr < sizeof(*ie_hdr)) { 202 - dev_err(dev, "Bad IE: no data to decode header " 203 - "(%zu bytes left vs %zu needed) at offset %zu\n", 204 - top - itr, sizeof(*ie_hdr), itr - start); 205 104 return -EINVAL; 206 105 } 207 - ie_hdr = itr; 208 - itr += sizeof(*ie_hdr); 209 - if (top - itr < ie_hdr->length) { 210 - dev_err(dev, "Bad IE: not enough data for payload " 211 - "(%zu bytes left vs %zu needed) at offset %zu\n", 212 - top - itr, (size_t)ie_hdr->length, 213 - (void *)ie_hdr - start); 214 - return -EINVAL; 215 - } 216 - return 0; 217 - } 218 106 219 - 220 - /** 221 - * Walk a buffer filled with consecutive IE's a buffer 222 - * 223 - * @uwb_dev: UWB device this IEs belong to (for err messages mainly) 224 - * 225 - * @fn: function to call with each IE; if it returns 0, we keep 226 - * traversing the buffer. If it returns !0, we'll stop and return 227 - * that value. 228 - * 229 - * @data: pointer passed to @fn 230 - * 231 - * @buf: buffer where the consecutive IEs are located 232 - * 233 - * @size: size of @buf 234 - * 235 - * Each IE is checked for basic correctness (there is space left for 236 - * the header and the payload). If that test is failed, we stop 237 - * processing. For every good IE, @fn is called. 
238 - */ 239 - ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data, 240 - const void *buf, size_t size) 241 - { 242 - ssize_t result = 0; 243 - const struct uwb_ie_hdr *ie_hdr; 244 - const void *itr = buf, *top = itr + size; 245 - 246 - while (itr < top) { 247 - if (uwb_rc_ie_verify(uwb_dev, buf, itr, top) != 0) 248 - break; 249 - ie_hdr = itr; 250 - itr += sizeof(*ie_hdr) + ie_hdr->length; 251 - result = fn(uwb_dev, ie_hdr, itr - buf, data); 252 - if (result != 0) 253 - break; 254 - } 107 + *pget_ie = get_ie; 255 108 return result; 256 109 } 257 - EXPORT_SYMBOL_GPL(uwb_ie_for_each); 258 110 259 111 260 112 /** ··· 178 256 return result; 179 257 } 180 258 181 - /** 182 - * Determine by IE id if IE is host settable 183 - * WUSB 1.0 [8.6.2.8 Table 8.85] 184 - * 185 - * EXCEPTION: 186 - * All but UWB_IE_WLP appears in Table 8.85 from WUSB 1.0. Setting this IE 187 - * is required for the WLP substack to perform association with its WSS so 188 - * we hope that the WUSB spec will be changed to reflect this. 189 - */ 190 - static 191 - int uwb_rc_ie_is_host_settable(enum uwb_ie element_id) 192 - { 193 - if (element_id == UWB_PCA_AVAILABILITY || 194 - element_id == UWB_BP_SWITCH_IE || 195 - element_id == UWB_MAC_CAPABILITIES_IE || 196 - element_id == UWB_PHY_CAPABILITIES_IE || 197 - element_id == UWB_APP_SPEC_PROBE_IE || 198 - element_id == UWB_IDENTIFICATION_IE || 199 - element_id == UWB_MASTER_KEY_ID_IE || 200 - element_id == UWB_IE_WLP || 201 - element_id == UWB_APP_SPEC_IE) 202 - return 1; 203 - return 0; 204 - } 205 - 206 - 207 - /** 208 - * Extract Host Settable IEs from IE 209 - * 210 - * @ie_data: pointer to buffer containing all IEs 211 - * @size: size of buffer 212 - * 213 - * @returns: length of buffer that only includes host settable IEs 214 - * 215 - * Given a buffer of IEs we move all Host Settable IEs to front of buffer 216 - * by overwriting the IEs that are not Host Settable. 217 - * Buffer length is adjusted accordingly. 218 - */ 219 - static 220 - ssize_t uwb_rc_parse_host_settable_ie(struct uwb_dev *uwb_dev, 221 - void *ie_data, size_t size) 222 - { 223 - size_t new_len = size; 224 - struct uwb_ie_hdr *ie_hdr; 225 - size_t ie_length; 226 - void *itr = ie_data, *top = itr + size; 227 - 228 - while (itr < top) { 229 - if (uwb_rc_ie_verify(uwb_dev, ie_data, itr, top) != 0) 230 - break; 231 - ie_hdr = itr; 232 - ie_length = sizeof(*ie_hdr) + ie_hdr->length; 233 - if (uwb_rc_ie_is_host_settable(ie_hdr->element_id)) { 234 - itr += ie_length; 235 - } else { 236 - memmove(itr, itr + ie_length, top - (itr + ie_length)); 237 - new_len -= ie_length; 238 - top -= ie_length; 239 - } 240 - } 241 - return new_len; 242 - } 243 - 244 - 245 259 /* Cleanup the whole IE management subsystem */ 246 260 void uwb_rc_ie_init(struct uwb_rc *uwb_rc) 247 261 { ··· 186 328 187 329 188 330 /** 189 - * Set up cache for host settable IEs currently being transmitted 331 + * uwb_rc_ie_setup - setup a radio controller's IE manager 332 + * @uwb_rc: the radio controller. 190 333 * 191 - * First we just call GET-IE to get the current IEs being transmitted 192 - * (or we workaround and pretend we did) and (because the format is 193 - * the same) reuse that as the IE cache (with the command prefix, as 194 - * explained in 'struct uwb_rc'). 334 + * The current set of IEs are obtained from the hardware with a GET-IE 335 + * command (since the radio controller is not yet beaconing this will 336 + * be just the hardware's MAC and PHY Capability IEs). 
195 337 * 196 - * @returns: size of cache created 338 + * Returns 0 on success; -ve on an error. 197 339 */ 198 - ssize_t uwb_rc_ie_setup(struct uwb_rc *uwb_rc) 340 + int uwb_rc_ie_setup(struct uwb_rc *uwb_rc) 199 341 { 200 - struct device *dev = &uwb_rc->uwb_dev.dev; 201 - ssize_t result; 202 - size_t capacity; 203 - struct uwb_rc_evt_get_ie *ie_info; 342 + struct uwb_rc_evt_get_ie *ie_info = NULL; 343 + int capacity; 204 344 205 - d_fnstart(3, dev, "(%p)\n", uwb_rc); 345 + capacity = uwb_rc_get_ie(uwb_rc, &ie_info); 346 + if (capacity < 0) 347 + return capacity; 348 + 206 349 mutex_lock(&uwb_rc->ies_mutex); 207 - result = uwb_rc_get_ie(uwb_rc, &ie_info); 208 - if (result < 0) 209 - goto error_get_ie; 210 - capacity = result; 211 - d_printf(5, dev, "Got IEs %zu bytes (%zu long at %p)\n", result, 212 - (size_t)le16_to_cpu(ie_info->wIELength), ie_info); 213 350 214 - /* Remove IEs that host should not set. */ 215 - result = uwb_rc_parse_host_settable_ie(&uwb_rc->uwb_dev, 216 - ie_info->IEData, le16_to_cpu(ie_info->wIELength)); 217 - if (result < 0) 218 - goto error_parse; 219 - d_printf(5, dev, "purged non-settable IEs to %zu bytes\n", result); 220 - uwb_rc->ies = (void *) ie_info; 351 + uwb_rc->ies = (struct uwb_rc_cmd_set_ie *)ie_info; 221 352 uwb_rc->ies->rccb.bCommandType = UWB_RC_CET_GENERAL; 222 353 uwb_rc->ies->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_SET_IE); 223 354 uwb_rc->ies_capacity = capacity; 224 - d_printf(5, dev, "IE cache at %p %zu bytes, %zu capacity\n", 225 - ie_info, result, capacity); 226 - result = 0; 227 - error_parse: 228 - error_get_ie: 355 + 229 356 mutex_unlock(&uwb_rc->ies_mutex); 230 - d_fnend(3, dev, "(%p) = %zu\n", uwb_rc, result); 231 - return result; 357 + 358 + return 0; 232 359 } 233 360 234 361 ··· 226 383 } 227 384 228 385 229 - static 230 - int __acc_size(struct uwb_dev *uwb_dev, const struct uwb_ie_hdr *ie_hdr, 231 - size_t offset, void *_ctx) 386 + static int uwb_rc_ie_add_one(struct uwb_rc *rc, const struct uwb_ie_hdr *new_ie) 232 387 { 233 - size_t *acc_size = _ctx; 234 - *acc_size += sizeof(*ie_hdr) + ie_hdr->length; 235 - d_printf(6, &uwb_dev->dev, "new acc size %zu\n", *acc_size); 388 + struct uwb_rc_cmd_set_ie *new_ies; 389 + void *ptr, *prev_ie; 390 + struct uwb_ie_hdr *ie; 391 + size_t length, new_ie_len, new_capacity, size, prev_size; 392 + 393 + length = le16_to_cpu(rc->ies->wIELength); 394 + new_ie_len = sizeof(struct uwb_ie_hdr) + new_ie->length; 395 + new_capacity = sizeof(struct uwb_rc_cmd_set_ie) + length + new_ie_len; 396 + 397 + if (new_capacity > rc->ies_capacity) { 398 + new_ies = krealloc(rc->ies, new_capacity, GFP_KERNEL); 399 + if (!new_ies) 400 + return -ENOMEM; 401 + rc->ies = new_ies; 402 + } 403 + 404 + ptr = rc->ies->IEData; 405 + size = length; 406 + for (;;) { 407 + prev_ie = ptr; 408 + prev_size = size; 409 + ie = uwb_ie_next(&ptr, &size); 410 + if (!ie || ie->element_id > new_ie->element_id) 411 + break; 412 + } 413 + 414 + memmove(prev_ie + new_ie_len, prev_ie, prev_size); 415 + memcpy(prev_ie, new_ie, new_ie_len); 416 + rc->ies->wIELength = cpu_to_le16(length + new_ie_len); 417 + 236 418 return 0; 237 419 } 238 420 239 - 240 421 /** 241 - * Add a new IE to IEs currently being transmitted by device 242 - * 422 + * uwb_rc_ie_add - add new IEs to the radio controller's beacon 423 + * @uwb_rc: the radio controller. 243 424 * @ies: the buffer containing the new IE or IEs to be added to 244 - * the device's beacon. 
The buffer will be verified for 245 - * consistence (meaning the headers should be right) and 246 - * consistent with the buffer size. 247 - * @size: size of @ies (in bytes, total buffer size) 248 - * @returns: 0 if ok, <0 errno code on error 425 + * the device's beacon. 426 + * @size: length of all the IEs. 249 427 * 250 428 * According to WHCI 0.95 [4.13.6] the driver will only receive the RCEB 251 429 * after the device sent the first beacon that includes the IEs specified ··· 275 411 * we start beaconing. 276 412 * 277 413 * Setting an IE on the device will overwrite all current IEs in device. So 278 - * we take the current IEs being transmitted by the device, append the 414 + * we take the current IEs being transmitted by the device, insert the 279 415 * new one, and call SET IE with all the IEs needed. 280 416 * 281 - * The local IE cache will only be updated with the new IE if SET IE 282 - * completed successfully. 417 + * Returns 0 on success; or -ENOMEM. 283 418 */ 284 419 int uwb_rc_ie_add(struct uwb_rc *uwb_rc, 285 420 const struct uwb_ie_hdr *ies, size_t size) 286 421 { 287 422 int result = 0; 288 - struct device *dev = &uwb_rc->uwb_dev.dev; 289 - struct uwb_rc_cmd_set_ie *new_ies; 290 - size_t ies_size, total_size, acc_size = 0; 423 + void *ptr; 424 + const struct uwb_ie_hdr *ie; 291 425 292 - if (uwb_rc->ies == NULL) 293 - return -ESHUTDOWN; 294 - uwb_ie_for_each(&uwb_rc->uwb_dev, __acc_size, &acc_size, ies, size); 295 - if (acc_size != size) { 296 - dev_err(dev, "BUG: bad IEs, misconstructed headers " 297 - "[%zu bytes reported vs %zu calculated]\n", 298 - size, acc_size); 299 - WARN_ON(1); 300 - return -EINVAL; 301 - } 302 426 mutex_lock(&uwb_rc->ies_mutex); 303 - ies_size = le16_to_cpu(uwb_rc->ies->wIELength); 304 - total_size = sizeof(*uwb_rc->ies) + ies_size; 305 - if (total_size + size > uwb_rc->ies_capacity) { 306 - d_printf(4, dev, "Reallocating IE cache from %p capacity %zu " 307 - "to capacity %zu\n", uwb_rc->ies, uwb_rc->ies_capacity, 308 - total_size + size); 309 - new_ies = kzalloc(total_size + size, GFP_KERNEL); 310 - if (new_ies == NULL) { 311 - dev_err(dev, "No memory for adding new IE\n"); 312 - result = -ENOMEM; 313 - goto error_alloc; 314 - } 315 - memcpy(new_ies, uwb_rc->ies, total_size); 316 - uwb_rc->ies_capacity = total_size + size; 317 - kfree(uwb_rc->ies); 318 - uwb_rc->ies = new_ies; 319 - d_printf(4, dev, "New IE cache at %p capacity %zu\n", 320 - uwb_rc->ies, uwb_rc->ies_capacity); 427 + 428 + ptr = (void *)ies; 429 + for (;;) { 430 + ie = uwb_ie_next(&ptr, &size); 431 + if (!ie) 432 + break; 433 + 434 + result = uwb_rc_ie_add_one(uwb_rc, ie); 435 + if (result < 0) 436 + break; 321 437 } 322 - memcpy((void *)uwb_rc->ies + total_size, ies, size); 323 - uwb_rc->ies->wIELength = cpu_to_le16(ies_size + size); 324 - if (uwb_rc->beaconing != -1) { 325 - result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); 326 - if (result < 0) { 327 - dev_err(dev, "Cannot set new IE on device: %d\n", 328 - result); 329 - uwb_rc->ies->wIELength = cpu_to_le16(ies_size); 438 + if (result >= 0) { 439 + if (size == 0) { 440 + if (uwb_rc->beaconing != -1) 441 + result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); 330 442 } else 331 - result = 0; 443 + result = -EINVAL; 332 444 } 333 - d_printf(4, dev, "IEs now occupy %hu bytes of %zu capacity at %p\n", 334 - le16_to_cpu(uwb_rc->ies->wIELength), uwb_rc->ies_capacity, 335 - uwb_rc->ies); 336 - error_alloc: 445 + 337 446 mutex_unlock(&uwb_rc->ies_mutex); 447 + 338 448 return result; 339 449 } 340 450 EXPORT_SYMBOL_GPL(uwb_rc_ie_add); ··· 327 489 
* beacon. We don't reallocate, we just mark the size smaller. 328 490 */ 329 491 static 330 - int uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove) 492 + void uwb_rc_ie_cache_rm(struct uwb_rc *uwb_rc, enum uwb_ie to_remove) 331 493 { 332 - struct uwb_ie_hdr *ie_hdr; 333 - size_t new_len = le16_to_cpu(uwb_rc->ies->wIELength); 334 - void *itr = uwb_rc->ies->IEData; 335 - void *top = itr + new_len; 494 + struct uwb_ie_hdr *ie; 495 + size_t len = le16_to_cpu(uwb_rc->ies->wIELength); 496 + void *ptr; 497 + size_t size; 336 498 337 - while (itr < top) { 338 - ie_hdr = itr; 339 - if (ie_hdr->element_id != to_remove) { 340 - itr += sizeof(*ie_hdr) + ie_hdr->length; 341 - } else { 342 - int ie_length; 343 - ie_length = sizeof(*ie_hdr) + ie_hdr->length; 344 - if (top - itr != ie_length) 345 - memmove(itr, itr + ie_length, top - itr + ie_length); 346 - top -= ie_length; 347 - new_len -= ie_length; 499 + ptr = uwb_rc->ies->IEData; 500 + size = len; 501 + for (;;) { 502 + ie = uwb_ie_next(&ptr, &size); 503 + if (!ie) 504 + break; 505 + if (ie->element_id == to_remove) { 506 + len -= sizeof(struct uwb_ie_hdr) + ie->length; 507 + memmove(ie, ptr, size); 508 + ptr = ie; 348 509 } 349 510 } 350 - uwb_rc->ies->wIELength = cpu_to_le16(new_len); 351 - return 0; 511 + uwb_rc->ies->wIELength = cpu_to_le16(len); 352 512 } 353 513 354 514 355 515 /** 356 - * Remove an IE currently being transmitted by device 516 + * uwb_rc_ie_rm - remove an IE from the radio controller's beacon 517 + * @uwb_rc: the radio controller. 518 + * @element_id: the element ID of the IE to remove. 357 519 * 358 - * @element_id: id of IE to be removed from device's beacon 520 + * Only IEs previously added with uwb_rc_ie_add() may be removed. 521 + * 522 + * Returns 0 on success; or -ve the SET-IE command to the radio 523 + * controller failed. 359 524 */ 360 525 int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id) 361 526 { 362 - struct device *dev = &uwb_rc->uwb_dev.dev; 363 - int result; 527 + int result = 0; 364 528 365 - if (uwb_rc->ies == NULL) 366 - return -ESHUTDOWN; 367 529 mutex_lock(&uwb_rc->ies_mutex); 368 - result = uwb_rc_ie_cache_rm(uwb_rc, element_id); 369 - if (result < 0) 370 - dev_err(dev, "Cannot remove IE from cache.\n"); 371 - if (uwb_rc->beaconing != -1) { 530 + 531 + uwb_rc_ie_cache_rm(uwb_rc, element_id); 532 + 533 + if (uwb_rc->beaconing != -1) 372 534 result = uwb_rc_set_ie(uwb_rc, uwb_rc->ies); 373 - if (result < 0) 374 - dev_err(dev, "Cannot set new IE on device.\n"); 375 - } 535 + 376 536 mutex_unlock(&uwb_rc->ies_mutex); 537 + 377 538 return result; 378 539 } 379 540 EXPORT_SYMBOL_GPL(uwb_rc_ie_rm);
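Note: the rewritten ie.c keeps the cached IEs sorted by element ID and only re-issues SET-IE while the controller is beaconing. Below is a minimal caller-side sketch of the add/remove API, assuming kernel context; the wrapper struct and the APP-SPEC payload are illustrative only.

#include <linux/uwb.h>

struct example_app_ie {
	struct uwb_ie_hdr hdr;
	u8 data[4];				/* hypothetical payload */
} __packed;

static int example_publish_ie(struct uwb_rc *rc)
{
	struct example_app_ie ie = {
		.hdr.element_id	= UWB_APP_SPEC_IE,
		.hdr.length	= sizeof(ie.data),	/* payload bytes only */
		.data		= { 0x01, 0x02, 0x03, 0x04 },
	};
	int ret;

	ret = uwb_rc_ie_add(rc, &ie.hdr, sizeof(ie));	/* inserted in element ID order */
	if (ret < 0)
		return ret;
	/* ...and withdrawn again when no longer needed */
	return uwb_rc_ie_rm(rc, UWB_APP_SPEC_IE);
}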
+4 -19
drivers/uwb/lc-dev.c
··· 22 22 * 23 23 * FIXME: docs 24 24 */ 25 - 26 25 #include <linux/kernel.h> 27 26 #include <linux/device.h> 28 27 #include <linux/err.h> 29 28 #include <linux/kdev_t.h> 30 29 #include <linux/random.h> 31 30 #include "uwb-internal.h" 32 - 33 - #define D_LOCAL 1 34 - #include <linux/uwb/debug.h> 35 - 36 31 37 32 /* We initialize addresses to 0xff (invalid, as it is bcast) */ 38 33 static inline void uwb_dev_addr_init(struct uwb_dev_addr *addr) ··· 99 104 { 100 105 struct uwb_dev *uwb_dev = to_uwb_dev(dev); 101 106 102 - d_fnstart(4, NULL, "(dev %p uwb_dev %p)\n", dev, uwb_dev); 103 107 uwb_bce_put(uwb_dev->bce); 104 - d_printf(0, &uwb_dev->dev, "uwb_dev %p freed\n", uwb_dev); 105 108 memset(uwb_dev, 0x69, sizeof(*uwb_dev)); 106 109 kfree(uwb_dev); 107 - d_fnend(4, NULL, "(dev %p uwb_dev %p) = void\n", dev, uwb_dev); 108 110 } 109 111 110 112 /* ··· 267 275 */ 268 276 static int __uwb_dev_sys_add(struct uwb_dev *uwb_dev, struct device *parent_dev) 269 277 { 270 - int result; 271 278 struct device *dev; 272 - 273 - d_fnstart(4, NULL, "(uwb_dev %p parent_dev %p)\n", uwb_dev, parent_dev); 274 - BUG_ON(parent_dev == NULL); 275 279 276 280 dev = &uwb_dev->dev; 277 281 /* Device sysfs files are only useful for neighbor devices not ··· 277 289 dev->parent = parent_dev; 278 290 dev_set_drvdata(dev, uwb_dev); 279 291 280 - result = device_add(dev); 281 - d_fnend(4, NULL, "(uwb_dev %p parent_dev %p) = %d\n", uwb_dev, parent_dev, result); 282 - return result; 292 + return device_add(dev); 283 293 } 284 294 285 295 286 296 static void __uwb_dev_sys_rm(struct uwb_dev *uwb_dev) 287 297 { 288 - d_fnstart(4, NULL, "(uwb_dev %p)\n", uwb_dev); 289 298 dev_set_drvdata(&uwb_dev->dev, NULL); 290 299 device_del(&uwb_dev->dev); 291 - d_fnend(4, NULL, "(uwb_dev %p) = void\n", uwb_dev); 292 300 } 293 301 294 302 ··· 368 384 struct device *dev = &uwb_dev->dev; 369 385 char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; 370 386 371 - d_fnstart(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p)\n", dev, uwb_dev, rc); 372 387 uwb_mac_addr_print(macbuf, sizeof(macbuf), &uwb_dev->mac_addr); 373 388 uwb_dev_addr_print(devbuf, sizeof(devbuf), &uwb_dev->dev_addr); 374 389 dev_info(dev, "uwb device (mac %s dev %s) disconnected from %s %s\n", ··· 375 392 rc ? rc->uwb_dev.dev.parent->bus->name : "n/a", 376 393 rc ? dev_name(rc->uwb_dev.dev.parent) : ""); 377 394 uwb_dev_rm(uwb_dev); 395 + list_del(&uwb_dev->bce->node); 396 + uwb_bce_put(uwb_dev->bce); 378 397 uwb_dev_put(uwb_dev); /* for the creation in _onair() */ 379 - d_fnend(3, NULL, "(dev %p [uwb_dev %p], uwb_rc %p) = 0\n", dev, uwb_dev, rc); 398 + 380 399 return 0; 381 400 } 382 401
+20 -42
drivers/uwb/lc-rc.c
··· 36 36 #include <linux/etherdevice.h> 37 37 #include <linux/usb.h> 38 38 39 - #define D_LOCAL 1 40 - #include <linux/uwb/debug.h> 41 39 #include "uwb-internal.h" 42 40 43 41 static int uwb_rc_index_match(struct device *dev, void *data) ··· 79 81 struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev); 80 82 struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev); 81 83 82 - uwb_rc_neh_destroy(rc); 83 84 uwb_rc_ie_release(rc); 84 - d_printf(1, dev, "freed uwb_rc %p\n", rc); 85 85 kfree(rc); 86 86 } 87 87 ··· 96 100 rc->scan_type = UWB_SCAN_DISABLED; 97 101 INIT_LIST_HEAD(&rc->notifs_chain.list); 98 102 mutex_init(&rc->notifs_chain.mutex); 103 + INIT_LIST_HEAD(&rc->uwb_beca.list); 104 + mutex_init(&rc->uwb_beca.mutex); 99 105 uwb_drp_avail_init(rc); 100 106 uwb_rc_ie_init(rc); 101 107 uwb_rsv_init(rc); ··· 189 191 int result; 190 192 struct device *dev = &rc->uwb_dev.dev; 191 193 192 - result = uwb_rc_reset(rc); 194 + result = uwb_radio_setup(rc); 193 195 if (result < 0) { 194 - dev_err(dev, "cannot reset UWB radio: %d\n", result); 196 + dev_err(dev, "cannot setup UWB radio: %d\n", result); 195 197 goto error; 196 198 } 197 199 result = uwb_rc_mac_addr_setup(rc); ··· 248 250 249 251 rc->priv = priv; 250 252 253 + init_waitqueue_head(&rc->uwbd.wq); 254 + INIT_LIST_HEAD(&rc->uwbd.event_list); 255 + spin_lock_init(&rc->uwbd.event_list_lock); 256 + 257 + uwbd_start(rc); 258 + 251 259 result = rc->start(rc); 252 260 if (result < 0) 253 261 goto error_rc_start; ··· 288 284 error_dev_add: 289 285 error_rc_setup: 290 286 rc->stop(rc); 291 - uwbd_flush(rc); 287 + uwbd_stop(rc); 292 288 error_rc_start: 293 289 return result; 294 290 } ··· 310 306 rc->ready = 0; 311 307 312 308 uwb_dbg_del_rc(rc); 313 - uwb_rsv_cleanup(rc); 314 - uwb_rc_ie_rm(rc, UWB_IDENTIFICATION_IE); 315 - if (rc->beaconing >= 0) 316 - uwb_rc_beacon(rc, -1, 0); 317 - if (rc->scan_type != UWB_SCAN_DISABLED) 318 - uwb_rc_scan(rc, rc->scanning, UWB_SCAN_DISABLED, 0); 319 - uwb_rc_reset(rc); 309 + uwb_rsv_remove_all(rc); 310 + uwb_radio_shutdown(rc); 320 311 321 312 rc->stop(rc); 322 - uwbd_flush(rc); 313 + 314 + uwbd_stop(rc); 315 + uwb_rc_neh_destroy(rc); 323 316 324 317 uwb_dev_lock(&rc->uwb_dev); 325 318 rc->priv = NULL; 326 319 rc->cmd = NULL; 327 320 uwb_dev_unlock(&rc->uwb_dev); 328 - mutex_lock(&uwb_beca.mutex); 321 + mutex_lock(&rc->uwb_beca.mutex); 329 322 uwb_dev_for_each(rc, uwb_dev_offair_helper, NULL); 330 323 __uwb_rc_sys_rm(rc); 331 - mutex_unlock(&uwb_beca.mutex); 324 + mutex_unlock(&rc->uwb_beca.mutex); 325 + uwb_rsv_cleanup(rc); 326 + uwb_beca_release(rc); 332 327 uwb_dev_rm(&rc->uwb_dev); 333 328 } 334 329 EXPORT_SYMBOL_GPL(uwb_rc_rm); ··· 471 468 __uwb_rc_put(rc); 472 469 } 473 470 EXPORT_SYMBOL_GPL(uwb_rc_put); 474 - 475 - /* 476 - * 477 - * 478 - */ 479 - ssize_t uwb_rc_print_IEs(struct uwb_rc *uwb_rc, char *buf, size_t size) 480 - { 481 - ssize_t result; 482 - struct uwb_rc_evt_get_ie *ie_info; 483 - struct uwb_buf_ctx ctx; 484 - 485 - result = uwb_rc_get_ie(uwb_rc, &ie_info); 486 - if (result < 0) 487 - goto error_get_ie; 488 - ctx.buf = buf; 489 - ctx.size = size; 490 - ctx.bytes = 0; 491 - uwb_ie_for_each(&uwb_rc->uwb_dev, uwb_ie_dump_hex, &ctx, 492 - ie_info->IEData, result - sizeof(*ie_info)); 493 - result = ctx.bytes; 494 - kfree(ie_info); 495 - error_get_ie: 496 - return result; 497 - } 498 -
+33 -39
drivers/uwb/neh.c
··· 86 86 #include <linux/err.h> 87 87 88 88 #include "uwb-internal.h" 89 - #define D_LOCAL 0 90 - #include <linux/uwb/debug.h> 91 89 92 90 /* 93 91 * UWB Radio Controller Notification/Event Handle ··· 252 254 253 255 static void __uwb_rc_neh_rm(struct uwb_rc *rc, struct uwb_rc_neh *neh) 254 256 { 255 - del_timer(&neh->timer); 256 257 __uwb_rc_ctx_put(rc, neh); 257 258 list_del(&neh->list_node); 258 259 } ··· 272 275 __uwb_rc_neh_rm(rc, neh); 273 276 spin_unlock_irqrestore(&rc->neh_lock, flags); 274 277 278 + del_timer_sync(&neh->timer); 275 279 uwb_rc_neh_put(neh); 276 280 } 277 281 ··· 347 349 } 348 350 349 351 350 - /** 352 + /* 351 353 * Process notifications coming from the radio control interface 352 354 * 353 355 * @rc: UWB Radio Control Interface descriptor ··· 399 401 uwb_evt->notif.size = size; 400 402 uwb_evt->notif.rceb = rceb; 401 403 402 - switch (le16_to_cpu(rceb->wEvent)) { 403 - /* Trap some vendor specific events 404 - * 405 - * FIXME: move this to handling in ptc-est, where we 406 - * register a NULL event handler for these two guys 407 - * using the Intel IDs. 408 - */ 409 - case 0x0103: 410 - dev_info(dev, "FIXME: DEVICE ADD\n"); 411 - return; 412 - case 0x0104: 413 - dev_info(dev, "FIXME: DEVICE RM\n"); 414 - return; 415 - default: 416 - break; 417 - } 418 - 419 404 uwbd_event_queue(uwb_evt); 420 405 } 421 406 ··· 419 438 rceb->bEventContext, size); 420 439 } else { 421 440 neh = uwb_rc_neh_lookup(rc, rceb); 422 - if (neh) 441 + if (neh) { 442 + del_timer_sync(&neh->timer); 423 443 uwb_rc_neh_cb(neh, rceb, size); 424 - else 444 + } else 425 445 dev_warn(dev, "event 0x%02x/%04x/%02x (%zu bytes): nobody cared\n", 426 446 rceb->bEventType, le16_to_cpu(rceb->wEvent), 427 447 rceb->bEventContext, size); ··· 477 495 size_t size, real_size, event_size; 478 496 int needtofree; 479 497 480 - d_fnstart(3, dev, "(rc %p buf %p %zu buf_size)\n", rc, buf, buf_size); 481 - d_printf(2, dev, "groking event block: %zu bytes\n", buf_size); 482 498 itr = buf; 483 499 size = buf_size; 484 500 while (size > 0) { ··· 524 544 525 545 itr += real_size; 526 546 size -= real_size; 527 - d_printf(2, dev, "consumed %zd bytes, %zu left\n", 528 - event_size, size); 529 547 } 530 - d_fnend(3, dev, "(rc %p buf %p %zu buf_size) = void\n", rc, buf, buf_size); 531 548 } 532 549 EXPORT_SYMBOL_GPL(uwb_rc_neh_grok); 533 550 ··· 539 562 */ 540 563 void uwb_rc_neh_error(struct uwb_rc *rc, int error) 541 564 { 542 - struct uwb_rc_neh *neh, *next; 565 + struct uwb_rc_neh *neh; 543 566 unsigned long flags; 544 567 545 - BUG_ON(error >= 0); 546 - spin_lock_irqsave(&rc->neh_lock, flags); 547 - list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) { 568 + for (;;) { 569 + spin_lock_irqsave(&rc->neh_lock, flags); 570 + if (list_empty(&rc->neh_list)) { 571 + spin_unlock_irqrestore(&rc->neh_lock, flags); 572 + break; 573 + } 574 + neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node); 548 575 __uwb_rc_neh_rm(rc, neh); 576 + spin_unlock_irqrestore(&rc->neh_lock, flags); 577 + 578 + del_timer_sync(&neh->timer); 549 579 uwb_rc_neh_cb(neh, NULL, error); 550 580 } 551 - spin_unlock_irqrestore(&rc->neh_lock, flags); 552 581 } 553 582 EXPORT_SYMBOL_GPL(uwb_rc_neh_error); 554 583 ··· 566 583 unsigned long flags; 567 584 568 585 spin_lock_irqsave(&rc->neh_lock, flags); 569 - __uwb_rc_neh_rm(rc, neh); 586 + if (neh->context) 587 + __uwb_rc_neh_rm(rc, neh); 588 + else 589 + neh = NULL; 570 590 spin_unlock_irqrestore(&rc->neh_lock, flags); 571 591 572 - uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT); 592 + if (neh) 
593 + uwb_rc_neh_cb(neh, NULL, -ETIMEDOUT); 573 594 } 574 595 575 596 /** Initializes the @rc's neh subsystem ··· 592 605 void uwb_rc_neh_destroy(struct uwb_rc *rc) 593 606 { 594 607 unsigned long flags; 595 - struct uwb_rc_neh *neh, *next; 608 + struct uwb_rc_neh *neh; 596 609 597 - spin_lock_irqsave(&rc->neh_lock, flags); 598 - list_for_each_entry_safe(neh, next, &rc->neh_list, list_node) { 610 + for (;;) { 611 + spin_lock_irqsave(&rc->neh_lock, flags); 612 + if (list_empty(&rc->neh_list)) { 613 + spin_unlock_irqrestore(&rc->neh_lock, flags); 614 + break; 615 + } 616 + neh = list_first_entry(&rc->neh_list, struct uwb_rc_neh, list_node); 599 617 __uwb_rc_neh_rm(rc, neh); 618 + spin_unlock_irqrestore(&rc->neh_lock, flags); 619 + 620 + del_timer_sync(&neh->timer); 600 621 uwb_rc_neh_put(neh); 601 622 } 602 - spin_unlock_irqrestore(&rc->neh_lock, flags); 603 623 }
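Note: the pattern behind the neh changes, condensed for readers skimming the hunks above: a handle is unlinked while neh_lock is held, but del_timer_sync() and the completion callback run with the lock dropped, and the timer handler bails out via the context check if it lost the race. The helper below is a sketch mirroring uwb_rc_neh_error() and uwb_rc_neh_destroy(), not a new function added by this merge.

static void neh_teardown_one(struct uwb_rc *rc, struct uwb_rc_neh *neh, int error)
{
	unsigned long flags;

	spin_lock_irqsave(&rc->neh_lock, flags);
	__uwb_rc_neh_rm(rc, neh);		/* unlink and release the context id */
	spin_unlock_irqrestore(&rc->neh_lock, flags);

	del_timer_sync(&neh->timer);		/* wait out a timer already running */
	uwb_rc_neh_cb(neh, NULL, error);	/* now safe to complete the handle */
}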
+16 -9
drivers/uwb/pal.c
··· 16 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 17 */ 18 18 #include <linux/kernel.h> 19 + #include <linux/debugfs.h> 19 20 #include <linux/uwb.h> 20 21 21 22 #include "uwb-internal.h" ··· 33 32 34 33 /** 35 34 * uwb_pal_register - register a UWB PAL 36 - * @rc: the radio controller the PAL will be using 37 35 * @pal: the PAL 38 36 * 39 37 * The PAL must be initialized with uwb_pal_init(). 40 38 */ 41 - int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal) 39 + int uwb_pal_register(struct uwb_pal *pal) 42 40 { 41 + struct uwb_rc *rc = pal->rc; 43 42 int ret; 44 43 45 44 if (pal->device) { ··· 55 54 } 56 55 } 57 56 58 - spin_lock(&rc->pal_lock); 57 + pal->debugfs_dir = uwb_dbg_create_pal_dir(pal); 58 + 59 + mutex_lock(&rc->uwb_dev.mutex); 59 60 list_add(&pal->node, &rc->pals); 60 - spin_unlock(&rc->pal_lock); 61 + mutex_unlock(&rc->uwb_dev.mutex); 61 62 62 63 return 0; 63 64 } ··· 67 64 68 65 /** 69 66 * uwb_pal_register - unregister a UWB PAL 70 - * @rc: the radio controller the PAL was using 71 67 * @pal: the PAL 72 68 */ 73 - void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal) 69 + void uwb_pal_unregister(struct uwb_pal *pal) 74 70 { 75 - spin_lock(&rc->pal_lock); 71 + struct uwb_rc *rc = pal->rc; 72 + 73 + uwb_radio_stop(pal); 74 + 75 + mutex_lock(&rc->uwb_dev.mutex); 76 76 list_del(&pal->node); 77 - spin_unlock(&rc->pal_lock); 77 + mutex_unlock(&rc->uwb_dev.mutex); 78 + 79 + debugfs_remove(pal->debugfs_dir); 78 80 79 81 if (pal->device) { 80 82 sysfs_remove_link(&rc->uwb_dev.dev.kobj, pal->name); ··· 94 86 */ 95 87 void uwb_rc_pal_init(struct uwb_rc *rc) 96 88 { 97 - spin_lock_init(&rc->pal_lock); 98 89 INIT_LIST_HEAD(&rc->pals); 99 90 }
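Note: uwb_pal_register() and uwb_pal_unregister() no longer take the radio controller; the PAL carries it in pal->rc. A sketch of how a PAL attaches under the new API follows; struct my_pal, my_pal_attach and my_pal_channel_changed are illustrative names, not part of this merge.

struct my_pal {
	struct uwb_pal pal;
	/* PAL-private state ... */
};

static void my_pal_channel_changed(struct uwb_pal *pal, int channel)
{
	/* channel >= 0: radio (re)started; channel == -1: radio stopped */
}

static int my_pal_attach(struct my_pal *mp, struct uwb_rc *rc)
{
	uwb_pal_init(&mp->pal);
	mp->pal.name = "my-pal";
	mp->pal.rc = rc;			/* the rc now lives in the PAL */
	mp->pal.channel_changed = my_pal_channel_changed;

	return uwb_pal_register(&mp->pal);	/* also creates the PAL's debugfs dir */
}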
+202
drivers/uwb/radio.c
··· 1 + /* 2 + * UWB radio (channel) management. 3 + * 4 + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License version 8 + * 2 as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + #include <linux/kernel.h> 19 + #include <linux/uwb.h> 20 + 21 + #include "uwb-internal.h" 22 + 23 + 24 + static int uwb_radio_select_channel(struct uwb_rc *rc) 25 + { 26 + /* 27 + * Default to channel 9 (BG1, TFC1) unless the user has 28 + * selected a specific channel or there are no active PALs. 29 + */ 30 + if (rc->active_pals == 0) 31 + return -1; 32 + if (rc->beaconing_forced) 33 + return rc->beaconing_forced; 34 + return 9; 35 + } 36 + 37 + 38 + /* 39 + * Notify all active PALs that the channel has changed. 40 + */ 41 + static void uwb_radio_channel_changed(struct uwb_rc *rc, int channel) 42 + { 43 + struct uwb_pal *pal; 44 + 45 + list_for_each_entry(pal, &rc->pals, node) { 46 + if (pal->channel && channel != pal->channel) { 47 + pal->channel = channel; 48 + if (pal->channel_changed) 49 + pal->channel_changed(pal, pal->channel); 50 + } 51 + } 52 + } 53 + 54 + /* 55 + * Change to a new channel and notify any active PALs of the new 56 + * channel. 57 + * 58 + * When stopping the radio, PALs need to be notified first so they can 59 + * terminate any active reservations. 60 + */ 61 + static int uwb_radio_change_channel(struct uwb_rc *rc, int channel) 62 + { 63 + int ret = 0; 64 + 65 + if (channel == -1) 66 + uwb_radio_channel_changed(rc, channel); 67 + 68 + if (channel != rc->beaconing) { 69 + if (rc->beaconing != -1 && channel != -1) { 70 + /* 71 + * FIXME: should signal the channel change 72 + * with a Channel Change IE. 73 + */ 74 + ret = uwb_radio_change_channel(rc, -1); 75 + if (ret < 0) 76 + return ret; 77 + } 78 + ret = uwb_rc_beacon(rc, channel, 0); 79 + } 80 + 81 + if (channel != -1) 82 + uwb_radio_channel_changed(rc, rc->beaconing); 83 + 84 + return ret; 85 + } 86 + 87 + /** 88 + * uwb_radio_start - request that the radio be started 89 + * @pal: the PAL making the request. 90 + * 91 + * If the radio is not already active, aa suitable channel is selected 92 + * and beacons are started. 93 + */ 94 + int uwb_radio_start(struct uwb_pal *pal) 95 + { 96 + struct uwb_rc *rc = pal->rc; 97 + int ret = 0; 98 + 99 + mutex_lock(&rc->uwb_dev.mutex); 100 + 101 + if (!pal->channel) { 102 + pal->channel = -1; 103 + rc->active_pals++; 104 + ret = uwb_radio_change_channel(rc, uwb_radio_select_channel(rc)); 105 + } 106 + 107 + mutex_unlock(&rc->uwb_dev.mutex); 108 + return ret; 109 + } 110 + EXPORT_SYMBOL_GPL(uwb_radio_start); 111 + 112 + /** 113 + * uwb_radio_stop - request tha the radio be stopped. 114 + * @pal: the PAL making the request. 115 + * 116 + * Stops the radio if no other PAL is making use of it. 
117 + */ 118 + void uwb_radio_stop(struct uwb_pal *pal) 119 + { 120 + struct uwb_rc *rc = pal->rc; 121 + 122 + mutex_lock(&rc->uwb_dev.mutex); 123 + 124 + if (pal->channel) { 125 + rc->active_pals--; 126 + uwb_radio_change_channel(rc, uwb_radio_select_channel(rc)); 127 + pal->channel = 0; 128 + } 129 + 130 + mutex_unlock(&rc->uwb_dev.mutex); 131 + } 132 + EXPORT_SYMBOL_GPL(uwb_radio_stop); 133 + 134 + /* 135 + * uwb_radio_force_channel - force a specific channel to be used 136 + * @rc: the radio controller. 137 + * @channel: the channel to use; -1 to force the radio to stop; 0 to 138 + * use the default channel selection algorithm. 139 + */ 140 + int uwb_radio_force_channel(struct uwb_rc *rc, int channel) 141 + { 142 + int ret = 0; 143 + 144 + mutex_lock(&rc->uwb_dev.mutex); 145 + 146 + rc->beaconing_forced = channel; 147 + ret = uwb_radio_change_channel(rc, uwb_radio_select_channel(rc)); 148 + 149 + mutex_unlock(&rc->uwb_dev.mutex); 150 + return ret; 151 + } 152 + 153 + /* 154 + * uwb_radio_setup - setup the radio manager 155 + * @rc: the radio controller. 156 + * 157 + * The radio controller is reset to ensure it's in a known state 158 + * before it's used. 159 + */ 160 + int uwb_radio_setup(struct uwb_rc *rc) 161 + { 162 + return uwb_rc_reset(rc); 163 + } 164 + 165 + /* 166 + * uwb_radio_reset_state - reset any radio manager state 167 + * @rc: the radio controller. 168 + * 169 + * All internal radio manager state is reset to values corresponding 170 + * to a reset radio controller. 171 + */ 172 + void uwb_radio_reset_state(struct uwb_rc *rc) 173 + { 174 + struct uwb_pal *pal; 175 + 176 + mutex_lock(&rc->uwb_dev.mutex); 177 + 178 + list_for_each_entry(pal, &rc->pals, node) { 179 + if (pal->channel) { 180 + pal->channel = -1; 181 + if (pal->channel_changed) 182 + pal->channel_changed(pal, -1); 183 + } 184 + } 185 + 186 + rc->beaconing = -1; 187 + rc->scanning = -1; 188 + 189 + mutex_unlock(&rc->uwb_dev.mutex); 190 + } 191 + 192 + /* 193 + * uwb_radio_shutdown - shutdown the radio manager 194 + * @rc: the radio controller. 195 + * 196 + * The radio controller is reset. 197 + */ 198 + void uwb_radio_shutdown(struct uwb_rc *rc) 199 + { 200 + uwb_radio_reset_state(rc); 201 + uwb_rc_reset(rc); 202 + }
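The radio manager arbitrates the channel between PALs: the radio is started when the first PAL asks for it and stopped when the last one releases it. A sketch of the intended calling pattern from a PAL (the example_* hooks are hypothetical; uwb_radio_start() and uwb_radio_stop() are the exports above):

static int example_pal_ifup(struct uwb_pal *pal)
{
	/* Starts beaconing on a suitable channel if this is the
	 * first PAL to need the radio. */
	return uwb_radio_start(pal);
}

static void example_pal_ifdown(struct uwb_pal *pal)
{
	/* The radio keeps running while any other PAL still needs it. */
	uwb_radio_stop(pal);
}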
+37 -10
drivers/uwb/reset.c
··· 32 32 #include <linux/err.h> 33 33 34 34 #include "uwb-internal.h" 35 - #define D_LOCAL 0 36 - #include <linux/uwb/debug.h> 37 35 38 36 /** 39 37 * Command result codes (WUSB1.0[T8-69]) ··· 321 323 struct uwb_rc *rc = evt->rc; 322 324 int ret; 323 325 324 - /* Need to prevent the RC hardware module going away while in 325 - the rc->reset() call. */ 326 - if (!try_module_get(rc->owner)) 327 - return 0; 328 - 329 326 dev_info(&rc->uwb_dev.dev, "resetting radio controller\n"); 330 327 ret = rc->reset(rc); 331 - if (ret) 328 + if (ret) { 332 329 dev_err(&rc->uwb_dev.dev, "failed to reset hardware: %d\n", ret); 333 - 334 - module_put(rc->owner); 330 + goto error; 331 + } 332 + return 0; 333 + error: 334 + /* Nothing can be done except try the reset again. */ 335 + uwb_rc_reset_all(rc); 335 336 return ret; 336 337 } 337 338 ··· 357 360 uwbd_event_queue(evt); 358 361 } 359 362 EXPORT_SYMBOL_GPL(uwb_rc_reset_all); 363 + 364 + void uwb_rc_pre_reset(struct uwb_rc *rc) 365 + { 366 + rc->stop(rc); 367 + uwbd_flush(rc); 368 + 369 + uwb_radio_reset_state(rc); 370 + uwb_rsv_remove_all(rc); 371 + } 372 + EXPORT_SYMBOL_GPL(uwb_rc_pre_reset); 373 + 374 + void uwb_rc_post_reset(struct uwb_rc *rc) 375 + { 376 + int ret; 377 + 378 + ret = rc->start(rc); 379 + if (ret) 380 + goto error; 381 + ret = uwb_rc_mac_addr_set(rc, &rc->uwb_dev.mac_addr); 382 + if (ret) 383 + goto error; 384 + ret = uwb_rc_dev_addr_set(rc, &rc->uwb_dev.dev_addr); 385 + if (ret) 386 + goto error; 387 + return; 388 + error: 389 + /* Nothing can be done except try the reset again. */ 390 + uwb_rc_reset_all(rc); 391 + } 392 + EXPORT_SYMBOL_GPL(uwb_rc_post_reset);
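uwb_rc_pre_reset() and uwb_rc_post_reset() let a radio controller driver keep its binding across a hardware reset rather than being unbound and re-probed. A sketch of how a driver's reset path might bracket the hardware reset with them (the example driver structure and example_rc_hw_reset() are hypothetical):

struct example_rc_driver {
	struct uwb_rc *uwb_rc;
	/* ... hardware state ... */
};

static int example_rc_hw_reset(struct example_rc_driver *drv)
{
	int ret;

	uwb_rc_pre_reset(drv->uwb_rc);	/* stop the RC, flush events, drop reservations */

	ret = 0;	/* ... reset the hardware here ... */

	uwb_rc_post_reset(drv->uwb_rc);	/* restart and restore MAC/device addresses */
	return ret;
}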
+436 -127
drivers/uwb/rsv.c
··· 15 15 * You should have received a copy of the GNU General Public License 16 16 * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 17 */ 18 - #include <linux/version.h> 19 18 #include <linux/kernel.h> 20 19 #include <linux/uwb.h> 20 + #include <linux/random.h> 21 21 22 22 #include "uwb-internal.h" 23 23 24 24 static void uwb_rsv_timer(unsigned long arg); 25 25 26 26 static const char *rsv_states[] = { 27 - [UWB_RSV_STATE_NONE] = "none", 28 - [UWB_RSV_STATE_O_INITIATED] = "initiated", 29 - [UWB_RSV_STATE_O_PENDING] = "pending", 30 - [UWB_RSV_STATE_O_MODIFIED] = "modified", 31 - [UWB_RSV_STATE_O_ESTABLISHED] = "established", 32 - [UWB_RSV_STATE_T_ACCEPTED] = "accepted", 33 - [UWB_RSV_STATE_T_DENIED] = "denied", 34 - [UWB_RSV_STATE_T_PENDING] = "pending", 27 + [UWB_RSV_STATE_NONE] = "none ", 28 + [UWB_RSV_STATE_O_INITIATED] = "o initiated ", 29 + [UWB_RSV_STATE_O_PENDING] = "o pending ", 30 + [UWB_RSV_STATE_O_MODIFIED] = "o modified ", 31 + [UWB_RSV_STATE_O_ESTABLISHED] = "o established ", 32 + [UWB_RSV_STATE_O_TO_BE_MOVED] = "o to be moved ", 33 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = "o move expanding", 34 + [UWB_RSV_STATE_O_MOVE_COMBINING] = "o move combining", 35 + [UWB_RSV_STATE_O_MOVE_REDUCING] = "o move reducing ", 36 + [UWB_RSV_STATE_T_ACCEPTED] = "t accepted ", 37 + [UWB_RSV_STATE_T_CONFLICT] = "t conflict ", 38 + [UWB_RSV_STATE_T_PENDING] = "t pending ", 39 + [UWB_RSV_STATE_T_DENIED] = "t denied ", 40 + [UWB_RSV_STATE_T_RESIZED] = "t resized ", 41 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ", 42 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf", 43 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = "t expanding pend", 44 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = "t expanding den ", 35 45 }; 36 46 37 47 static const char *rsv_types[] = { ··· 51 41 [UWB_DRP_TYPE_PRIVATE] = "private", 52 42 [UWB_DRP_TYPE_PCA] = "pca", 53 43 }; 44 + 45 + bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv) 46 + { 47 + static const bool has_two_drp_ies[] = { 48 + [UWB_RSV_STATE_O_INITIATED] = false, 49 + [UWB_RSV_STATE_O_PENDING] = false, 50 + [UWB_RSV_STATE_O_MODIFIED] = false, 51 + [UWB_RSV_STATE_O_ESTABLISHED] = false, 52 + [UWB_RSV_STATE_O_TO_BE_MOVED] = false, 53 + [UWB_RSV_STATE_O_MOVE_COMBINING] = false, 54 + [UWB_RSV_STATE_O_MOVE_REDUCING] = false, 55 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = true, 56 + [UWB_RSV_STATE_T_ACCEPTED] = false, 57 + [UWB_RSV_STATE_T_CONFLICT] = false, 58 + [UWB_RSV_STATE_T_PENDING] = false, 59 + [UWB_RSV_STATE_T_DENIED] = false, 60 + [UWB_RSV_STATE_T_RESIZED] = false, 61 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true, 62 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true, 63 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = true, 64 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = true, 65 + }; 66 + 67 + return has_two_drp_ies[rsv->state]; 68 + } 54 69 55 70 /** 56 71 * uwb_rsv_state_str - return a string for a reservation state ··· 101 66 } 102 67 EXPORT_SYMBOL_GPL(uwb_rsv_type_str); 103 68 104 - static void uwb_rsv_dump(struct uwb_rsv *rsv) 69 + void uwb_rsv_dump(char *text, struct uwb_rsv *rsv) 105 70 { 106 71 struct device *dev = &rsv->rc->uwb_dev.dev; 107 72 struct uwb_dev_addr devaddr; ··· 117 82 dev_dbg(dev, "rsv %s -> %s: %s\n", owner, target, uwb_rsv_state_str(rsv->state)); 118 83 } 119 84 85 + static void uwb_rsv_release(struct kref *kref) 86 + { 87 + struct uwb_rsv *rsv = container_of(kref, struct uwb_rsv, kref); 88 + 89 + kfree(rsv); 90 + } 91 + 92 + void uwb_rsv_get(struct uwb_rsv *rsv) 93 + { 94 + kref_get(&rsv->kref); 95 + } 96 + 97 + void 
uwb_rsv_put(struct uwb_rsv *rsv) 98 + { 99 + kref_put(&rsv->kref, uwb_rsv_release); 100 + } 101 + 120 102 /* 121 103 * Get a free stream index for a reservation. 122 104 * ··· 144 92 static int uwb_rsv_get_stream(struct uwb_rsv *rsv) 145 93 { 146 94 struct uwb_rc *rc = rsv->rc; 95 + struct device *dev = &rc->uwb_dev.dev; 147 96 unsigned long *streams_bm; 148 97 int stream; 149 98 ··· 166 113 rsv->stream = stream; 167 114 set_bit(stream, streams_bm); 168 115 116 + dev_dbg(dev, "get stream %d\n", rsv->stream); 117 + 169 118 return 0; 170 119 } 171 120 172 121 static void uwb_rsv_put_stream(struct uwb_rsv *rsv) 173 122 { 174 123 struct uwb_rc *rc = rsv->rc; 124 + struct device *dev = &rc->uwb_dev.dev; 175 125 unsigned long *streams_bm; 176 126 177 127 switch (rsv->target.type) { ··· 189 133 } 190 134 191 135 clear_bit(rsv->stream, streams_bm); 136 + 137 + dev_dbg(dev, "put stream %d\n", rsv->stream); 192 138 } 193 139 194 - /* 195 - * Generate a MAS allocation with a single row component. 196 - */ 197 - static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas, 198 - int first_mas, int mas_per_zone, 199 - int zs, int ze) 140 + void uwb_rsv_backoff_win_timer(unsigned long arg) 200 141 { 201 - struct uwb_mas_bm col; 202 - int z; 142 + struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg; 143 + struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow); 144 + struct device *dev = &rc->uwb_dev.dev; 203 145 204 - bitmap_zero(mas->bm, UWB_NUM_MAS); 205 - bitmap_zero(col.bm, UWB_NUM_MAS); 206 - bitmap_fill(col.bm, mas_per_zone); 207 - bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS); 208 - 209 - for (z = zs; z <= ze; z++) { 210 - bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS); 211 - bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS); 146 + bow->can_reserve_extra_mases = true; 147 + if (bow->total_expired <= 4) { 148 + bow->total_expired++; 149 + } else { 150 + /* after 4 backoff window has expired we can exit from 151 + * the backoff procedure */ 152 + bow->total_expired = 0; 153 + bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1; 212 154 } 155 + dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n: ", bow->total_expired, bow->n); 156 + 157 + /* try to relocate all the "to be moved" relocations */ 158 + uwb_rsv_handle_drp_avail_change(rc); 213 159 } 214 160 215 - /* 216 - * Allocate some MAS for this reservation based on current local 217 - * availability, the reservation parameters (max_mas, min_mas, 218 - * sparsity), and the WiMedia rules for MAS allocations. 219 - * 220 - * Returns -EBUSY is insufficient free MAS are available. 221 - * 222 - * FIXME: to simplify this, only safe reservations with a single row 223 - * component in zones 1 to 15 are tried (zone 0 is skipped to avoid 224 - * problems with the MAS reserved for the BP). 225 - * 226 - * [ECMA-368] section B.2. 227 - */ 228 - static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv) 161 + void uwb_rsv_backoff_win_increment(struct uwb_rc *rc) 229 162 { 230 - static const int safe_mas_in_row[UWB_NUM_ZONES] = { 231 - 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, 232 - }; 233 - int n, r; 234 - struct uwb_mas_bm mas; 235 - bool found = false; 163 + struct uwb_drp_backoff_win *bow = &rc->bow; 164 + struct device *dev = &rc->uwb_dev.dev; 165 + unsigned timeout_us; 236 166 237 - /* 238 - * Search all valid safe allocations until either: too few MAS 239 - * are available; or the smallest allocation with sufficient 240 - * MAS is found. 
241 - * 242 - * The top of the zones are preferred, so space for larger 243 - * allocations is available in the bottom of the zone (e.g., a 244 - * 15 MAS allocation should start in row 14 leaving space for 245 - * a 120 MAS allocation at row 0). 246 - */ 247 - for (n = safe_mas_in_row[0]; n >= 1; n--) { 248 - int num_mas; 167 + dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window); 249 168 250 - num_mas = n * (UWB_NUM_ZONES - 1); 251 - if (num_mas < rsv->min_mas) 252 - break; 253 - if (found && num_mas < rsv->max_mas) 254 - break; 169 + bow->can_reserve_extra_mases = false; 255 170 256 - for (r = UWB_MAS_PER_ZONE-1; r >= 0; r--) { 257 - if (safe_mas_in_row[r] < n) 258 - continue; 259 - uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES); 260 - if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) { 261 - found = true; 262 - break; 263 - } 264 - } 265 - } 171 + if((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX) 172 + return; 266 173 267 - if (!found) 268 - return -EBUSY; 174 + bow->window <<= 1; 175 + bow->n = random32() & (bow->window - 1); 176 + dev_dbg(dev, "new_window=%d, n=%d\n: ", bow->window, bow->n); 269 177 270 - bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); 271 - return 0; 178 + /* reset the timer associated variables */ 179 + timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US; 180 + bow->total_expired = 0; 181 + mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us)); 272 182 } 273 183 274 184 static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) ··· 247 225 * received. 248 226 */ 249 227 if (rsv->is_multicast) { 250 - if (rsv->state == UWB_RSV_STATE_O_INITIATED) 228 + if (rsv->state == UWB_RSV_STATE_O_INITIATED 229 + || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING 230 + || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING 231 + || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) 251 232 sframes = 1; 252 233 if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) 253 234 sframes = 0; 235 + 254 236 } 255 237 256 - rsv->expired = false; 257 238 if (sframes > 0) { 258 239 /* 259 240 * Add an additional 2 superframes to account for the ··· 278 253 rsv->state = new_state; 279 254 rsv->ie_valid = false; 280 255 281 - uwb_rsv_dump(rsv); 256 + uwb_rsv_dump("SU", rsv); 282 257 283 258 uwb_rsv_stroke_timer(rsv); 284 259 uwb_rsv_sched_update(rsv->rc); ··· 292 267 293 268 void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) 294 269 { 270 + struct uwb_rsv_move *mv = &rsv->mv; 271 + 295 272 if (rsv->state == new_state) { 296 273 switch (rsv->state) { 297 274 case UWB_RSV_STATE_O_ESTABLISHED: 275 + case UWB_RSV_STATE_O_MOVE_EXPANDING: 276 + case UWB_RSV_STATE_O_MOVE_COMBINING: 277 + case UWB_RSV_STATE_O_MOVE_REDUCING: 298 278 case UWB_RSV_STATE_T_ACCEPTED: 279 + case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: 280 + case UWB_RSV_STATE_T_RESIZED: 299 281 case UWB_RSV_STATE_NONE: 300 282 uwb_rsv_stroke_timer(rsv); 301 283 break; ··· 314 282 return; 315 283 } 316 284 285 + uwb_rsv_dump("SC", rsv); 286 + 317 287 switch (new_state) { 318 288 case UWB_RSV_STATE_NONE: 319 - uwb_drp_avail_release(rsv->rc, &rsv->mas); 320 - uwb_rsv_put_stream(rsv); 321 289 uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); 322 290 uwb_rsv_callback(rsv); 323 291 break; ··· 327 295 case UWB_RSV_STATE_O_PENDING: 328 296 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); 329 297 break; 298 + case UWB_RSV_STATE_O_MODIFIED: 299 + /* in the companion there are the MASes to drop */ 300 + bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); 301 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED); 
302 + break; 330 303 case UWB_RSV_STATE_O_ESTABLISHED: 304 + if (rsv->state == UWB_RSV_STATE_O_MODIFIED 305 + || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) { 306 + uwb_drp_avail_release(rsv->rc, &mv->companion_mas); 307 + rsv->needs_release_companion_mas = false; 308 + } 331 309 uwb_drp_avail_reserve(rsv->rc, &rsv->mas); 332 310 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); 333 311 uwb_rsv_callback(rsv); 334 312 break; 313 + case UWB_RSV_STATE_O_MOVE_EXPANDING: 314 + rsv->needs_release_companion_mas = true; 315 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); 316 + break; 317 + case UWB_RSV_STATE_O_MOVE_COMBINING: 318 + rsv->needs_release_companion_mas = false; 319 + uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); 320 + bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); 321 + rsv->mas.safe += mv->companion_mas.safe; 322 + rsv->mas.unsafe += mv->companion_mas.unsafe; 323 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); 324 + break; 325 + case UWB_RSV_STATE_O_MOVE_REDUCING: 326 + bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); 327 + rsv->needs_release_companion_mas = true; 328 + rsv->mas.safe = mv->final_mas.safe; 329 + rsv->mas.unsafe = mv->final_mas.unsafe; 330 + bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); 331 + bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS); 332 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); 333 + break; 335 334 case UWB_RSV_STATE_T_ACCEPTED: 335 + case UWB_RSV_STATE_T_RESIZED: 336 + rsv->needs_release_companion_mas = false; 336 337 uwb_drp_avail_reserve(rsv->rc, &rsv->mas); 337 338 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); 338 339 uwb_rsv_callback(rsv); ··· 373 308 case UWB_RSV_STATE_T_DENIED: 374 309 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); 375 310 break; 311 + case UWB_RSV_STATE_T_CONFLICT: 312 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT); 313 + break; 314 + case UWB_RSV_STATE_T_PENDING: 315 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING); 316 + break; 317 + case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: 318 + rsv->needs_release_companion_mas = true; 319 + uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); 320 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); 321 + break; 376 322 default: 377 323 dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", 378 324 uwb_rsv_state_str(new_state), new_state); 379 325 } 326 + } 327 + 328 + static void uwb_rsv_handle_timeout_work(struct work_struct *work) 329 + { 330 + struct uwb_rsv *rsv = container_of(work, struct uwb_rsv, 331 + handle_timeout_work); 332 + struct uwb_rc *rc = rsv->rc; 333 + 334 + mutex_lock(&rc->rsvs_mutex); 335 + 336 + uwb_rsv_dump("TO", rsv); 337 + 338 + switch (rsv->state) { 339 + case UWB_RSV_STATE_O_INITIATED: 340 + if (rsv->is_multicast) { 341 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 342 + goto unlock; 343 + } 344 + break; 345 + case UWB_RSV_STATE_O_MOVE_EXPANDING: 346 + if (rsv->is_multicast) { 347 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); 348 + goto unlock; 349 + } 350 + break; 351 + case UWB_RSV_STATE_O_MOVE_COMBINING: 352 + if (rsv->is_multicast) { 353 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); 354 + goto unlock; 355 + } 356 + break; 357 + case UWB_RSV_STATE_O_MOVE_REDUCING: 358 + if (rsv->is_multicast) { 359 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 360 + goto unlock; 361 + } 362 + break; 363 + case UWB_RSV_STATE_O_ESTABLISHED: 364 + if 
(rsv->is_multicast) 365 + goto unlock; 366 + break; 367 + case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: 368 + /* 369 + * The time out could be for the main or of the 370 + * companion DRP, assume it's for the companion and 371 + * drop that first. A further time out is required to 372 + * drop the main. 373 + */ 374 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); 375 + uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); 376 + goto unlock; 377 + default: 378 + break; 379 + } 380 + 381 + uwb_rsv_remove(rsv); 382 + 383 + unlock: 384 + mutex_unlock(&rc->rsvs_mutex); 380 385 } 381 386 382 387 static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) ··· 459 324 460 325 INIT_LIST_HEAD(&rsv->rc_node); 461 326 INIT_LIST_HEAD(&rsv->pal_node); 327 + kref_init(&rsv->kref); 462 328 init_timer(&rsv->timer); 463 329 rsv->timer.function = uwb_rsv_timer; 464 330 rsv->timer.data = (unsigned long)rsv; 465 331 466 332 rsv->rc = rc; 333 + INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work); 467 334 468 335 return rsv; 469 - } 470 - 471 - static void uwb_rsv_free(struct uwb_rsv *rsv) 472 - { 473 - uwb_dev_put(rsv->owner); 474 - if (rsv->target.type == UWB_RSV_TARGET_DEV) 475 - uwb_dev_put(rsv->target.dev); 476 - kfree(rsv); 477 336 } 478 337 479 338 /** ··· 500 371 501 372 void uwb_rsv_remove(struct uwb_rsv *rsv) 502 373 { 374 + uwb_rsv_dump("RM", rsv); 375 + 503 376 if (rsv->state != UWB_RSV_STATE_NONE) 504 377 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 378 + 379 + if (rsv->needs_release_companion_mas) 380 + uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); 381 + uwb_drp_avail_release(rsv->rc, &rsv->mas); 382 + 383 + if (uwb_rsv_is_owner(rsv)) 384 + uwb_rsv_put_stream(rsv); 385 + 505 386 del_timer_sync(&rsv->timer); 506 - list_del(&rsv->rc_node); 507 - uwb_rsv_free(rsv); 387 + uwb_dev_put(rsv->owner); 388 + if (rsv->target.type == UWB_RSV_TARGET_DEV) 389 + uwb_dev_put(rsv->target.dev); 390 + 391 + list_del_init(&rsv->rc_node); 392 + uwb_rsv_put(rsv); 508 393 } 509 394 510 395 /** 511 396 * uwb_rsv_destroy - free a UWB reservation structure 512 397 * @rsv: the reservation to free 513 398 * 514 - * The reservation will be terminated if it is pending or established. 399 + * The reservation must already be terminated. 515 400 */ 516 401 void uwb_rsv_destroy(struct uwb_rsv *rsv) 517 402 { 518 - struct uwb_rc *rc = rsv->rc; 519 - 520 - mutex_lock(&rc->rsvs_mutex); 521 - uwb_rsv_remove(rsv); 522 - mutex_unlock(&rc->rsvs_mutex); 403 + uwb_rsv_put(rsv); 523 404 } 524 405 EXPORT_SYMBOL_GPL(uwb_rsv_destroy); 525 406 ··· 538 399 * @rsv: the reservation 539 400 * 540 401 * The PAL should fill in @rsv's owner, target, type, max_mas, 541 - * min_mas, sparsity and is_multicast fields. If the target is a 402 + * min_mas, max_interval and is_multicast fields. If the target is a 542 403 * uwb_dev it must be referenced. 
543 404 * 544 405 * The reservation's callback will be called when the reservation is ··· 547 408 int uwb_rsv_establish(struct uwb_rsv *rsv) 548 409 { 549 410 struct uwb_rc *rc = rsv->rc; 411 + struct uwb_mas_bm available; 550 412 int ret; 551 413 552 414 mutex_lock(&rc->rsvs_mutex); 553 - 554 415 ret = uwb_rsv_get_stream(rsv); 555 416 if (ret) 556 417 goto out; 557 418 558 - ret = uwb_rsv_alloc_mas(rsv); 559 - if (ret) { 419 + rsv->tiebreaker = random32() & 1; 420 + /* get available mas bitmap */ 421 + uwb_drp_available(rc, &available); 422 + 423 + ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas); 424 + if (ret == UWB_RSV_ALLOC_NOT_FOUND) { 425 + ret = -EBUSY; 560 426 uwb_rsv_put_stream(rsv); 561 427 goto out; 562 428 } 563 429 430 + ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas); 431 + if (ret != 0) { 432 + uwb_rsv_put_stream(rsv); 433 + goto out; 434 + } 435 + 436 + uwb_rsv_get(rsv); 564 437 list_add_tail(&rsv->rc_node, &rc->reservations); 565 438 rsv->owner = &rc->uwb_dev; 566 439 uwb_dev_get(rsv->owner); ··· 588 437 * @rsv: the reservation to modify 589 438 * @max_mas: new maximum MAS to reserve 590 439 * @min_mas: new minimum MAS to reserve 591 - * @sparsity: new sparsity to use 440 + * @max_interval: new max_interval to use 592 441 * 593 442 * FIXME: implement this once there are PALs that use it. 594 443 */ 595 - int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity) 444 + int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval) 596 445 { 597 446 return -ENOSYS; 598 447 } 599 448 EXPORT_SYMBOL_GPL(uwb_rsv_modify); 449 + 450 + /* 451 + * move an already established reservation (rc->rsvs_mutex must be 452 + * taken when this function is called) 453 + */ 454 + int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available) 455 + { 456 + struct uwb_rc *rc = rsv->rc; 457 + struct uwb_drp_backoff_win *bow = &rc->bow; 458 + struct device *dev = &rc->uwb_dev.dev; 459 + struct uwb_rsv_move *mv; 460 + int ret = 0; 461 + 462 + if (bow->can_reserve_extra_mases == false) 463 + return -EBUSY; 464 + 465 + mv = &rsv->mv; 466 + 467 + if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) { 468 + 469 + if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) { 470 + /* We want to move the reservation */ 471 + bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS); 472 + uwb_drp_avail_reserve_pending(rc, &mv->companion_mas); 473 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); 474 + } 475 + } else { 476 + dev_dbg(dev, "new allocation not found\n"); 477 + } 478 + 479 + return ret; 480 + } 481 + 482 + /* It will try to move every reservation in state O_ESTABLISHED giving 483 + * to the MAS allocator algorithm an availability that is the real one 484 + * plus the allocation already established from the reservation. 
*/ 485 + void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc) 486 + { 487 + struct uwb_drp_backoff_win *bow = &rc->bow; 488 + struct uwb_rsv *rsv; 489 + struct uwb_mas_bm mas; 490 + 491 + if (bow->can_reserve_extra_mases == false) 492 + return; 493 + 494 + list_for_each_entry(rsv, &rc->reservations, rc_node) { 495 + if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED || 496 + rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) { 497 + uwb_drp_available(rc, &mas); 498 + bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS); 499 + uwb_rsv_try_move(rsv, &mas); 500 + } 501 + } 502 + 503 + } 600 504 601 505 /** 602 506 * uwb_rsv_terminate - terminate an established reservation ··· 669 463 670 464 mutex_lock(&rc->rsvs_mutex); 671 465 672 - uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 466 + if (rsv->state != UWB_RSV_STATE_NONE) 467 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 673 468 674 469 mutex_unlock(&rc->rsvs_mutex); 675 470 } ··· 684 477 * 685 478 * Reservation requests from peers are denied unless a PAL accepts it 686 479 * by calling this function. 480 + * 481 + * The PAL must call uwb_rsv_destroy() for all accepted reservations before 482 + * calling uwb_pal_unregister(). 687 483 */ 688 484 void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv) 689 485 { 486 + uwb_rsv_get(rsv); 487 + 690 488 rsv->callback = cb; 691 489 rsv->pal_priv = pal_priv; 692 490 rsv->state = UWB_RSV_STATE_T_ACCEPTED; ··· 742 530 uwb_dev_get(rsv->owner); 743 531 rsv->target.type = UWB_RSV_TARGET_DEV; 744 532 rsv->target.dev = &rc->uwb_dev; 533 + uwb_dev_get(&rc->uwb_dev); 745 534 rsv->type = uwb_ie_drp_type(drp_ie); 746 535 rsv->stream = uwb_ie_drp_stream_index(drp_ie); 747 - set_bit(rsv->stream, rsv->owner->streams); 748 536 uwb_drp_ie_to_bm(&rsv->mas, drp_ie); 749 537 750 538 /* ··· 752 540 * deny the request. 753 541 */ 754 542 rsv->state = UWB_RSV_STATE_T_DENIED; 755 - spin_lock(&rc->pal_lock); 543 + mutex_lock(&rc->uwb_dev.mutex); 756 544 list_for_each_entry(pal, &rc->pals, node) { 757 545 if (pal->new_rsv) 758 - pal->new_rsv(rsv); 546 + pal->new_rsv(pal, rsv); 759 547 if (rsv->state == UWB_RSV_STATE_T_ACCEPTED) 760 548 break; 761 549 } 762 - spin_unlock(&rc->pal_lock); 550 + mutex_unlock(&rc->uwb_dev.mutex); 763 551 764 552 list_add_tail(&rsv->rc_node, &rc->reservations); 765 553 state = rsv->state; 766 554 rsv->state = UWB_RSV_STATE_NONE; 767 - uwb_rsv_set_state(rsv, state); 555 + 556 + /* FIXME: do something sensible here */ 557 + if (state == UWB_RSV_STATE_T_ACCEPTED 558 + && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) { 559 + /* FIXME: do something sensible here */ 560 + } else { 561 + uwb_rsv_set_state(rsv, state); 562 + } 768 563 769 564 return rsv; 770 565 } 566 + 567 + /** 568 + * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservation 569 + * @rsv: the reservation. 570 + * @mas: returns the available MAS. 571 + * 572 + * The usable MAS of a reservation may be less than the negotiated MAS 573 + * if alien BPs are present. 574 + */ 575 + void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas) 576 + { 577 + bitmap_zero(mas->bm, UWB_NUM_MAS); 578 + bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); 579 + } 580 + EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas); 771 581 772 582 /** 773 583 * uwb_rsv_find - find a reservation for a received DRP IE. 
··· 830 596 bool ie_updated = false; 831 597 832 598 list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { 833 - if (rsv->expired) 834 - uwb_drp_handle_timeout(rsv); 835 599 if (!rsv->ie_valid) { 836 600 uwb_drp_ie_update(rsv); 837 601 ie_updated = true; ··· 839 607 return ie_updated; 840 608 } 841 609 610 + void uwb_rsv_queue_update(struct uwb_rc *rc) 611 + { 612 + unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; 613 + 614 + queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us)); 615 + } 616 + 617 + /** 618 + * uwb_rsv_sched_update - schedule an update of the DRP IEs 619 + * @rc: the radio controller. 620 + * 621 + * To improve performance and ensure correctness with [ECMA-368] the 622 + * number of SET-DRP-IE commands that are done is limited. 623 + * 624 + * DRP IE updates come from two sources: DRP events from the hardware 625 + * which all occur at the beginning of the superframe ('synchronous' 626 + * events) and reservation establishment/termination requests from 627 + * PALs or timers ('asynchronous' events). 628 + * 629 + * A delayed work ensures that all the synchronous events result in 630 + * one SET-DRP-IE command. 631 + * 632 + * Additional logic (the set_drp_ie_pending and rsv_updated_postponed 633 + * flags) will prevent an asynchronous event starting a SET-DRP-IE 634 + * command if one is currently awaiting a response. 635 + * 636 + * FIXME: this does leave a window where an asynchronous event can delay 637 + * the SET-DRP-IE for a synchronous event by one superframe. 638 + */ 842 639 void uwb_rsv_sched_update(struct uwb_rc *rc) 843 640 { 844 - queue_work(rc->rsv_workq, &rc->rsv_update_work); 641 + spin_lock(&rc->rsvs_lock); 642 + if (!delayed_work_pending(&rc->rsv_update_work)) { 643 + if (rc->set_drp_ie_pending > 0) { 644 + rc->set_drp_ie_pending++; 645 + goto unlock; 646 + } 647 + uwb_rsv_queue_update(rc); 648 + } 649 + unlock: 650 + spin_unlock(&rc->rsvs_lock); 845 651 } 846 652 847 653 /* ··· 888 618 */ 889 619 static void uwb_rsv_update_work(struct work_struct *work) 890 620 { 891 - struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work); 621 + struct uwb_rc *rc = container_of(work, struct uwb_rc, 622 + rsv_update_work.work); 892 623 bool ie_updated; 893 624 894 625 mutex_lock(&rc->rsvs_mutex); ··· 901 630 ie_updated = true; 902 631 } 903 632 904 - if (ie_updated) 633 + if (ie_updated && (rc->set_drp_ie_pending == 0)) 905 634 uwb_rc_send_all_drp_ie(rc); 635 + 636 + mutex_unlock(&rc->rsvs_mutex); 637 + } 638 + 639 + static void uwb_rsv_alien_bp_work(struct work_struct *work) 640 + { 641 + struct uwb_rc *rc = container_of(work, struct uwb_rc, 642 + rsv_alien_bp_work.work); 643 + struct uwb_rsv *rsv; 644 + 645 + mutex_lock(&rc->rsvs_mutex); 646 + 647 + list_for_each_entry(rsv, &rc->reservations, rc_node) { 648 + if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) { 649 + rsv->callback(rsv); 650 + } 651 + } 906 652 907 653 mutex_unlock(&rc->rsvs_mutex); 908 654 } ··· 928 640 { 929 641 struct uwb_rsv *rsv = (struct uwb_rsv *)arg; 930 642 931 - rsv->expired = true; 932 - uwb_rsv_sched_update(rsv->rc); 643 + queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work); 644 + } 645 + 646 + /** 647 + * uwb_rsv_remove_all - remove all reservations 648 + * @rc: the radio controller 649 + * 650 + * A DRP IE update is not done. 
651 + */ 652 + void uwb_rsv_remove_all(struct uwb_rc *rc) 653 + { 654 + struct uwb_rsv *rsv, *t; 655 + 656 + mutex_lock(&rc->rsvs_mutex); 657 + list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { 658 + uwb_rsv_remove(rsv); 659 + } 660 + /* Cancel any postponed update. */ 661 + rc->set_drp_ie_pending = 0; 662 + mutex_unlock(&rc->rsvs_mutex); 663 + 664 + cancel_delayed_work_sync(&rc->rsv_update_work); 933 665 } 934 666 935 667 void uwb_rsv_init(struct uwb_rc *rc) 936 668 { 937 669 INIT_LIST_HEAD(&rc->reservations); 670 + INIT_LIST_HEAD(&rc->cnflt_alien_list); 938 671 mutex_init(&rc->rsvs_mutex); 939 - INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work); 672 + spin_lock_init(&rc->rsvs_lock); 673 + INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work); 674 + INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work); 675 + rc->bow.can_reserve_extra_mases = true; 676 + rc->bow.total_expired = 0; 677 + rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1; 678 + init_timer(&rc->bow.timer); 679 + rc->bow.timer.function = uwb_rsv_backoff_win_timer; 680 + rc->bow.timer.data = (unsigned long)&rc->bow; 940 681 941 682 bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); 942 683 } ··· 984 667 985 668 void uwb_rsv_cleanup(struct uwb_rc *rc) 986 669 { 987 - struct uwb_rsv *rsv, *t; 988 - 989 - mutex_lock(&rc->rsvs_mutex); 990 - list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { 991 - uwb_rsv_remove(rsv); 992 - } 993 - mutex_unlock(&rc->rsvs_mutex); 994 - 995 - cancel_work_sync(&rc->rsv_update_work); 670 + uwb_rsv_remove_all(rc); 996 671 destroy_workqueue(rc->rsv_workq); 997 672 }
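Reservation parameters now use max_interval in place of sparsity, and reservations are reference counted. A sketch of how a PAL might establish and later release a reservation with the new fields (example_rsv_cb(), example_reserve() and the parameter values are illustrative; UWB_DRP_TYPE_HARD is assumed to be an appropriate reservation type for the PAL):

static void example_rsv_cb(struct uwb_rsv *rsv)
{
	if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) {
		/* rsv->mas now holds the granted allocation. */
	} else if (rsv->state == UWB_RSV_STATE_NONE) {
		uwb_rsv_destroy(rsv);	/* terminated: drop our reference */
	}
}

static int example_reserve(struct uwb_rc *rc, struct uwb_dev *target)
{
	struct uwb_rsv *rsv;
	int ret;

	rsv = uwb_rsv_create(rc, example_rsv_cb, NULL);
	if (rsv == NULL)
		return -ENOMEM;

	rsv->target.type  = UWB_RSV_TARGET_DEV;
	rsv->target.dev   = target;	/* caller must hold a reference */
	rsv->type         = UWB_DRP_TYPE_HARD;
	rsv->max_mas      = 256;
	rsv->min_mas      = 16;
	rsv->max_interval = 2;		/* allocate in at least every other zone */
	rsv->is_multicast = false;

	ret = uwb_rsv_establish(rsv);
	if (ret)
		uwb_rsv_destroy(rsv);
	return ret;
}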
+43 -19
drivers/uwb/umc-bus.c
··· 11 11 #include <linux/uwb/umc.h> 12 12 #include <linux/pci.h> 13 13 14 - static int umc_bus_unbind_helper(struct device *dev, void *data) 14 + static int umc_bus_pre_reset_helper(struct device *dev, void *data) 15 15 { 16 - struct device *parent = data; 16 + int ret = 0; 17 17 18 - if (dev->parent == parent && dev->driver) 19 - device_release_driver(dev); 20 - return 0; 18 + if (dev->driver) { 19 + struct umc_dev *umc = to_umc_dev(dev); 20 + struct umc_driver *umc_drv = to_umc_driver(dev->driver); 21 + 22 + if (umc_drv->pre_reset) 23 + ret = umc_drv->pre_reset(umc); 24 + else 25 + device_release_driver(dev); 26 + } 27 + return ret; 28 + } 29 + 30 + static int umc_bus_post_reset_helper(struct device *dev, void *data) 31 + { 32 + int ret = 0; 33 + 34 + if (dev->driver) { 35 + struct umc_dev *umc = to_umc_dev(dev); 36 + struct umc_driver *umc_drv = to_umc_driver(dev->driver); 37 + 38 + if (umc_drv->post_reset) 39 + ret = umc_drv->post_reset(umc); 40 + } else 41 + ret = device_attach(dev); 42 + 43 + return ret; 21 44 } 22 45 23 46 /** 24 47 * umc_controller_reset - reset the whole UMC controller 25 48 * @umc: the UMC device for the radio controller. 26 49 * 27 - * Drivers will be unbound from all UMC devices belonging to the 28 - * controller and then the radio controller will be rebound. The 29 - * radio controller is expected to do a full hardware reset when it is 30 - * probed. 50 + * Drivers for all capabilities of the controller will have their 51 + * pre_reset methods called or be unbound from their device. Then all 52 + * post_reset methods will be called or the drivers will be rebound. 53 + * 54 + * Radio controllers must provide pre_reset and post_reset methods and 55 + * reset the hardware in their start method. 31 56 * 32 57 * If this is called while a probe() or remove() is in progress it 33 58 * will return -EAGAIN and not perform the reset. ··· 60 35 int umc_controller_reset(struct umc_dev *umc) 61 36 { 62 37 struct device *parent = umc->dev.parent; 63 - int ret; 38 + int ret = 0; 64 39 65 - if (down_trylock(&parent->sem)) 40 + if(down_trylock(&parent->sem)) 66 41 return -EAGAIN; 67 - bus_for_each_dev(&umc_bus_type, NULL, parent, umc_bus_unbind_helper); 68 - ret = device_attach(&umc->dev); 69 - if (ret == 1) 70 - ret = 0; 42 + ret = device_for_each_child(parent, parent, umc_bus_pre_reset_helper); 43 + if (ret >= 0) 44 + device_for_each_child(parent, parent, umc_bus_post_reset_helper); 71 45 up(&parent->sem); 72 46 73 47 return ret; ··· 99 75 if (!dev->driver) 100 76 ret = device_attach(dev); 101 77 102 - return ret < 0 ? ret : 0; 78 + return ret; 103 79 } 104 80 105 - static void umc_bus_rescan(void) 81 + static void umc_bus_rescan(struct device *parent) 106 82 { 107 83 int err; 108 84 ··· 110 86 * We can't use bus_rescan_devices() here as it deadlocks when 111 87 * it tries to retake the dev->parent semaphore. 112 88 */ 113 - err = bus_for_each_dev(&umc_bus_type, NULL, NULL, umc_bus_rescan_helper); 89 + err = device_for_each_child(parent, NULL, umc_bus_rescan_helper); 114 90 if (err < 0) 115 91 printk(KERN_WARNING "%s: rescan of bus failed: %d\n", 116 92 KBUILD_MODNAME, err); ··· 144 120 if (err) 145 121 put_device(dev); 146 122 else 147 - umc_bus_rescan(); 123 + umc_bus_rescan(dev->parent); 148 124 149 125 return err; 150 126 }
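A capability driver that can survive a controller reset now supplies pre_reset/post_reset methods; drivers that do not provide them keep the old unbind/rebind behaviour. A sketch of the new hooks (the example identifiers are hypothetical; only the pre_reset/post_reset members are from this change):

static int example_umc_pre_reset(struct umc_dev *umc)
{
	/* Quiesce the capability: the controller is about to be reset. */
	return 0;
}

static int example_umc_post_reset(struct umc_dev *umc)
{
	/* Re-program any hardware state lost across the reset. */
	return 0;
}

static struct umc_driver example_umc_driver = {
	.name       = "example-umc",
	.pre_reset  = example_umc_pre_reset,
	.post_reset = example_umc_post_reset,
	/* .cap_id, .probe, .remove, etc. as before */
};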
+1 -10
drivers/uwb/umc-dev.c
··· 7 7 */ 8 8 #include <linux/kernel.h> 9 9 #include <linux/uwb/umc.h> 10 - #define D_LOCAL 0 11 - #include <linux/uwb/debug.h> 12 10 13 11 static void umc_device_release(struct device *dev) 14 12 { ··· 29 31 30 32 umc = kzalloc(sizeof(struct umc_dev), GFP_KERNEL); 31 33 if (umc) { 32 - snprintf(umc->dev.bus_id, sizeof(umc->dev.bus_id), "%s-%d", 33 - parent->bus_id, n); 34 + dev_set_name(&umc->dev, "%s-%d", dev_name(parent), n); 34 35 umc->dev.parent = parent; 35 36 umc->dev.bus = &umc_bus_type; 36 37 umc->dev.release = umc_device_release; ··· 51 54 { 52 55 int err; 53 56 54 - d_fnstart(3, &umc->dev, "(umc_dev %p)\n", umc); 55 - 56 57 err = request_resource(umc->resource.parent, &umc->resource); 57 58 if (err < 0) { 58 59 dev_err(&umc->dev, "can't allocate resource range " ··· 64 69 err = device_register(&umc->dev); 65 70 if (err < 0) 66 71 goto error_device_register; 67 - d_fnend(3, &umc->dev, "(umc_dev %p) = 0\n", umc); 68 72 return 0; 69 73 70 74 error_device_register: 71 75 release_resource(&umc->resource); 72 76 error_request_resource: 73 - d_fnend(3, &umc->dev, "(umc_dev %p) = %d\n", umc, err); 74 77 return err; 75 78 } 76 79 EXPORT_SYMBOL_GPL(umc_device_register); ··· 88 95 if (!umc) 89 96 return; 90 97 dev = get_device(&umc->dev); 91 - d_fnstart(3, dev, "(umc_dev %p)\n", umc); 92 98 device_unregister(&umc->dev); 93 99 release_resource(&umc->resource); 94 - d_fnend(3, dev, "(umc_dev %p) = void\n", umc); 95 100 put_device(dev); 96 101 } 97 102 EXPORT_SYMBOL_GPL(umc_device_unregister);
+94 -55
drivers/uwb/uwb-debug.c
··· 4 4 * 5 5 * Copyright (C) 2005-2006 Intel Corporation 6 6 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> 7 + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 7 8 * 8 9 * This program is free software; you can redistribute it and/or 9 10 * modify it under the terms of the GNU General Public License version ··· 34 33 #include <linux/seq_file.h> 35 34 36 35 #include <linux/uwb/debug-cmd.h> 37 - #define D_LOCAL 0 38 - #include <linux/uwb/debug.h> 39 36 40 37 #include "uwb-internal.h" 41 - 42 - void dump_bytes(struct device *dev, const void *_buf, size_t rsize) 43 - { 44 - const char *buf = _buf; 45 - char line[32]; 46 - size_t offset = 0; 47 - int cnt, cnt2; 48 - for (cnt = 0; cnt < rsize; cnt += 8) { 49 - size_t rtop = rsize - cnt < 8 ? rsize - cnt : 8; 50 - for (offset = cnt2 = 0; cnt2 < rtop; cnt2++) { 51 - offset += scnprintf(line + offset, sizeof(line) - offset, 52 - "%02x ", buf[cnt + cnt2] & 0xff); 53 - } 54 - if (dev) 55 - dev_info(dev, "%s\n", line); 56 - else 57 - printk(KERN_INFO "%s\n", line); 58 - } 59 - } 60 - EXPORT_SYMBOL_GPL(dump_bytes); 61 38 62 39 /* 63 40 * Debug interface ··· 63 84 struct dentry *reservations_f; 64 85 struct dentry *accept_f; 65 86 struct dentry *drp_avail_f; 87 + spinlock_t list_lock; 66 88 }; 67 89 68 90 static struct dentry *root_dir; 69 91 70 92 static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv) 71 93 { 72 - struct uwb_rc *rc = rsv->rc; 73 - struct device *dev = &rc->uwb_dev.dev; 74 - struct uwb_dev_addr devaddr; 75 - char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; 94 + struct uwb_dbg *dbg = rsv->pal_priv; 76 95 77 - uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); 78 - if (rsv->target.type == UWB_RSV_TARGET_DEV) 79 - devaddr = rsv->target.dev->dev_addr; 80 - else 81 - devaddr = rsv->target.devaddr; 82 - uwb_dev_addr_print(target, sizeof(target), &devaddr); 96 + uwb_rsv_dump("debug", rsv); 83 97 84 - dev_dbg(dev, "debug: rsv %s -> %s: %s\n", 85 - owner, target, uwb_rsv_state_str(rsv->state)); 98 + if (rsv->state == UWB_RSV_STATE_NONE) { 99 + spin_lock(&dbg->list_lock); 100 + list_del(&rsv->pal_node); 101 + spin_unlock(&dbg->list_lock); 102 + uwb_rsv_destroy(rsv); 103 + } 86 104 } 87 105 88 106 static int cmd_rsv_establish(struct uwb_rc *rc, ··· 95 119 if (target == NULL) 96 120 return -ENODEV; 97 121 98 - rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, NULL); 122 + rsv = uwb_rsv_create(rc, uwb_dbg_rsv_cb, rc->dbg); 99 123 if (rsv == NULL) { 100 124 uwb_dev_put(target); 101 125 return -ENOMEM; 102 126 } 103 127 104 - rsv->owner = &rc->uwb_dev; 105 - rsv->target.type = UWB_RSV_TARGET_DEV; 106 - rsv->target.dev = target; 107 - rsv->type = cmd->type; 108 - rsv->max_mas = cmd->max_mas; 109 - rsv->min_mas = cmd->min_mas; 110 - rsv->sparsity = cmd->sparsity; 128 + rsv->target.type = UWB_RSV_TARGET_DEV; 129 + rsv->target.dev = target; 130 + rsv->type = cmd->type; 131 + rsv->max_mas = cmd->max_mas; 132 + rsv->min_mas = cmd->min_mas; 133 + rsv->max_interval = cmd->max_interval; 111 134 112 135 ret = uwb_rsv_establish(rsv); 113 136 if (ret) 114 137 uwb_rsv_destroy(rsv); 115 - else 138 + else { 139 + spin_lock(&(rc->dbg)->list_lock); 116 140 list_add_tail(&rsv->pal_node, &rc->dbg->rsvs); 117 - 141 + spin_unlock(&(rc->dbg)->list_lock); 142 + } 118 143 return ret; 119 144 } 120 145 ··· 125 148 struct uwb_rsv *rsv, *found = NULL; 126 149 int i = 0; 127 150 151 + spin_lock(&(rc->dbg)->list_lock); 152 + 128 153 list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) { 129 154 if (i == cmd->index) { 130 155 found = rsv; 156 + uwb_rsv_get(found); 
131 157 break; 132 158 } 159 + i++; 133 160 } 161 + 162 + spin_unlock(&(rc->dbg)->list_lock); 163 + 134 164 if (!found) 135 165 return -EINVAL; 136 166 137 - list_del(&found->pal_node); 138 167 uwb_rsv_terminate(found); 168 + uwb_rsv_put(found); 139 169 140 170 return 0; 171 + } 172 + 173 + static int cmd_ie_add(struct uwb_rc *rc, struct uwb_dbg_cmd_ie *ie_to_add) 174 + { 175 + return uwb_rc_ie_add(rc, 176 + (const struct uwb_ie_hdr *) ie_to_add->data, 177 + ie_to_add->len); 178 + } 179 + 180 + static int cmd_ie_rm(struct uwb_rc *rc, struct uwb_dbg_cmd_ie *ie_to_rm) 181 + { 182 + return uwb_rc_ie_rm(rc, ie_to_rm->data[0]); 141 183 } 142 184 143 185 static int command_open(struct inode *inode, struct file *file) ··· 171 175 { 172 176 struct uwb_rc *rc = file->private_data; 173 177 struct uwb_dbg_cmd cmd; 174 - int ret; 175 - 178 + int ret = 0; 179 + 176 180 if (len != sizeof(struct uwb_dbg_cmd)) 177 181 return -EINVAL; 178 182 ··· 185 189 break; 186 190 case UWB_DBG_CMD_RSV_TERMINATE: 187 191 ret = cmd_rsv_terminate(rc, &cmd.rsv_terminate); 192 + break; 193 + case UWB_DBG_CMD_IE_ADD: 194 + ret = cmd_ie_add(rc, &cmd.ie_add); 195 + break; 196 + case UWB_DBG_CMD_IE_RM: 197 + ret = cmd_ie_rm(rc, &cmd.ie_rm); 198 + break; 199 + case UWB_DBG_CMD_RADIO_START: 200 + ret = uwb_radio_start(&rc->dbg->pal); 201 + break; 202 + case UWB_DBG_CMD_RADIO_STOP: 203 + uwb_radio_stop(&rc->dbg->pal); 188 204 break; 189 205 default: 190 206 return -EINVAL; ··· 291 283 .owner = THIS_MODULE, 292 284 }; 293 285 294 - static void uwb_dbg_new_rsv(struct uwb_rsv *rsv) 286 + static void uwb_dbg_channel_changed(struct uwb_pal *pal, int channel) 295 287 { 296 - struct uwb_rc *rc = rsv->rc; 288 + struct device *dev = &pal->rc->uwb_dev.dev; 297 289 298 - if (rc->dbg->accept) 299 - uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, NULL); 290 + if (channel > 0) 291 + dev_info(dev, "debug: channel %d started\n", channel); 292 + else 293 + dev_info(dev, "debug: channel stopped\n"); 294 + } 295 + 296 + static void uwb_dbg_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv) 297 + { 298 + struct uwb_dbg *dbg = container_of(pal, struct uwb_dbg, pal); 299 + 300 + if (dbg->accept) { 301 + spin_lock(&dbg->list_lock); 302 + list_add_tail(&rsv->pal_node, &dbg->rsvs); 303 + spin_unlock(&dbg->list_lock); 304 + uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, dbg); 305 + } 300 306 } 301 307 302 308 /** ··· 324 302 return; 325 303 326 304 INIT_LIST_HEAD(&rc->dbg->rsvs); 305 + spin_lock_init(&(rc->dbg)->list_lock); 327 306 328 307 uwb_pal_init(&rc->dbg->pal); 308 + rc->dbg->pal.rc = rc; 309 + rc->dbg->pal.channel_changed = uwb_dbg_channel_changed; 329 310 rc->dbg->pal.new_rsv = uwb_dbg_new_rsv; 330 - uwb_pal_register(rc, &rc->dbg->pal); 311 + uwb_pal_register(&rc->dbg->pal); 312 + 331 313 if (root_dir) { 332 314 rc->dbg->root_d = debugfs_create_dir(dev_name(&rc->uwb_dev.dev), 333 315 root_dir); ··· 351 325 } 352 326 353 327 /** 354 - * uwb_dbg_add_rc - remove a radio controller's debug interface 328 + * uwb_dbg_del_rc - remove a radio controller's debug interface 355 329 * @rc: the radio controller 356 330 */ 357 331 void uwb_dbg_del_rc(struct uwb_rc *rc) ··· 362 336 return; 363 337 364 338 list_for_each_entry_safe(rsv, t, &rc->dbg->rsvs, pal_node) { 365 - uwb_rsv_destroy(rsv); 339 + uwb_rsv_terminate(rsv); 366 340 } 367 341 368 - uwb_pal_unregister(rc, &rc->dbg->pal); 342 + uwb_pal_unregister(&rc->dbg->pal); 369 343 370 344 if (root_dir) { 371 345 debugfs_remove(rc->dbg->drp_avail_f); ··· 390 364 void uwb_dbg_exit(void) 391 365 { 392 366 debugfs_remove(root_dir); 367 + 
} 368 + 369 + /** 370 + * uwb_dbg_create_pal_dir - create a debugfs directory for a PAL 371 + * @pal: The PAL. 372 + */ 373 + struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal) 374 + { 375 + struct uwb_rc *rc = pal->rc; 376 + 377 + if (root_dir && rc->dbg && rc->dbg->root_d && pal->name) 378 + return debugfs_create_dir(pal->name, rc->dbg->root_d); 379 + return NULL; 393 380 }
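The debug PAL's command file now also accepts IE add/remove and radio start/stop commands. A userspace sketch of issuing UWB_DBG_CMD_RADIO_START (the debugfs path is illustrative and assumes debugfs is mounted at /sys/kernel/debug; the struct layout comes from <linux/uwb/debug-cmd.h>, which may need to be copied into the build if the header is not exported):

#include <fcntl.h>
#include <unistd.h>
#include <linux/uwb/debug-cmd.h>

int main(void)
{
	struct uwb_dbg_cmd cmd = { .type = UWB_DBG_CMD_RADIO_START };
	int fd = open("/sys/kernel/debug/uwb/uwb0/command", O_WRONLY);

	if (fd < 0)
		return 1;
	if (write(fd, &cmd, sizeof(cmd)) != sizeof(cmd)) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}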
+99 -27
drivers/uwb/uwb-internal.h
··· 66 66 unsigned channel, enum uwb_scan_type type, 67 67 unsigned bpst_offset); 68 68 extern int uwb_rc_send_all_drp_ie(struct uwb_rc *rc); 69 - extern ssize_t uwb_rc_print_IEs(struct uwb_rc *rc, char *, size_t); 70 - extern void uwb_rc_ie_init(struct uwb_rc *); 71 - extern void uwb_rc_ie_init(struct uwb_rc *); 72 - extern ssize_t uwb_rc_ie_setup(struct uwb_rc *); 73 - extern void uwb_rc_ie_release(struct uwb_rc *); 74 - extern int uwb_rc_ie_add(struct uwb_rc *, 75 - const struct uwb_ie_hdr *, size_t); 76 - extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie); 69 + 70 + void uwb_rc_ie_init(struct uwb_rc *); 71 + int uwb_rc_ie_setup(struct uwb_rc *); 72 + void uwb_rc_ie_release(struct uwb_rc *); 73 + int uwb_ie_dump_hex(const struct uwb_ie_hdr *ies, size_t len, 74 + char *buf, size_t size); 75 + int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *); 76 + 77 77 78 78 extern const char *uwb_rc_strerror(unsigned code); 79 79 ··· 92 92 93 93 struct uwb_rc_neh; 94 94 95 + extern int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, 96 + struct uwb_rccb *cmd, size_t cmd_size, 97 + u8 expected_type, u16 expected_event, 98 + uwb_rc_cmd_cb_f cb, void *arg); 99 + 100 + 95 101 void uwb_rc_neh_create(struct uwb_rc *rc); 96 102 void uwb_rc_neh_destroy(struct uwb_rc *rc); 97 103 ··· 112 106 extern int uwb_est_create(void); 113 107 extern void uwb_est_destroy(void); 114 108 109 + /* 110 + * UWB conflicting alien reservations 111 + */ 112 + struct uwb_cnflt_alien { 113 + struct uwb_rc *rc; 114 + struct list_head rc_node; 115 + struct uwb_mas_bm mas; 116 + struct timer_list timer; 117 + struct work_struct cnflt_update_work; 118 + }; 115 119 120 + enum uwb_uwb_rsv_alloc_result { 121 + UWB_RSV_ALLOC_FOUND = 0, 122 + UWB_RSV_ALLOC_NOT_FOUND, 123 + }; 124 + 125 + enum uwb_rsv_mas_status { 126 + UWB_RSV_MAS_NOT_AVAIL = 1, 127 + UWB_RSV_MAS_SAFE, 128 + UWB_RSV_MAS_UNSAFE, 129 + }; 130 + 131 + struct uwb_rsv_col_set_info { 132 + unsigned char start_col; 133 + unsigned char interval; 134 + unsigned char safe_mas_per_col; 135 + unsigned char unsafe_mas_per_col; 136 + }; 137 + 138 + struct uwb_rsv_col_info { 139 + unsigned char max_avail_safe; 140 + unsigned char max_avail_unsafe; 141 + unsigned char highest_mas[UWB_MAS_PER_ZONE]; 142 + struct uwb_rsv_col_set_info csi; 143 + }; 144 + 145 + struct uwb_rsv_row_info { 146 + unsigned char avail[UWB_MAS_PER_ZONE]; 147 + unsigned char free_rows; 148 + unsigned char used_rows; 149 + }; 150 + 151 + /* 152 + * UWB find allocation 153 + */ 154 + struct uwb_rsv_alloc_info { 155 + unsigned char bm[UWB_MAS_PER_ZONE * UWB_NUM_ZONES]; 156 + struct uwb_rsv_col_info ci[UWB_NUM_ZONES]; 157 + struct uwb_rsv_row_info ri; 158 + struct uwb_mas_bm *not_available; 159 + struct uwb_mas_bm *result; 160 + int min_mas; 161 + int max_mas; 162 + int max_interval; 163 + int total_allocated_mases; 164 + int safe_allocated_mases; 165 + int unsafe_allocated_mases; 166 + int interval; 167 + }; 168 + 169 + int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available, 170 + struct uwb_mas_bm *result); 171 + void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc); 116 172 /* 117 173 * UWB Events & management daemon 118 174 */ ··· 228 160 }; 229 161 }; 230 162 231 - extern void uwbd_start(void); 232 - extern void uwbd_stop(void); 163 + extern void uwbd_start(struct uwb_rc *rc); 164 + extern void uwbd_stop(struct uwb_rc *rc); 233 165 extern struct uwb_event *uwb_event_alloc(size_t, gfp_t gfp_mask); 234 166 extern void uwbd_event_queue(struct uwb_event *); 235 167 
void uwbd_flush(struct uwb_rc *rc); 236 168 237 169 /* UWB event handlers */ 170 + extern int uwbd_evt_handle_rc_ie_rcv(struct uwb_event *); 238 171 extern int uwbd_evt_handle_rc_beacon(struct uwb_event *); 239 172 extern int uwbd_evt_handle_rc_beacon_size(struct uwb_event *); 240 173 extern int uwbd_evt_handle_rc_bpoie_change(struct uwb_event *); ··· 261 192 */ 262 193 263 194 extern unsigned long beacon_timeout_ms; 264 - 265 - /** Beacon cache list */ 266 - struct uwb_beca { 267 - struct list_head list; 268 - size_t entries; 269 - struct mutex mutex; 270 - }; 271 - 272 - extern struct uwb_beca uwb_beca; 273 195 274 196 /** 275 197 * Beacon cache entry ··· 288 228 struct uwb_beacon_frame; 289 229 extern ssize_t uwb_bce_print_IEs(struct uwb_dev *, struct uwb_beca_e *, 290 230 char *, size_t); 291 - extern struct uwb_beca_e *__uwb_beca_add(struct uwb_rc_evt_beacon *, 292 - struct uwb_beacon_frame *, 293 - unsigned long); 294 231 295 232 extern void uwb_bce_kfree(struct kref *_bce); 296 233 static inline void uwb_bce_get(struct uwb_beca_e *bce) ··· 298 241 { 299 242 kref_put(&bce->refcnt, uwb_bce_kfree); 300 243 } 301 - extern void uwb_beca_purge(void); 302 - extern void uwb_beca_release(void); 244 + extern void uwb_beca_purge(struct uwb_rc *rc); 245 + extern void uwb_beca_release(struct uwb_rc *rc); 303 246 304 247 struct uwb_dev *uwb_dev_get_by_devaddr(struct uwb_rc *rc, 305 248 const struct uwb_dev_addr *devaddr); 306 249 struct uwb_dev *uwb_dev_get_by_macaddr(struct uwb_rc *rc, 307 250 const struct uwb_mac_addr *macaddr); 251 + 252 + int uwb_radio_setup(struct uwb_rc *rc); 253 + void uwb_radio_reset_state(struct uwb_rc *rc); 254 + void uwb_radio_shutdown(struct uwb_rc *rc); 255 + int uwb_radio_force_channel(struct uwb_rc *rc, int channel); 308 256 309 257 /* -- UWB Sysfs representation */ 310 258 extern struct class uwb_rc_class; ··· 321 259 void uwb_rsv_init(struct uwb_rc *rc); 322 260 int uwb_rsv_setup(struct uwb_rc *rc); 323 261 void uwb_rsv_cleanup(struct uwb_rc *rc); 262 + void uwb_rsv_remove_all(struct uwb_rc *rc); 263 + void uwb_rsv_get(struct uwb_rsv *rsv); 264 + void uwb_rsv_put(struct uwb_rsv *rsv); 265 + bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv); 266 + void uwb_rsv_dump(char *text, struct uwb_rsv *rsv); 267 + int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available); 268 + void uwb_rsv_backoff_win_timer(unsigned long arg); 269 + void uwb_rsv_backoff_win_increment(struct uwb_rc *rc); 270 + int uwb_rsv_status(struct uwb_rsv *rsv); 271 + int uwb_rsv_companion_status(struct uwb_rsv *rsv); 324 272 325 273 void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state); 326 274 void uwb_rsv_remove(struct uwb_rsv *rsv); 327 275 struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, 328 276 struct uwb_ie_drp *drp_ie); 329 277 void uwb_rsv_sched_update(struct uwb_rc *rc); 278 + void uwb_rsv_queue_update(struct uwb_rc *rc); 330 279 331 - void uwb_drp_handle_timeout(struct uwb_rsv *rsv); 332 280 int uwb_drp_ie_update(struct uwb_rsv *rsv); 333 281 void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie); 334 282 335 283 void uwb_drp_avail_init(struct uwb_rc *rc); 284 + void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail); 336 285 int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas); 337 286 void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas); 338 287 void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas); ··· 362 289 void uwb_dbg_exit(void); 363 290 
void uwb_dbg_add_rc(struct uwb_rc *rc); 364 291 void uwb_dbg_del_rc(struct uwb_rc *rc); 365 - 366 - /* Workarounds for version specific stuff */ 292 + struct dentry *uwb_dbg_create_pal_dir(struct uwb_pal *pal); 367 293 368 294 static inline void uwb_dev_lock(struct uwb_dev *uwb_dev) 369 295 {
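The conflicting alien-BP tracking declared above means a reservation's usable MAS can shrink below what was negotiated. A sketch of how a PAL might query it (the wrapper function is hypothetical; uwb_rsv_get_usable_mas(), struct uwb_mas_bm and UWB_NUM_MAS are from this series):

#include <linux/bitmap.h>
#include <linux/uwb.h>

/* Number of MAS in the reservation that are currently usable. */
static unsigned int example_usable_mas(struct uwb_rsv *rsv)
{
	struct uwb_mas_bm mas;

	uwb_rsv_get_usable_mas(rsv, &mas);
	return bitmap_weight(mas.bm, UWB_NUM_MAS);
}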
+66 -110
drivers/uwb/uwbd.c
··· 68 68 * 69 69 * Handler functions are called normally uwbd_evt_handle_*(). 70 70 */ 71 - 72 71 #include <linux/kthread.h> 73 72 #include <linux/module.h> 74 73 #include <linux/freezer.h> 74 + 75 75 #include "uwb-internal.h" 76 76 77 - #define D_LOCAL 1 78 - #include <linux/uwb/debug.h> 79 - 80 - 81 - /** 77 + /* 82 78 * UWBD Event handler function signature 83 79 * 84 80 * Return !0 if the event needs not to be freed (ie the handler ··· 97 101 const char *name; 98 102 }; 99 103 100 - /** Table of handlers for and properties of the UWBD Radio Control Events */ 101 - static 102 - struct uwbd_event uwbd_events[] = { 104 + /* Table of handlers for and properties of the UWBD Radio Control Events */ 105 + static struct uwbd_event uwbd_urc_events[] = { 106 + [UWB_RC_EVT_IE_RCV] = { 107 + .handler = uwbd_evt_handle_rc_ie_rcv, 108 + .name = "IE_RECEIVED" 109 + }, 103 110 [UWB_RC_EVT_BEACON] = { 104 111 .handler = uwbd_evt_handle_rc_beacon, 105 112 .name = "BEACON_RECEIVED" ··· 141 142 size_t size; 142 143 }; 143 144 144 - #define UWBD_EVT_TYPE_HANDLER(n,a) { \ 145 - .name = (n), \ 146 - .uwbd_events = (a), \ 147 - .size = sizeof(a)/sizeof((a)[0]) \ 148 - } 149 - 150 - 151 - /** Table of handlers for each UWBD Event type. */ 152 - static 153 - struct uwbd_evt_type_handler uwbd_evt_type_handlers[] = { 154 - [UWB_RC_CET_GENERAL] = UWBD_EVT_TYPE_HANDLER("RC", uwbd_events) 145 + /* Table of handlers for each UWBD Event type. */ 146 + static struct uwbd_evt_type_handler uwbd_urc_evt_type_handlers[] = { 147 + [UWB_RC_CET_GENERAL] = { 148 + .name = "URC", 149 + .uwbd_events = uwbd_urc_events, 150 + .size = ARRAY_SIZE(uwbd_urc_events), 151 + }, 155 152 }; 156 - 157 - static const 158 - size_t uwbd_evt_type_handlers_len = 159 - sizeof(uwbd_evt_type_handlers) / sizeof(uwbd_evt_type_handlers[0]); 160 153 161 154 static const struct uwbd_event uwbd_message_handlers[] = { 162 155 [UWB_EVT_MSG_RESET] = { ··· 157 166 }, 158 167 }; 159 168 160 - static DEFINE_MUTEX(uwbd_event_mutex); 161 - 162 - /** 169 + /* 163 170 * Handle an URC event passed to the UWB Daemon 164 171 * 165 172 * @evt: the event to handle ··· 177 188 static 178 189 int uwbd_event_handle_urc(struct uwb_event *evt) 179 190 { 191 + int result = -EINVAL; 180 192 struct uwbd_evt_type_handler *type_table; 181 193 uwbd_evt_handler_f handler; 182 194 u8 type, context; ··· 187 197 event = le16_to_cpu(evt->notif.rceb->wEvent); 188 198 context = evt->notif.rceb->bEventContext; 189 199 190 - if (type > uwbd_evt_type_handlers_len) { 191 - printk(KERN_ERR "UWBD: event type %u: unknown (too high)\n", type); 192 - return -EINVAL; 193 - } 194 - type_table = &uwbd_evt_type_handlers[type]; 195 - if (type_table->uwbd_events == NULL) { 196 - printk(KERN_ERR "UWBD: event type %u: unknown\n", type); 197 - return -EINVAL; 198 - } 199 - if (event > type_table->size) { 200 - printk(KERN_ERR "UWBD: event %s[%u]: unknown (too high)\n", 201 - type_table->name, event); 202 - return -EINVAL; 203 - } 200 + if (type > ARRAY_SIZE(uwbd_urc_evt_type_handlers)) 201 + goto out; 202 + type_table = &uwbd_urc_evt_type_handlers[type]; 203 + if (type_table->uwbd_events == NULL) 204 + goto out; 205 + if (event > type_table->size) 206 + goto out; 204 207 handler = type_table->uwbd_events[event].handler; 205 - if (handler == NULL) { 206 - printk(KERN_ERR "UWBD: event %s[%u]: unknown\n", type_table->name, event); 207 - return -EINVAL; 208 - } 209 - return (*handler)(evt); 208 + if (handler == NULL) 209 + goto out; 210 + 211 + result = (*handler)(evt); 212 + out: 213 + if (result < 0) 214 
+ dev_err(&evt->rc->uwb_dev.dev, 215 + "UWBD: event 0x%02x/%04x/%02x, handling failed: %d\n", 216 + type, event, context, result); 217 + return result; 210 218 } 211 219 212 220 static void uwbd_event_handle_message(struct uwb_event *evt) ··· 219 231 return; 220 232 } 221 233 222 - /* If this is a reset event we need to drop the 223 - * uwbd_event_mutex or it deadlocks when the reset handler 224 - * attempts to flush the uwbd events. */ 225 - if (evt->message == UWB_EVT_MSG_RESET) 226 - mutex_unlock(&uwbd_event_mutex); 227 - 228 234 result = uwbd_message_handlers[evt->message].handler(evt); 229 235 if (result < 0) 230 236 dev_err(&rc->uwb_dev.dev, "UWBD: '%s' message failed: %d\n", 231 237 uwbd_message_handlers[evt->message].name, result); 232 - 233 - if (evt->message == UWB_EVT_MSG_RESET) 234 - mutex_lock(&uwbd_event_mutex); 235 238 } 236 239 237 240 static void uwbd_event_handle(struct uwb_event *evt) ··· 250 271 251 272 __uwb_rc_put(rc); /* for the __uwb_rc_get() in uwb_rc_notif_cb() */ 252 273 } 253 - /* The UWB Daemon */ 254 - 255 - 256 - /** Daemon's PID: used to decide if we can queue or not */ 257 - static int uwbd_pid; 258 - /** Daemon's task struct for managing the kthread */ 259 - static struct task_struct *uwbd_task; 260 - /** Daemon's waitqueue for waiting for new events */ 261 - static DECLARE_WAIT_QUEUE_HEAD(uwbd_wq); 262 - /** Daemon's list of events; we queue/dequeue here */ 263 - static struct list_head uwbd_event_list = LIST_HEAD_INIT(uwbd_event_list); 264 - /** Daemon's list lock to protect concurent access */ 265 - static DEFINE_SPINLOCK(uwbd_event_list_lock); 266 - 267 274 268 275 /** 269 276 * UWB Daemon ··· 263 298 * FIXME: should change so we don't have a 1HZ timer all the time, but 264 299 * only if there are devices. 265 300 */ 266 - static int uwbd(void *unused) 301 + static int uwbd(void *param) 267 302 { 303 + struct uwb_rc *rc = param; 268 304 unsigned long flags; 269 - struct list_head list = LIST_HEAD_INIT(list); 270 - struct uwb_event *evt, *nxt; 305 + struct uwb_event *evt; 271 306 int should_stop = 0; 307 + 272 308 while (1) { 273 309 wait_event_interruptible_timeout( 274 - uwbd_wq, 275 - !list_empty(&uwbd_event_list) 310 + rc->uwbd.wq, 311 + !list_empty(&rc->uwbd.event_list) 276 312 || (should_stop = kthread_should_stop()), 277 313 HZ); 278 314 if (should_stop) 279 315 break; 280 316 try_to_freeze(); 281 317 282 - mutex_lock(&uwbd_event_mutex); 283 - spin_lock_irqsave(&uwbd_event_list_lock, flags); 284 - list_splice_init(&uwbd_event_list, &list); 285 - spin_unlock_irqrestore(&uwbd_event_list_lock, flags); 286 - list_for_each_entry_safe(evt, nxt, &list, list_node) { 318 + spin_lock_irqsave(&rc->uwbd.event_list_lock, flags); 319 + if (!list_empty(&rc->uwbd.event_list)) { 320 + evt = list_first_entry(&rc->uwbd.event_list, struct uwb_event, list_node); 287 321 list_del(&evt->list_node); 322 + } else 323 + evt = NULL; 324 + spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags); 325 + 326 + if (evt) { 288 327 uwbd_event_handle(evt); 289 328 kfree(evt); 290 329 } 291 - mutex_unlock(&uwbd_event_mutex); 292 330 293 - uwb_beca_purge(); /* Purge devices that left */ 331 + uwb_beca_purge(rc); /* Purge devices that left */ 294 332 } 295 333 return 0; 296 334 } 297 335 298 336 299 337 /** Start the UWB daemon */ 300 - void uwbd_start(void) 338 + void uwbd_start(struct uwb_rc *rc) 301 339 { 302 - uwbd_task = kthread_run(uwbd, NULL, "uwbd"); 303 - if (uwbd_task == NULL) 340 + rc->uwbd.task = kthread_run(uwbd, rc, "uwbd"); 341 + if (rc->uwbd.task == NULL) 304 342 
printk(KERN_ERR "UWB: Cannot start management daemon; " 305 343 "UWB won't work\n"); 306 344 else 307 - uwbd_pid = uwbd_task->pid; 345 + rc->uwbd.pid = rc->uwbd.task->pid; 308 346 } 309 347 310 348 /* Stop the UWB daemon and free any unprocessed events */ 311 - void uwbd_stop(void) 349 + void uwbd_stop(struct uwb_rc *rc) 312 350 { 313 - unsigned long flags; 314 - struct uwb_event *evt, *nxt; 315 - kthread_stop(uwbd_task); 316 - spin_lock_irqsave(&uwbd_event_list_lock, flags); 317 - uwbd_pid = 0; 318 - list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) { 319 - if (evt->type == UWB_EVT_TYPE_NOTIF) 320 - kfree(evt->notif.rceb); 321 - kfree(evt); 322 - } 323 - spin_unlock_irqrestore(&uwbd_event_list_lock, flags); 324 - uwb_beca_release(); 351 + kthread_stop(rc->uwbd.task); 352 + uwbd_flush(rc); 325 353 } 326 354 327 355 /* ··· 331 373 */ 332 374 void uwbd_event_queue(struct uwb_event *evt) 333 375 { 376 + struct uwb_rc *rc = evt->rc; 334 377 unsigned long flags; 335 - spin_lock_irqsave(&uwbd_event_list_lock, flags); 336 - if (uwbd_pid != 0) { 337 - list_add(&evt->list_node, &uwbd_event_list); 338 - wake_up_all(&uwbd_wq); 378 + 379 + spin_lock_irqsave(&rc->uwbd.event_list_lock, flags); 380 + if (rc->uwbd.pid != 0) { 381 + list_add(&evt->list_node, &rc->uwbd.event_list); 382 + wake_up_all(&rc->uwbd.wq); 339 383 } else { 340 384 __uwb_rc_put(evt->rc); 341 385 if (evt->type == UWB_EVT_TYPE_NOTIF) 342 386 kfree(evt->notif.rceb); 343 387 kfree(evt); 344 388 } 345 - spin_unlock_irqrestore(&uwbd_event_list_lock, flags); 389 + spin_unlock_irqrestore(&rc->uwbd.event_list_lock, flags); 346 390 return; 347 391 } 348 392 ··· 352 392 { 353 393 struct uwb_event *evt, *nxt; 354 394 355 - mutex_lock(&uwbd_event_mutex); 356 - 357 - spin_lock_irq(&uwbd_event_list_lock); 358 - list_for_each_entry_safe(evt, nxt, &uwbd_event_list, list_node) { 395 + spin_lock_irq(&rc->uwbd.event_list_lock); 396 + list_for_each_entry_safe(evt, nxt, &rc->uwbd.event_list, list_node) { 359 397 if (evt->rc == rc) { 360 398 __uwb_rc_put(rc); 361 399 list_del(&evt->list_node); ··· 362 404 kfree(evt); 363 405 } 364 406 } 365 - spin_unlock_irq(&uwbd_event_list_lock); 366 - 367 - mutex_unlock(&uwbd_event_mutex); 407 + spin_unlock_irq(&rc->uwbd.event_list_lock); 368 408 }
+39 -79
drivers/uwb/whc-rc.c
··· 39 39 * them to the hw and transfer the replies/notifications back to the 40 40 * UWB stack through the UWB daemon (UWBD). 41 41 */ 42 - #include <linux/version.h> 43 42 #include <linux/init.h> 44 43 #include <linux/module.h> 45 44 #include <linux/pci.h> ··· 48 49 #include <linux/uwb.h> 49 50 #include <linux/uwb/whci.h> 50 51 #include <linux/uwb/umc.h> 51 - #include "uwb-internal.h" 52 52 53 - #define D_LOCAL 0 54 - #include <linux/uwb/debug.h> 53 + #include "uwb-internal.h" 55 54 56 55 /** 57 56 * Descriptor for an instance of the UWB Radio Control Driver that ··· 95 98 struct device *dev = &whcrc->umc_dev->dev; 96 99 u32 urccmd; 97 100 98 - d_fnstart(3, dev, "(%p, %p, %zu)\n", uwb_rc, cmd, cmd_size); 99 - might_sleep(); 100 - 101 - if (cmd_size >= 4096) { 102 - result = -E2BIG; 103 - goto error; 104 - } 101 + if (cmd_size >= 4096) 102 + return -EINVAL; 105 103 106 104 /* 107 105 * If the URC is halted, then the hardware has reset itself. ··· 107 115 if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) { 108 116 dev_err(dev, "requesting reset of halted radio controller\n"); 109 117 uwb_rc_reset_all(uwb_rc); 110 - result = -EIO; 111 - goto error; 118 + return -EIO; 112 119 } 113 120 114 121 result = wait_event_timeout(whcrc->cmd_wq, 115 122 !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2); 116 123 if (result == 0) { 117 124 dev_err(dev, "device is not ready to execute commands\n"); 118 - result = -ETIMEDOUT; 119 - goto error; 125 + return -ETIMEDOUT; 120 126 } 121 127 122 128 memmove(whcrc->cmd_buf, cmd, cmd_size); ··· 127 137 whcrc->rc_base + URCCMD); 128 138 spin_unlock(&whcrc->irq_lock); 129 139 130 - error: 131 - d_fnend(3, dev, "(%p, %p, %zu) = %d\n", 132 - uwb_rc, cmd, cmd_size, result); 133 - return result; 140 + return 0; 134 141 } 135 142 136 143 static int whcrc_reset(struct uwb_rc *rc) ··· 154 167 static 155 168 void whcrc_enable_events(struct whcrc *whcrc) 156 169 { 157 - struct device *dev = &whcrc->umc_dev->dev; 158 170 u32 urccmd; 159 - 160 - d_fnstart(4, dev, "(whcrc %p)\n", whcrc); 161 171 162 172 le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR); 163 173 ··· 162 178 urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE; 163 179 le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD); 164 180 spin_unlock(&whcrc->irq_lock); 165 - 166 - d_fnend(4, dev, "(whcrc %p) = void\n", whcrc); 167 181 } 168 182 169 183 static void whcrc_event_work(struct work_struct *work) 170 184 { 171 185 struct whcrc *whcrc = container_of(work, struct whcrc, event_work); 172 - struct device *dev = &whcrc->umc_dev->dev; 173 186 size_t size; 174 187 u64 urcevtaddr; 175 188 176 189 urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR); 177 190 size = urcevtaddr & URCEVTADDR_OFFSET_MASK; 178 - 179 - d_printf(3, dev, "received %zu octet event\n", size); 180 - d_dump(4, dev, whcrc->evt_buf, size > 32 ? 
32 : size); 181 191 182 192 uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size); 183 193 whcrc_enable_events(whcrc); ··· 195 217 return IRQ_NONE; 196 218 le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS); 197 219 198 - d_printf(4, dev, "acked 0x%08x, urcsts 0x%08x\n", 199 - le_readl(whcrc->rc_base + URCSTS), urcsts); 200 - 201 220 if (urcsts & URCSTS_HSE) { 202 221 dev_err(dev, "host system error -- hardware halted\n"); 203 222 /* FIXME: do something sensible here */ 204 223 goto out; 205 224 } 206 - if (urcsts & URCSTS_ER) { 207 - d_printf(3, dev, "ER: event ready\n"); 225 + if (urcsts & URCSTS_ER) 208 226 schedule_work(&whcrc->event_work); 209 - } 210 - if (urcsts & URCSTS_RCI) { 211 - d_printf(3, dev, "RCI: ready to execute another command\n"); 227 + if (urcsts & URCSTS_RCI) 212 228 wake_up_all(&whcrc->cmd_wq); 213 - } 214 229 out: 215 230 return IRQ_HANDLED; 216 231 } ··· 222 251 whcrc->area = umc_dev->resource.start; 223 252 whcrc->rc_len = umc_dev->resource.end - umc_dev->resource.start + 1; 224 253 result = -EBUSY; 225 - if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) 226 - == NULL) { 254 + if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) == NULL) { 227 255 dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n", 228 256 whcrc->rc_len, whcrc->area, result); 229 257 goto error_request_region; ··· 257 287 dev_err(dev, "Can't allocate evt transfer buffer\n"); 258 288 goto error_evt_buffer; 259 289 } 260 - d_printf(3, dev, "UWB RC Interface: %zu bytes at 0x%p, irq %u\n", 261 - whcrc->rc_len, whcrc->rc_base, umc_dev->irq); 262 290 return 0; 263 291 264 292 error_evt_buffer: ··· 301 333 static int whcrc_start_rc(struct uwb_rc *rc) 302 334 { 303 335 struct whcrc *whcrc = rc->priv; 304 - int result = 0; 305 336 struct device *dev = &whcrc->umc_dev->dev; 306 - unsigned long start, duration; 307 337 308 338 /* Reset the thing */ 309 339 le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD); 310 - if (d_test(3)) 311 - start = jiffies; 312 340 if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0, 313 - 5000, "device to reset at init") < 0) { 314 - result = -EBUSY; 315 - goto error; 316 - } else if (d_test(3)) { 317 - duration = jiffies - start; 318 - if (duration > msecs_to_jiffies(40)) 319 - dev_err(dev, "Device took %ums to " 320 - "reset. MAX expected: 40ms\n", 321 - jiffies_to_msecs(duration)); 322 - } 341 + 5000, "hardware reset") < 0) 342 + return -EBUSY; 323 343 324 344 /* Set the event buffer, start the controller (enable IRQs later) */ 325 345 le_writel(0, whcrc->rc_base + URCINTR); 326 346 le_writel(URCCMD_RS, whcrc->rc_base + URCCMD); 327 - result = -ETIMEDOUT; 328 - if (d_test(3)) 329 - start = jiffies; 330 347 if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0, 331 - 5000, "device to start") < 0) 332 - goto error; 333 - if (d_test(3)) { 334 - duration = jiffies - start; 335 - if (duration > msecs_to_jiffies(40)) 336 - dev_err(dev, "Device took %ums to start. 
" 337 - "MAX expected: 40ms\n", 338 - jiffies_to_msecs(duration)); 339 - } 348 + 5000, "radio controller start") < 0) 349 + return -ETIMEDOUT; 340 350 whcrc_enable_events(whcrc); 341 - result = 0; 342 351 le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR); 343 - error: 344 - return result; 352 + return 0; 345 353 } 346 354 347 355 ··· 339 395 340 396 le_writel(0, whcrc->rc_base + URCCMD); 341 397 whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS, 342 - URCSTS_HALTED, 0, 40, "URCSTS.HALTED"); 398 + URCSTS_HALTED, URCSTS_HALTED, 100, "radio controller stop"); 343 399 } 344 400 345 401 static void whcrc_init(struct whcrc *whcrc) ··· 365 421 struct whcrc *whcrc; 366 422 struct device *dev = &umc_dev->dev; 367 423 368 - d_fnstart(3, dev, "(umc_dev %p)\n", umc_dev); 369 424 result = -ENOMEM; 370 425 uwb_rc = uwb_rc_alloc(); 371 426 if (uwb_rc == NULL) { ··· 396 453 if (result < 0) 397 454 goto error_rc_add; 398 455 umc_set_drvdata(umc_dev, whcrc); 399 - d_fnend(3, dev, "(umc_dev %p) = 0\n", umc_dev); 400 456 return 0; 401 457 402 458 error_rc_add: ··· 405 463 error_alloc: 406 464 uwb_rc_put(uwb_rc); 407 465 error_rc_alloc: 408 - d_fnend(3, dev, "(umc_dev %p) = %d\n", umc_dev, result); 409 466 return result; 410 467 } 411 468 ··· 427 486 whcrc_release_rc_umc(whcrc); 428 487 kfree(whcrc); 429 488 uwb_rc_put(uwb_rc); 430 - d_printf(1, &umc_dev->dev, "freed whcrc %p\n", whcrc); 489 + } 490 + 491 + static int whcrc_pre_reset(struct umc_dev *umc) 492 + { 493 + struct whcrc *whcrc = umc_get_drvdata(umc); 494 + struct uwb_rc *uwb_rc = whcrc->uwb_rc; 495 + 496 + uwb_rc_pre_reset(uwb_rc); 497 + return 0; 498 + } 499 + 500 + static int whcrc_post_reset(struct umc_dev *umc) 501 + { 502 + struct whcrc *whcrc = umc_get_drvdata(umc); 503 + struct uwb_rc *uwb_rc = whcrc->uwb_rc; 504 + 505 + uwb_rc_post_reset(uwb_rc); 506 + return 0; 431 507 } 432 508 433 509 /* PCI device ID's that we handle [so it gets loaded] */ ··· 455 497 MODULE_DEVICE_TABLE(pci, whcrc_id_table); 456 498 457 499 static struct umc_driver whcrc_driver = { 458 - .name = "whc-rc", 459 - .cap_id = UMC_CAP_ID_WHCI_RC, 460 - .probe = whcrc_probe, 461 - .remove = whcrc_remove, 500 + .name = "whc-rc", 501 + .cap_id = UMC_CAP_ID_WHCI_RC, 502 + .probe = whcrc_probe, 503 + .remove = whcrc_remove, 504 + .pre_reset = whcrc_pre_reset, 505 + .post_reset = whcrc_post_reset, 462 506 }; 463 507 464 508 static int __init whcrc_driver_init(void)
+3 -3
drivers/uwb/whci.c
··· 67 67 val = le_readl(reg); 68 68 if ((val & mask) == result) 69 69 break; 70 - msleep(10); 71 70 if (t >= max_ms) { 72 - dev_err(dev, "timed out waiting for %s ", tag); 71 + dev_err(dev, "%s timed out\n", tag); 73 72 return -ETIMEDOUT; 74 73 } 74 + msleep(10); 75 75 t += 10; 76 76 } 77 77 return 0; ··· 111 111 + UWBCAPDATA_TO_OFFSET(capdata); 112 112 umc->resource.end = umc->resource.start 113 113 + (n == 0 ? 0x20 : UWBCAPDATA_TO_SIZE(capdata)) - 1; 114 - umc->resource.name = umc->dev.bus_id; 114 + umc->resource.name = dev_name(&umc->dev); 115 115 umc->resource.flags = card->pci->resource[bar].flags; 116 116 umc->resource.parent = &card->pci->resource[bar]; 117 117 umc->irq = card->pci->irq;
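The whci_wait_for() fix above moves the msleep() after the register check, so the first poll is immediate and a slow device still gets the full timeout before the clearer "%s timed out" message fires. A generic version of that poll-with-timeout helper looks roughly like the sketch below; my_wait_for() and the plain readl() are stand-ins, the driver itself uses le_readl().

#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>

static int my_wait_for(struct device *dev, void __iomem *reg,
                       u32 mask, u32 result, unsigned long max_ms,
                       const char *tag)
{
        unsigned long t = 0;

        for (;;) {
                if ((readl(reg) & mask) == result)
                        return 0;
                if (t >= max_ms) {
                        dev_err(dev, "%s timed out\n", tag);
                        return -ETIMEDOUT;
                }
                msleep(10);             /* sleep only after a failed check */
                t += 10;
        }
}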
+2 -17
drivers/uwb/wlp/eda.c
··· 51 51 * the tag and address of the transmitting neighbor. 52 52 */ 53 53 54 - #define D_LOCAL 5 55 54 #include <linux/netdevice.h> 56 - #include <linux/uwb/debug.h> 57 55 #include <linux/etherdevice.h> 58 56 #include <linux/wlp.h> 59 57 #include "wlp-internal.h" ··· 302 304 { 303 305 int result = 0; 304 306 struct wlp *wlp = container_of(eda, struct wlp, eda); 305 - struct device *dev = &wlp->rc->uwb_dev.dev; 306 307 struct wlp_eda_node *itr; 307 308 unsigned long flags; 308 309 int found = 0; ··· 310 313 list_for_each_entry(itr, &eda->cache, list_node) { 311 314 if (!memcmp(itr->virt_addr, virt_addr, 312 315 sizeof(itr->virt_addr))) { 313 - d_printf(6, dev, "EDA: looking for %pM hit %02x:%02x " 314 - "wss %p tag 0x%02x state %u\n", 315 - virt_addr, 316 - itr->dev_addr.data[1], 317 - itr->dev_addr.data[0], itr->wss, 318 - itr->tag, itr->state); 319 316 result = (*function)(wlp, itr, priv); 320 317 *dev_addr = itr->dev_addr; 321 318 found = 1; 322 319 break; 323 - } else 324 - d_printf(6, dev, "EDA: looking for %pM against %pM miss\n", 325 - virt_addr, itr->virt_addr); 320 + } 326 321 } 327 - if (!found) { 328 - if (printk_ratelimit()) 329 - dev_err(dev, "EDA: Eth addr %pM not found.\n", 330 - virt_addr); 322 + if (!found) 331 323 result = -ENODEV; 332 - } 333 324 spin_unlock_irqrestore(&eda->lock, flags); 334 325 return result; 335 326 }
+8 -173
drivers/uwb/wlp/messages.c
··· 24 24 */ 25 25 26 26 #include <linux/wlp.h> 27 - #define D_LOCAL 6 28 - #include <linux/uwb/debug.h> 27 + 29 28 #include "wlp-internal.h" 30 29 31 30 static ··· 104 105 #define wlp_set(type, type_code, name) \ 105 106 static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ 106 107 { \ 107 - d_fnstart(6, NULL, "(attribute %p)\n", attr); \ 108 108 wlp_set_attr_hdr(&attr->hdr, type_code, \ 109 109 sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ 110 110 attr->name = value; \ 111 - d_dump(6, NULL, attr, sizeof(*attr)); \ 112 - d_fnend(6, NULL, "(attribute %p)\n", attr); \ 113 111 return sizeof(*attr); \ 114 112 } 115 113 116 114 #define wlp_pset(type, type_code, name) \ 117 115 static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value) \ 118 116 { \ 119 - d_fnstart(6, NULL, "(attribute %p)\n", attr); \ 120 117 wlp_set_attr_hdr(&attr->hdr, type_code, \ 121 118 sizeof(*attr) - sizeof(struct wlp_attr_hdr)); \ 122 119 attr->name = *value; \ 123 - d_dump(6, NULL, attr, sizeof(*attr)); \ 124 - d_fnend(6, NULL, "(attribute %p)\n", attr); \ 125 120 return sizeof(*attr); \ 126 121 } 127 122 ··· 132 139 static size_t wlp_set_##name(struct wlp_attr_##name *attr, type value, \ 133 140 size_t len) \ 134 141 { \ 135 - d_fnstart(6, NULL, "(attribute %p)\n", attr); \ 136 142 wlp_set_attr_hdr(&attr->hdr, type_code, len); \ 137 143 memcpy(attr->name, value, len); \ 138 - d_dump(6, NULL, attr, sizeof(*attr) + len); \ 139 - d_fnend(6, NULL, "(attribute %p)\n", attr); \ 140 144 return sizeof(*attr) + len; \ 141 145 } 142 146 ··· 172 182 size_t datalen; 173 183 void *ptr = attr->wss_info; 174 184 size_t used = sizeof(*attr); 175 - d_fnstart(6, NULL, "(attribute %p)\n", attr); 185 + 176 186 datalen = sizeof(struct wlp_wss_info) + strlen(wss->name); 177 187 wlp_set_attr_hdr(&attr->hdr, WLP_ATTR_WSS_INFO, datalen); 178 188 used = wlp_set_wssid(ptr, &wss->wssid); ··· 180 190 used += wlp_set_accept_enrl(ptr + used, wss->accept_enroll); 181 191 used += wlp_set_wss_sec_status(ptr + used, wss->secure_status); 182 192 used += wlp_set_wss_bcast(ptr + used, &wss->bcast); 183 - d_dump(6, NULL, attr, sizeof(*attr) + datalen); 184 - d_fnend(6, NULL, "(attribute %p, used %d)\n", 185 - attr, (int)(sizeof(*attr) + used)); 186 193 return sizeof(*attr) + used; 187 194 } 188 195 ··· 401 414 size_t used = 0; 402 415 ssize_t result = -EINVAL; 403 416 404 - d_printf(6, dev, "WLP: WSS info: Retrieving WSS name\n"); 405 417 result = wlp_get_wss_name(wlp, ptr, info->name, buflen); 406 418 if (result < 0) { 407 419 dev_err(dev, "WLP: unable to obtain WSS name from " ··· 408 422 goto error_parse; 409 423 } 410 424 used += result; 411 - d_printf(6, dev, "WLP: WSS info: Retrieving accept enroll\n"); 425 + 412 426 result = wlp_get_accept_enrl(wlp, ptr + used, &info->accept_enroll, 413 427 buflen - used); 414 428 if (result < 0) { ··· 423 437 goto error_parse; 424 438 } 425 439 used += result; 426 - d_printf(6, dev, "WLP: WSS info: Retrieving secure status\n"); 440 + 427 441 result = wlp_get_wss_sec_status(wlp, ptr + used, &info->sec_status, 428 442 buflen - used); 429 443 if (result < 0) { ··· 438 452 goto error_parse; 439 453 } 440 454 used += result; 441 - d_printf(6, dev, "WLP: WSS info: Retrieving broadcast\n"); 455 + 442 456 result = wlp_get_wss_bcast(wlp, ptr + used, &info->bcast, 443 457 buflen - used); 444 458 if (result < 0) { ··· 516 530 len = result; 517 531 used = sizeof(*attr); 518 532 ptr = attr; 519 - d_printf(6, dev, "WLP: WSS info: Retrieving WSSID\n"); 533 + 520 534 result = wlp_get_wssid(wlp, 
ptr + used, wssid, buflen - used); 521 535 if (result < 0) { 522 536 dev_err(dev, "WLP: unable to obtain WSSID from WSS info.\n"); ··· 539 553 goto out; 540 554 } 541 555 result = used; 542 - d_printf(6, dev, "WLP: Successfully parsed WLP information " 543 - "attribute. used %zu bytes\n", used); 544 556 out: 545 557 return result; 546 558 } ··· 582 598 struct wlp_wssid_e *wssid_e; 583 599 char buf[WLP_WSS_UUID_STRSIZE]; 584 600 585 - d_fnstart(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d \n", 586 - wlp, attr, neighbor, wss, (int)buflen); 587 601 if (buflen < 0) 588 602 goto out; 589 603 ··· 620 638 wss->accept_enroll = wss_info.accept_enroll; 621 639 wss->state = WLP_WSS_STATE_PART_ENROLLED; 622 640 wlp_wss_uuid_print(buf, sizeof(buf), &wssid); 623 - d_printf(2, dev, "WLP: Found WSS %s. Enrolling.\n", 624 - buf); 641 + dev_dbg(dev, "WLP: Found WSS %s. Enrolling.\n", buf); 625 642 } else { 626 643 wssid_e = wlp_create_wssid_e(wlp, neighbor); 627 644 if (wssid_e == NULL) { ··· 641 660 if (result < 0 && !enroll) /* this was a discovery */ 642 661 wlp_remove_neighbor_tmp_info(neighbor); 643 662 out: 644 - d_fnend(6, dev, "wlp %p, attr %p, neighbor %p, wss %p, buflen %d, " 645 - "result %d \n", wlp, attr, neighbor, wss, (int)buflen, 646 - (int)result); 647 663 return result; 648 664 649 665 } ··· 696 718 struct sk_buff *_skb; 697 719 void *d1_itr; 698 720 699 - d_fnstart(6, dev, "wlp %p\n", wlp); 700 721 if (wlp->dev_info == NULL) { 701 722 result = __wlp_setup_device_info(wlp); 702 723 if (result < 0) { ··· 705 728 } 706 729 } 707 730 info = wlp->dev_info; 708 - d_printf(6, dev, "Local properties:\n" 709 - "Device name (%d bytes): %s\n" 710 - "Model name (%d bytes): %s\n" 711 - "Manufacturer (%d bytes): %s\n" 712 - "Model number (%d bytes): %s\n" 713 - "Serial number (%d bytes): %s\n" 714 - "Primary device type: \n" 715 - " Category: %d \n" 716 - " OUI: %02x:%02x:%02x \n" 717 - " OUI Subdivision: %u \n", 718 - (int)strlen(info->name), info->name, 719 - (int)strlen(info->model_name), info->model_name, 720 - (int)strlen(info->manufacturer), info->manufacturer, 721 - (int)strlen(info->model_nr), info->model_nr, 722 - (int)strlen(info->serial), info->serial, 723 - info->prim_dev_type.category, 724 - info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1], 725 - info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv); 726 731 _skb = dev_alloc_skb(sizeof(*_d1) 727 732 + sizeof(struct wlp_attr_uuid_e) 728 733 + sizeof(struct wlp_attr_wss_sel_mthd) ··· 727 768 goto error; 728 769 } 729 770 _d1 = (void *) _skb->data; 730 - d_printf(6, dev, "D1 starts at %p \n", _d1); 731 771 _d1->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 732 772 _d1->hdr.type = WLP_FRAME_ASSOCIATION; 733 773 _d1->type = WLP_ASSOC_D1; ··· 749 791 used += wlp_set_prim_dev_type(d1_itr + used, &info->prim_dev_type); 750 792 used += wlp_set_wlp_assc_err(d1_itr + used, WLP_ASSOC_ERROR_NONE); 751 793 skb_put(_skb, sizeof(*_d1) + used); 752 - d_printf(6, dev, "D1 message:\n"); 753 - d_dump(6, dev, _d1, sizeof(*_d1) 754 - + sizeof(struct wlp_attr_uuid_e) 755 - + sizeof(struct wlp_attr_wss_sel_mthd) 756 - + sizeof(struct wlp_attr_dev_name) 757 - + strlen(info->name) 758 - + sizeof(struct wlp_attr_manufacturer) 759 - + strlen(info->manufacturer) 760 - + sizeof(struct wlp_attr_model_name) 761 - + strlen(info->model_name) 762 - + sizeof(struct wlp_attr_model_nr) 763 - + strlen(info->model_nr) 764 - + sizeof(struct wlp_attr_serial) 765 - + strlen(info->serial) 766 - + sizeof(struct wlp_attr_prim_dev_type) 767 - + sizeof(struct 
wlp_attr_wlp_assc_err)); 768 794 *skb = _skb; 769 795 error: 770 - d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); 771 796 return result; 772 797 } 773 798 ··· 778 837 void *d2_itr; 779 838 size_t mem_needed; 780 839 781 - d_fnstart(6, dev, "wlp %p\n", wlp); 782 840 if (wlp->dev_info == NULL) { 783 841 result = __wlp_setup_device_info(wlp); 784 842 if (result < 0) { ··· 787 847 } 788 848 } 789 849 info = wlp->dev_info; 790 - d_printf(6, dev, "Local properties:\n" 791 - "Device name (%d bytes): %s\n" 792 - "Model name (%d bytes): %s\n" 793 - "Manufacturer (%d bytes): %s\n" 794 - "Model number (%d bytes): %s\n" 795 - "Serial number (%d bytes): %s\n" 796 - "Primary device type: \n" 797 - " Category: %d \n" 798 - " OUI: %02x:%02x:%02x \n" 799 - " OUI Subdivision: %u \n", 800 - (int)strlen(info->name), info->name, 801 - (int)strlen(info->model_name), info->model_name, 802 - (int)strlen(info->manufacturer), info->manufacturer, 803 - (int)strlen(info->model_nr), info->model_nr, 804 - (int)strlen(info->serial), info->serial, 805 - info->prim_dev_type.category, 806 - info->prim_dev_type.OUI[0], info->prim_dev_type.OUI[1], 807 - info->prim_dev_type.OUI[2], info->prim_dev_type.OUIsubdiv); 808 850 mem_needed = sizeof(*_d2) 809 851 + sizeof(struct wlp_attr_uuid_e) 810 852 + sizeof(struct wlp_attr_uuid_r) ··· 814 892 goto error; 815 893 } 816 894 _d2 = (void *) _skb->data; 817 - d_printf(6, dev, "D2 starts at %p \n", _d2); 818 895 _d2->hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 819 896 _d2->hdr.type = WLP_FRAME_ASSOCIATION; 820 897 _d2->type = WLP_ASSOC_D2; ··· 838 917 used += wlp_set_prim_dev_type(d2_itr + used, &info->prim_dev_type); 839 918 used += wlp_set_wlp_assc_err(d2_itr + used, WLP_ASSOC_ERROR_NONE); 840 919 skb_put(_skb, sizeof(*_d2) + used); 841 - d_printf(6, dev, "D2 message:\n"); 842 - d_dump(6, dev, _d2, mem_needed); 843 920 *skb = _skb; 844 921 error: 845 - d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); 846 922 return result; 847 923 } 848 924 ··· 865 947 struct sk_buff *_skb; 866 948 struct wlp_nonce tmp; 867 949 868 - d_fnstart(6, dev, "wlp %p\n", wlp); 869 950 _skb = dev_alloc_skb(sizeof(*f0)); 870 951 if (_skb == NULL) { 871 952 dev_err(dev, "WLP: Unable to allocate memory for F0 " ··· 872 955 goto error_alloc; 873 956 } 874 957 f0 = (void *) _skb->data; 875 - d_printf(6, dev, "F0 starts at %p \n", f0); 876 958 f0->f0_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 877 959 f0->f0_hdr.hdr.type = WLP_FRAME_ASSOCIATION; 878 960 f0->f0_hdr.type = WLP_ASSOC_F0; ··· 885 969 *skb = _skb; 886 970 result = 0; 887 971 error_alloc: 888 - d_fnend(6, dev, "wlp %p, result %d \n", wlp, result); 889 972 return result; 890 973 } 891 974 ··· 1157 1242 enum wlp_wss_sel_mthd sel_mthd = 0; 1158 1243 struct wlp_device_info dev_info; 1159 1244 enum wlp_assc_error assc_err; 1160 - char uuid[WLP_WSS_UUID_STRSIZE]; 1161 1245 struct sk_buff *resp = NULL; 1162 1246 1163 1247 /* Parse D1 frame */ 1164 - d_fnstart(6, dev, "WLP: handle D1 frame. 
wlp = %p, skb = %p\n", 1165 - wlp, skb); 1166 1248 mutex_lock(&wss->mutex); 1167 1249 mutex_lock(&wlp->mutex); /* to access wlp->uuid */ 1168 1250 memset(&dev_info, 0, sizeof(dev_info)); ··· 1170 1258 kfree_skb(skb); 1171 1259 goto out; 1172 1260 } 1173 - wlp_wss_uuid_print(uuid, sizeof(uuid), &uuid_e); 1174 - d_printf(6, dev, "From D1 frame:\n" 1175 - "UUID-E: %s\n" 1176 - "Selection method: %d\n" 1177 - "Device name (%d bytes): %s\n" 1178 - "Model name (%d bytes): %s\n" 1179 - "Manufacturer (%d bytes): %s\n" 1180 - "Model number (%d bytes): %s\n" 1181 - "Serial number (%d bytes): %s\n" 1182 - "Primary device type: \n" 1183 - " Category: %d \n" 1184 - " OUI: %02x:%02x:%02x \n" 1185 - " OUI Subdivision: %u \n", 1186 - uuid, sel_mthd, 1187 - (int)strlen(dev_info.name), dev_info.name, 1188 - (int)strlen(dev_info.model_name), dev_info.model_name, 1189 - (int)strlen(dev_info.manufacturer), dev_info.manufacturer, 1190 - (int)strlen(dev_info.model_nr), dev_info.model_nr, 1191 - (int)strlen(dev_info.serial), dev_info.serial, 1192 - dev_info.prim_dev_type.category, 1193 - dev_info.prim_dev_type.OUI[0], 1194 - dev_info.prim_dev_type.OUI[1], 1195 - dev_info.prim_dev_type.OUI[2], 1196 - dev_info.prim_dev_type.OUIsubdiv); 1197 1261 1198 1262 kfree_skb(skb); 1199 1263 if (!wlp_uuid_is_set(&wlp->uuid)) { ··· 1204 1316 kfree(frame_ctx); 1205 1317 mutex_unlock(&wlp->mutex); 1206 1318 mutex_unlock(&wss->mutex); 1207 - d_fnend(6, dev, "WLP: handle D1 frame. wlp = %p\n", wlp); 1208 1319 } 1209 1320 1210 1321 /** ··· 1433 1546 void *ptr = skb->data; 1434 1547 size_t len = skb->len; 1435 1548 size_t used; 1436 - char buf[WLP_WSS_UUID_STRSIZE]; 1437 1549 struct wlp_frame_assoc *assoc = ptr; 1438 1550 1439 - d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb); 1440 1551 used = sizeof(*assoc); 1441 1552 result = wlp_get_wssid(wlp, ptr + used, wssid, len - used); 1442 1553 if (result < 0) { ··· 1457 1572 wlp_assoc_frame_str(assoc->type)); 1458 1573 goto error_parse; 1459 1574 } 1460 - wlp_wss_uuid_print(buf, sizeof(buf), wssid); 1461 - d_printf(6, dev, "WLP: parsed: WSSID %s, tag 0x%02x, virt " 1462 - "%02x:%02x:%02x:%02x:%02x:%02x \n", buf, *tag, 1463 - virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], 1464 - virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]); 1465 - 1466 1575 error_parse: 1467 - d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result); 1468 1576 return result; 1469 1577 } 1470 1578 ··· 1478 1600 } *c; 1479 1601 struct sk_buff *_skb; 1480 1602 1481 - d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss); 1482 1603 _skb = dev_alloc_skb(sizeof(*c)); 1483 1604 if (_skb == NULL) { 1484 1605 dev_err(dev, "WLP: Unable to allocate memory for C1/C2 " ··· 1485 1608 goto error_alloc; 1486 1609 } 1487 1610 c = (void *) _skb->data; 1488 - d_printf(6, dev, "C1/C2 starts at %p \n", c); 1489 1611 c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 1490 1612 c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; 1491 1613 c->c_hdr.type = type; ··· 1492 1616 wlp_set_msg_type(&c->c_hdr.msg_type, type); 1493 1617 wlp_set_wssid(&c->wssid, &wss->wssid); 1494 1618 skb_put(_skb, sizeof(*c)); 1495 - d_printf(6, dev, "C1/C2 message:\n"); 1496 - d_dump(6, dev, c, sizeof(*c)); 1497 1619 *skb = _skb; 1498 1620 result = 0; 1499 1621 error_alloc: 1500 - d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result); 1501 1622 return result; 1502 1623 } 1503 1624 ··· 1533 1660 } *c; 1534 1661 struct sk_buff *_skb; 1535 1662 1536 - d_fnstart(6, dev, "wlp %p, wss %p \n", wlp, wss); 1537 1663 _skb = 
dev_alloc_skb(sizeof(*c)); 1538 1664 if (_skb == NULL) { 1539 1665 dev_err(dev, "WLP: Unable to allocate memory for C3/C4 " ··· 1540 1668 goto error_alloc; 1541 1669 } 1542 1670 c = (void *) _skb->data; 1543 - d_printf(6, dev, "C3/C4 starts at %p \n", c); 1544 1671 c->c_hdr.hdr.mux_hdr = cpu_to_le16(WLP_PROTOCOL_ID); 1545 1672 c->c_hdr.hdr.type = WLP_FRAME_ASSOCIATION; 1546 1673 c->c_hdr.type = type; ··· 1549 1678 wlp_set_wss_tag(&c->wss_tag, wss->tag); 1550 1679 wlp_set_wss_virt(&c->wss_virt, &wss->virtual_addr); 1551 1680 skb_put(_skb, sizeof(*c)); 1552 - d_printf(6, dev, "C3/C4 message:\n"); 1553 - d_dump(6, dev, c, sizeof(*c)); 1554 1681 *skb = _skb; 1555 1682 result = 0; 1556 1683 error_alloc: 1557 - d_fnend(6, dev, "wlp %p, wss %p, result %d \n", wlp, wss, result); 1558 1684 return result; 1559 1685 } 1560 1686 ··· 1577 1709 struct device *dev = &wlp->rc->uwb_dev.dev; \ 1578 1710 int result; \ 1579 1711 struct sk_buff *skb = NULL; \ 1580 - d_fnstart(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \ 1581 - wlp, wss, dev_addr->data[1], dev_addr->data[0]); \ 1582 - d_printf(6, dev, "WLP: Constructing %s frame. \n", \ 1583 - wlp_assoc_frame_str(id)); \ 1712 + \ 1584 1713 /* Build the frame */ \ 1585 1714 result = wlp_build_assoc_##type(wlp, wss, &skb); \ 1586 1715 if (result < 0) { \ ··· 1586 1721 goto error_build_assoc; \ 1587 1722 } \ 1588 1723 /* Send the frame */ \ 1589 - d_printf(6, dev, "Transmitting %s frame to %02x:%02x \n", \ 1590 - wlp_assoc_frame_str(id), \ 1591 - dev_addr->data[1], dev_addr->data[0]); \ 1592 1724 BUG_ON(wlp->xmit_frame == NULL); \ 1593 1725 result = wlp->xmit_frame(wlp, skb, dev_addr); \ 1594 1726 if (result < 0) { \ ··· 1602 1740 /* We could try again ... */ \ 1603 1741 dev_kfree_skb_any(skb);/*we need to free if tx fails*/ \ 1604 1742 error_build_assoc: \ 1605 - d_fnend(6, dev, "wlp %p, wss %p, neighbor: %02x:%02x\n", \ 1606 - wlp, wss, dev_addr->data[1], dev_addr->data[0]); \ 1607 1743 return result; \ 1608 1744 } 1609 1745 ··· 1654 1794 struct uwb_dev_addr *src = &frame_ctx->src; 1655 1795 int result; 1656 1796 struct wlp_uuid wssid; 1657 - char buf[WLP_WSS_UUID_STRSIZE]; 1658 1797 struct sk_buff *resp = NULL; 1659 1798 1660 1799 /* Parse C1 frame */ 1661 - d_fnstart(6, dev, "WLP: handle C1 frame. 
wlp = %p, c1 = %p\n", 1662 - wlp, c1); 1663 1800 mutex_lock(&wss->mutex); 1664 1801 result = wlp_get_wssid(wlp, (void *)c1 + sizeof(*c1), &wssid, 1665 1802 len - sizeof(*c1)); ··· 1664 1807 dev_err(dev, "WLP: unable to obtain WSSID from C1 frame.\n"); 1665 1808 goto out; 1666 1809 } 1667 - wlp_wss_uuid_print(buf, sizeof(buf), &wssid); 1668 - d_printf(6, dev, "Received C1 frame with WSSID %s \n", buf); 1669 1810 if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) 1670 1811 && wss->state == WLP_WSS_STATE_ACTIVE) { 1671 - d_printf(6, dev, "WSSID from C1 frame is known locally " 1672 - "and is active\n"); 1673 1812 /* Construct C2 frame */ 1674 1813 result = wlp_build_assoc_c2(wlp, wss, &resp); 1675 1814 if (result < 0) { ··· 1673 1820 goto out; 1674 1821 } 1675 1822 } else { 1676 - d_printf(6, dev, "WSSID from C1 frame is not known locally " 1677 - "or is not active\n"); 1678 1823 /* Construct F0 frame */ 1679 1824 result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); 1680 1825 if (result < 0) { ··· 1681 1830 } 1682 1831 } 1683 1832 /* Send C2 frame */ 1684 - d_printf(6, dev, "Transmitting response (C2/F0) frame to %02x:%02x \n", 1685 - src->data[1], src->data[0]); 1686 1833 BUG_ON(wlp->xmit_frame == NULL); 1687 1834 result = wlp->xmit_frame(wlp, resp, src); 1688 1835 if (result < 0) { ··· 1695 1846 kfree_skb(frame_ctx->skb); 1696 1847 kfree(frame_ctx); 1697 1848 mutex_unlock(&wss->mutex); 1698 - d_fnend(6, dev, "WLP: handle C1 frame. wlp = %p\n", wlp); 1699 1849 } 1700 1850 1701 1851 /** ··· 1716 1868 struct sk_buff *skb = frame_ctx->skb; 1717 1869 struct uwb_dev_addr *src = &frame_ctx->src; 1718 1870 int result; 1719 - char buf[WLP_WSS_UUID_STRSIZE]; 1720 1871 struct sk_buff *resp = NULL; 1721 1872 struct wlp_uuid wssid; 1722 1873 u8 tag; 1723 1874 struct uwb_mac_addr virt_addr; 1724 1875 1725 1876 /* Parse C3 frame */ 1726 - d_fnstart(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n", 1727 - wlp, skb); 1728 1877 mutex_lock(&wss->mutex); 1729 1878 result = wlp_parse_c3c4_frame(wlp, skb, &wssid, &tag, &virt_addr); 1730 1879 if (result < 0) { 1731 1880 dev_err(dev, "WLP: unable to obtain values from C3 frame.\n"); 1732 1881 goto out; 1733 1882 } 1734 - wlp_wss_uuid_print(buf, sizeof(buf), &wssid); 1735 - d_printf(6, dev, "Received C3 frame with WSSID %s \n", buf); 1736 1883 if (!memcmp(&wssid, &wss->wssid, sizeof(wssid)) 1737 1884 && wss->state >= WLP_WSS_STATE_ACTIVE) { 1738 - d_printf(6, dev, "WSSID from C3 frame is known locally " 1739 - "and is active\n"); 1740 1885 result = wlp_eda_update_node(&wlp->eda, src, wss, 1741 1886 (void *) virt_addr.data, tag, 1742 1887 WLP_WSS_CONNECTED); ··· 1754 1913 } 1755 1914 } 1756 1915 } else { 1757 - d_printf(6, dev, "WSSID from C3 frame is not known locally " 1758 - "or is not active\n"); 1759 1916 /* Construct F0 frame */ 1760 1917 result = wlp_build_assoc_f0(wlp, &resp, WLP_ASSOC_ERROR_INV); 1761 1918 if (result < 0) { ··· 1762 1923 } 1763 1924 } 1764 1925 /* Send C4 frame */ 1765 - d_printf(6, dev, "Transmitting response (C4/F0) frame to %02x:%02x \n", 1766 - src->data[1], src->data[0]); 1767 1926 BUG_ON(wlp->xmit_frame == NULL); 1768 1927 result = wlp->xmit_frame(wlp, resp, src); 1769 1928 if (result < 0) { ··· 1776 1939 kfree_skb(frame_ctx->skb); 1777 1940 kfree(frame_ctx); 1778 1941 mutex_unlock(&wss->mutex); 1779 - d_fnend(6, dev, "WLP: handle C3 frame. wlp = %p, skb = %p\n", 1780 - wlp, skb); 1781 1942 } 1782 1943 1783 1944
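Most of the messages.c churn is the removal of the local d_printf()/d_dump() tracing, in line with the "use dev_dbg()" and "use print_hex_dump()" commits in this merge. Where such output is still wanted, the stock helpers suffice; the snippet below is only an illustration with placeholder names, not code from the patch.

#include <linux/device.h>
#include <linux/printk.h>

static void my_dump_frame(struct device *dev, const void *buf, size_t len)
{
        dev_dbg(dev, "received %zu octet frame\n", len);
        print_hex_dump(KERN_DEBUG, "wlp rx: ", DUMP_PREFIX_OFFSET,
                       16, 1, buf, len, false);
}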
+1 -1
drivers/uwb/wlp/sysfs.c
··· 23 23 * FIXME: Docs 24 24 * 25 25 */ 26 - 27 26 #include <linux/wlp.h> 27 + 28 28 #include "wlp-internal.h" 29 29 30 30 static
+8 -29
drivers/uwb/wlp/txrx.c
··· 26 26 27 27 #include <linux/etherdevice.h> 28 28 #include <linux/wlp.h> 29 - #define D_LOCAL 5 30 - #include <linux/uwb/debug.h> 29 + 31 30 #include "wlp-internal.h" 32 31 33 - 34 - /** 32 + /* 35 33 * Direct incoming association msg to correct parsing routine 36 34 * 37 35 * We only expect D1, E1, C1, C3 messages as new. All other incoming ··· 46 48 struct device *dev = &wlp->rc->uwb_dev.dev; 47 49 struct wlp_frame_assoc *assoc = (void *) skb->data; 48 50 struct wlp_assoc_frame_ctx *frame_ctx; 49 - d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb); 51 + 50 52 frame_ctx = kmalloc(sizeof(*frame_ctx), GFP_ATOMIC); 51 53 if (frame_ctx == NULL) { 52 54 dev_err(dev, "WLP: Unable to allocate memory for association " 53 55 "frame handling.\n"); 54 56 kfree_skb(skb); 55 - goto out; 57 + return; 56 58 } 57 59 frame_ctx->wlp = wlp; 58 60 frame_ctx->skb = skb; 59 61 frame_ctx->src = *src; 60 62 switch (assoc->type) { 61 63 case WLP_ASSOC_D1: 62 - d_printf(5, dev, "Received a D1 frame.\n"); 63 64 INIT_WORK(&frame_ctx->ws, wlp_handle_d1_frame); 64 65 schedule_work(&frame_ctx->ws); 65 66 break; 66 67 case WLP_ASSOC_E1: 67 - d_printf(5, dev, "Received a E1 frame. FIXME?\n"); 68 68 kfree_skb(skb); /* Temporary until we handle it */ 69 69 kfree(frame_ctx); /* Temporary until we handle it */ 70 70 break; 71 71 case WLP_ASSOC_C1: 72 - d_printf(5, dev, "Received a C1 frame.\n"); 73 72 INIT_WORK(&frame_ctx->ws, wlp_handle_c1_frame); 74 73 schedule_work(&frame_ctx->ws); 75 74 break; 76 75 case WLP_ASSOC_C3: 77 - d_printf(5, dev, "Received a C3 frame.\n"); 78 76 INIT_WORK(&frame_ctx->ws, wlp_handle_c3_frame); 79 77 schedule_work(&frame_ctx->ws); 80 78 break; ··· 81 87 kfree(frame_ctx); 82 88 break; 83 89 } 84 - out: 85 - d_fnend(5, dev, "wlp %p\n", wlp); 86 90 } 87 91 88 - /** 92 + /* 89 93 * Process incoming association frame 90 94 * 91 95 * Although it could be possible to deal with some incoming association ··· 104 112 struct wlp_frame_assoc *assoc = (void *) skb->data; 105 113 struct wlp_session *session = wlp->session; 106 114 u8 version; 107 - d_fnstart(5, dev, "wlp %p, skb %p\n", wlp, skb); 108 115 109 116 if (wlp_get_version(wlp, &assoc->version, &version, 110 117 sizeof(assoc->version)) < 0) ··· 141 150 } else { 142 151 wlp_direct_assoc_frame(wlp, skb, src); 143 152 } 144 - d_fnend(5, dev, "wlp %p\n", wlp); 145 153 return; 146 154 error: 147 155 kfree_skb(skb); 148 - d_fnend(5, dev, "wlp %p\n", wlp); 149 156 } 150 157 151 - /** 158 + /* 152 159 * Verify incoming frame is from connected neighbor, prep to pass to WLP client 153 160 * 154 161 * Verification proceeds according to WLP 0.99 [7.3.1]. 
The source address ··· 165 176 struct wlp_eda_node eda_entry; 166 177 struct wlp_frame_std_abbrv_hdr *hdr = (void *) skb->data; 167 178 168 - d_fnstart(6, dev, "wlp %p, skb %p \n", wlp, skb); 169 179 /*verify*/ 170 180 result = wlp_copy_eda_node(&wlp->eda, src, &eda_entry); 171 181 if (result < 0) { ··· 195 207 /*prep*/ 196 208 skb_pull(skb, sizeof(*hdr)); 197 209 out: 198 - d_fnend(6, dev, "wlp %p, skb %p, result = %d \n", wlp, skb, result); 199 210 return result; 200 211 } 201 212 202 - /** 213 + /* 203 214 * Receive a WLP frame from device 204 215 * 205 216 * @returns: 1 if calling function should free the skb ··· 213 226 struct wlp_frame_hdr *hdr; 214 227 int result = 0; 215 228 216 - d_fnstart(6, dev, "skb (%p), len (%u)\n", skb, len); 217 229 if (len < sizeof(*hdr)) { 218 230 dev_err(dev, "Not enough data to parse WLP header.\n"); 219 231 result = -EINVAL; 220 232 goto out; 221 233 } 222 234 hdr = ptr; 223 - d_dump(6, dev, hdr, sizeof(*hdr)); 224 235 if (le16_to_cpu(hdr->mux_hdr) != WLP_PROTOCOL_ID) { 225 236 dev_err(dev, "Not a WLP frame type.\n"); 226 237 result = -EINVAL; ··· 255 270 "WLP header.\n"); 256 271 goto out; 257 272 } 258 - d_printf(5, dev, "Association frame received.\n"); 259 273 wlp_receive_assoc_frame(wlp, skb, src); 260 274 break; 261 275 default: ··· 267 283 kfree_skb(skb); 268 284 result = 0; 269 285 } 270 - d_fnend(6, dev, "skb (%p)\n", skb); 271 286 return result; 272 287 } 273 288 EXPORT_SYMBOL_GPL(wlp_receive_frame); 274 289 275 290 276 - /** 291 + /* 277 292 * Verify frame from network stack, prepare for further transmission 278 293 * 279 294 * @skb: the socket buffer that needs to be prepared for transmission (it ··· 326 343 int result = -EINVAL; 327 344 struct ethhdr *eth_hdr = (void *) skb->data; 328 345 329 - d_fnstart(6, dev, "wlp (%p), skb (%p) \n", wlp, skb); 330 346 if (is_broadcast_ether_addr(eth_hdr->h_dest)) { 331 - d_printf(6, dev, "WLP: handling broadcast frame. \n"); 332 347 result = wlp_eda_for_each(&wlp->eda, wlp_wss_send_copy, skb); 333 348 if (result < 0) { 334 349 if (printk_ratelimit()) ··· 338 357 result = 1; 339 358 /* Frame will be transmitted by WLP. */ 340 359 } else { 341 - d_printf(6, dev, "WLP: handling unicast frame. \n"); 342 360 result = wlp_eda_for_virtual(&wlp->eda, eth_hdr->h_dest, dst, 343 361 wlp_wss_prep_hdr, skb); 344 362 if (unlikely(result < 0)) { ··· 348 368 } 349 369 } 350 370 out: 351 - d_fnend(6, dev, "wlp (%p), skb (%p). result = %d \n", wlp, skb, result); 352 371 return result; 353 372 } 354 373 EXPORT_SYMBOL_GPL(wlp_prepare_tx_frame);
-4
drivers/uwb/wlp/wlp-internal.h
··· 42 42 extern struct kobj_type wss_ktype; 43 43 extern struct attribute_group wss_attr_group; 44 44 45 - extern int uwb_rc_ie_add(struct uwb_rc *, const struct uwb_ie_hdr *, size_t); 46 - extern int uwb_rc_ie_rm(struct uwb_rc *, enum uwb_ie); 47 - 48 - 49 45 /* This should be changed to a dynamic array where entries are sorted 50 46 * by eth_addr and search is done in a binary form 51 47 *
+27 -53
drivers/uwb/wlp/wlp-lc.c
··· 21 21 * 22 22 * FIXME: docs 23 23 */ 24 - 25 24 #include <linux/wlp.h> 26 - #define D_LOCAL 6 27 - #include <linux/uwb/debug.h> 28 - #include "wlp-internal.h" 29 25 26 + #include "wlp-internal.h" 30 27 31 28 static 32 29 void wlp_neighbor_init(struct wlp_neighbor_e *neighbor) ··· 58 61 static 59 62 void __wlp_fill_device_info(struct wlp *wlp) 60 63 { 61 - struct device *dev = &wlp->rc->uwb_dev.dev; 62 - 63 - BUG_ON(wlp->fill_device_info == NULL); 64 - d_printf(6, dev, "Retrieving device information " 65 - "from device driver.\n"); 66 64 wlp->fill_device_info(wlp, wlp->dev_info); 67 65 } 68 66 ··· 119 127 } 120 128 } 121 129 122 - /** 130 + /* 123 131 * Populate WLP neighborhood cache with neighbor information 124 132 * 125 133 * A new neighbor is found. If it is discoverable then we add it to the ··· 133 141 int discoverable; 134 142 struct wlp_neighbor_e *neighbor; 135 143 136 - d_fnstart(6, &dev->dev, "uwb %p \n", dev); 137 - d_printf(6, &dev->dev, "Found neighbor device %02x:%02x \n", 138 - dev->dev_addr.data[1], dev->dev_addr.data[0]); 139 - /** 144 + /* 140 145 * FIXME: 141 146 * Use contents of WLP IE found in beacon cache to determine if 142 147 * neighbor is discoverable. ··· 156 167 list_add(&neighbor->node, &wlp->neighbors); 157 168 } 158 169 error_no_mem: 159 - d_fnend(6, &dev->dev, "uwb %p, result = %d \n", dev, result); 160 170 return result; 161 171 } 162 172 ··· 243 255 dev_err(dev, "Unable to send D1 frame to neighbor " 244 256 "%02x:%02x (%d)\n", dev_addr->data[1], 245 257 dev_addr->data[0], result); 246 - d_printf(6, dev, "Add placeholders into buffer next to " 247 - "neighbor information we have (dev address).\n"); 248 258 goto out; 249 259 } 250 260 /* Create session, wait for response */ ··· 270 284 /* Parse message in session->data: it will be either D2 or F0 */ 271 285 skb = session.data; 272 286 resp = (void *) skb->data; 273 - d_printf(6, dev, "Received response to D1 frame. \n"); 274 - d_dump(6, dev, skb->data, skb->len > 72 ? 72 : skb->len); 275 287 276 288 if (resp->type == WLP_ASSOC_F0) { 277 289 result = wlp_parse_f0(wlp, skb); ··· 321 337 struct device *dev = &wlp->rc->uwb_dev.dev; 322 338 char buf[WLP_WSS_UUID_STRSIZE]; 323 339 struct uwb_dev_addr *dev_addr = &neighbor->uwb_dev->dev_addr; 340 + 324 341 wlp_wss_uuid_print(buf, sizeof(buf), wssid); 325 - d_fnstart(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n", 326 - wlp, neighbor, wss, wssid, buf); 327 - d_printf(6, dev, "Complete me.\n"); 342 + 328 343 result = wlp_d1d2_exchange(wlp, neighbor, wss, wssid); 329 344 if (result < 0) { 330 345 dev_err(dev, "WLP: D1/D2 message exchange for enrollment " ··· 343 360 goto error; 344 361 } else { 345 362 wss->state = WLP_WSS_STATE_ENROLLED; 346 - d_printf(2, dev, "WLP: Success Enrollment into unsecure WSS " 347 - "%s using neighbor %02x:%02x. \n", buf, 348 - dev_addr->data[1], dev_addr->data[0]); 363 + dev_dbg(dev, "WLP: Success Enrollment into unsecure WSS " 364 + "%s using neighbor %02x:%02x. \n", 365 + buf, dev_addr->data[1], dev_addr->data[0]); 349 366 } 350 - 351 - d_fnend(6, dev, "wlp %p, neighbor %p, wss %p, wssid %p (%s)\n", 352 - wlp, neighbor, wss, wssid, buf); 353 367 out: 354 368 return result; 355 369 error: ··· 429 449 int result = 0; 430 450 struct device *dev = &wlp->rc->uwb_dev.dev; 431 451 432 - d_fnstart(6, dev, "wlp %p \n", wlp); 433 452 mutex_lock(&wlp->nbmutex); 434 453 /* Clear current neighborhood cache. 
*/ 435 454 __wlp_neighbors_release(wlp); ··· 448 469 } 449 470 error_dev_for_each: 450 471 mutex_unlock(&wlp->nbmutex); 451 - d_fnend(6, dev, "wlp %p \n", wlp); 452 472 return result; 453 473 } 454 474 ··· 470 492 int result; 471 493 switch (event) { 472 494 case UWB_NOTIF_ONAIR: 473 - d_printf(6, dev, "UWB device %02x:%02x is onair\n", 474 - uwb_dev->dev_addr.data[1], 475 - uwb_dev->dev_addr.data[0]); 476 495 result = wlp_eda_create_node(&wlp->eda, 477 496 uwb_dev->mac_addr.data, 478 497 &uwb_dev->dev_addr); ··· 480 505 uwb_dev->dev_addr.data[0]); 481 506 break; 482 507 case UWB_NOTIF_OFFAIR: 483 - d_printf(6, dev, "UWB device %02x:%02x is offair\n", 484 - uwb_dev->dev_addr.data[1], 485 - uwb_dev->dev_addr.data[0]); 486 508 wlp_eda_rm_node(&wlp->eda, &uwb_dev->dev_addr); 487 509 mutex_lock(&wlp->nbmutex); 488 - list_for_each_entry_safe(neighbor, next, &wlp->neighbors, 489 - node) { 490 - if (neighbor->uwb_dev == uwb_dev) { 491 - d_printf(6, dev, "Removing device from " 492 - "neighborhood.\n"); 510 + list_for_each_entry_safe(neighbor, next, &wlp->neighbors, node) { 511 + if (neighbor->uwb_dev == uwb_dev) 493 512 __wlp_neighbor_release(neighbor); 494 - } 495 513 } 496 514 mutex_unlock(&wlp->nbmutex); 497 515 break; ··· 494 526 } 495 527 } 496 528 497 - int wlp_setup(struct wlp *wlp, struct uwb_rc *rc) 529 + static void wlp_channel_changed(struct uwb_pal *pal, int channel) 498 530 { 499 - struct device *dev = &rc->uwb_dev.dev; 531 + struct wlp *wlp = container_of(pal, struct wlp, pal); 532 + 533 + if (channel < 0) 534 + netif_carrier_off(wlp->ndev); 535 + else 536 + netif_carrier_on(wlp->ndev); 537 + } 538 + 539 + int wlp_setup(struct wlp *wlp, struct uwb_rc *rc, struct net_device *ndev) 540 + { 500 541 int result; 501 542 502 - d_fnstart(6, dev, "wlp %p\n", wlp); 503 543 BUG_ON(wlp->fill_device_info == NULL); 504 544 BUG_ON(wlp->xmit_frame == NULL); 505 545 BUG_ON(wlp->stop_queue == NULL); 506 546 BUG_ON(wlp->start_queue == NULL); 547 + 507 548 wlp->rc = rc; 549 + wlp->ndev = ndev; 508 550 wlp_eda_init(&wlp->eda);/* Set up address cache */ 509 551 wlp->uwb_notifs_handler.cb = wlp_uwb_notifs_cb; 510 552 wlp->uwb_notifs_handler.data = wlp; 511 553 uwb_notifs_register(rc, &wlp->uwb_notifs_handler); 512 554 513 555 uwb_pal_init(&wlp->pal); 514 - result = uwb_pal_register(rc, &wlp->pal); 556 + wlp->pal.rc = rc; 557 + wlp->pal.channel_changed = wlp_channel_changed; 558 + result = uwb_pal_register(&wlp->pal); 515 559 if (result < 0) 516 560 uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); 517 561 518 - d_fnend(6, dev, "wlp %p, result = %d\n", wlp, result); 519 562 return result; 520 563 } 521 564 EXPORT_SYMBOL_GPL(wlp_setup); 522 565 523 566 void wlp_remove(struct wlp *wlp) 524 567 { 525 - struct device *dev = &wlp->rc->uwb_dev.dev; 526 - d_fnstart(6, dev, "wlp %p\n", wlp); 527 568 wlp_neighbors_release(wlp); 528 - uwb_pal_unregister(wlp->rc, &wlp->pal); 569 + uwb_pal_unregister(&wlp->pal); 529 570 uwb_notifs_deregister(wlp->rc, &wlp->uwb_notifs_handler); 530 571 wlp_eda_release(&wlp->eda); 531 572 mutex_lock(&wlp->mutex); ··· 542 565 kfree(wlp->dev_info); 543 566 mutex_unlock(&wlp->mutex); 544 567 wlp->rc = NULL; 545 - /* We have to use NULL here because this function can be called 546 - * when the device disappeared. */ 547 - d_fnend(6, NULL, "wlp %p\n", wlp); 548 568 } 549 569 EXPORT_SYMBOL_GPL(wlp_remove); 550 570
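The new wlp_channel_changed() callback ties the network interface carrier to the radio state: a negative channel (radio stopped beaconing) drops the carrier, anything else raises it, and wlp_setup() now registers the callback through the uwb_pal it embeds. A hypothetical PAL using the same hooks could be wired as in the sketch below; struct my_pal and my_pal_setup() are invented, while the uwb_pal_init()/uwb_pal_register() calls and the .rc/.channel_changed fields are the ones the patch itself uses.

#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/uwb.h>

struct my_pal {
        struct uwb_pal pal;
        struct net_device *ndev;
};

static void my_channel_changed(struct uwb_pal *pal, int channel)
{
        struct my_pal *mp = container_of(pal, struct my_pal, pal);

        if (channel < 0)                /* radio stopped beaconing */
                netif_carrier_off(mp->ndev);
        else
                netif_carrier_on(mp->ndev);
}

static int my_pal_setup(struct my_pal *mp, struct uwb_rc *rc)
{
        uwb_pal_init(&mp->pal);
        mp->pal.rc = rc;
        mp->pal.channel_changed = my_channel_changed;
        return uwb_pal_register(&mp->pal);
}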
+18 -112
drivers/uwb/wlp/wss-lc.c
··· 43 43 * wlp_wss_release() 44 44 * wlp_wss_reset() 45 45 */ 46 - 47 46 #include <linux/etherdevice.h> /* for is_valid_ether_addr */ 48 47 #include <linux/skbuff.h> 49 48 #include <linux/wlp.h> 50 - #define D_LOCAL 5 51 - #include <linux/uwb/debug.h> 52 - #include "wlp-internal.h" 53 49 50 + #include "wlp-internal.h" 54 51 55 52 size_t wlp_wss_key_print(char *buf, size_t bufsize, u8 *key) 56 53 { ··· 113 116 */ 114 117 void wlp_wss_reset(struct wlp_wss *wss) 115 118 { 116 - struct wlp *wlp = container_of(wss, struct wlp, wss); 117 - struct device *dev = &wlp->rc->uwb_dev.dev; 118 - d_fnstart(5, dev, "wss (%p) \n", wss); 119 119 memset(&wss->wssid, 0, sizeof(wss->wssid)); 120 120 wss->hash = 0; 121 121 memset(&wss->name[0], 0, sizeof(wss->name)); ··· 121 127 memset(&wss->master_key[0], 0, sizeof(wss->master_key)); 122 128 wss->tag = 0; 123 129 wss->state = WLP_WSS_STATE_NONE; 124 - d_fnend(5, dev, "wss (%p) \n", wss); 125 130 } 126 131 127 132 /** ··· 138 145 struct device *dev = &wlp->rc->uwb_dev.dev; 139 146 int result; 140 147 141 - d_fnstart(5, dev, "wss (%p), wssid: %s\n", wss, wssid_str); 142 148 result = kobject_set_name(&wss->kobj, "wss-%s", wssid_str); 143 149 if (result < 0) 144 150 return result; ··· 154 162 result); 155 163 goto error_sysfs_create_group; 156 164 } 157 - d_fnend(5, dev, "Completed. result = %d \n", result); 158 165 return 0; 159 166 error_sysfs_create_group: 160 167 ··· 205 214 struct wlp *wlp = container_of(wss, struct wlp, wss); 206 215 struct device *dev = &wlp->rc->uwb_dev.dev; 207 216 struct wlp_neighbor_e *neighbor; 208 - char buf[WLP_WSS_UUID_STRSIZE]; 209 217 int result = -ENXIO; 210 218 struct uwb_dev_addr *dev_addr; 211 219 212 - wlp_wss_uuid_print(buf, sizeof(buf), wssid); 213 - d_fnstart(5, dev, "wss %p, wssid %s, registrar %02x:%02x \n", 214 - wss, buf, dest->data[1], dest->data[0]); 215 220 mutex_lock(&wlp->nbmutex); 216 221 list_for_each_entry(neighbor, &wlp->neighbors, node) { 217 222 dev_addr = &neighbor->uwb_dev->dev_addr; 218 223 if (!memcmp(dest, dev_addr, sizeof(*dest))) { 219 - d_printf(5, dev, "Neighbor %02x:%02x is valid, " 220 - "enrolling. \n", 221 - dev_addr->data[1], dev_addr->data[0]); 222 - result = wlp_enroll_neighbor(wlp, neighbor, wss, 223 - wssid); 224 + result = wlp_enroll_neighbor(wlp, neighbor, wss, wssid); 224 225 break; 225 226 } 226 227 } ··· 220 237 dev_err(dev, "WLP: Cannot find neighbor %02x:%02x. \n", 221 238 dest->data[1], dest->data[0]); 222 239 mutex_unlock(&wlp->nbmutex); 223 - d_fnend(5, dev, "wss %p, wssid %s, registrar %02x:%02x, result %d \n", 224 - wss, buf, dest->data[1], dest->data[0], result); 225 240 return result; 226 241 } 227 242 ··· 241 260 char buf[WLP_WSS_UUID_STRSIZE]; 242 261 int result = -ENXIO; 243 262 244 - wlp_wss_uuid_print(buf, sizeof(buf), wssid); 245 - d_fnstart(5, dev, "wss %p, wssid %s \n", wss, buf); 263 + 246 264 mutex_lock(&wlp->nbmutex); 247 265 list_for_each_entry(neighbor, &wlp->neighbors, node) { 248 266 list_for_each_entry(wssid_e, &neighbor->wssid, node) { 249 267 if (!memcmp(wssid, &wssid_e->wssid, sizeof(*wssid))) { 250 - d_printf(5, dev, "Found WSSID %s in neighbor " 251 - "%02x:%02x cache. 
\n", buf, 252 - neighbor->uwb_dev->dev_addr.data[1], 253 - neighbor->uwb_dev->dev_addr.data[0]); 254 268 result = wlp_enroll_neighbor(wlp, neighbor, 255 269 wss, wssid); 256 270 if (result == 0) /* enrollment success */ ··· 255 279 } 256 280 } 257 281 out: 258 - if (result == -ENXIO) 282 + if (result == -ENXIO) { 283 + wlp_wss_uuid_print(buf, sizeof(buf), wssid); 259 284 dev_err(dev, "WLP: Cannot find WSSID %s in cache. \n", buf); 285 + } 260 286 mutex_unlock(&wlp->nbmutex); 261 - d_fnend(5, dev, "wss %p, wssid %s, result %d \n", wss, buf, result); 262 287 return result; 263 288 } 264 289 ··· 284 307 struct uwb_dev_addr bcast = {.data = {0xff, 0xff} }; 285 308 286 309 wlp_wss_uuid_print(buf, sizeof(buf), wssid); 310 + 287 311 if (wss->state != WLP_WSS_STATE_NONE) { 288 312 dev_err(dev, "WLP: Already enrolled in WSS %s.\n", buf); 289 313 result = -EEXIST; 290 314 goto error; 291 315 } 292 - if (!memcmp(&bcast, devaddr, sizeof(bcast))) { 293 - d_printf(5, dev, "Request to enroll in discovered WSS " 294 - "with WSSID %s \n", buf); 316 + if (!memcmp(&bcast, devaddr, sizeof(bcast))) 295 317 result = wlp_wss_enroll_discovered(wss, wssid); 296 - } else { 297 - d_printf(5, dev, "Request to enroll in WSSID %s with " 298 - "registrar %02x:%02x\n", buf, devaddr->data[1], 299 - devaddr->data[0]); 318 + else 300 319 result = wlp_wss_enroll_target(wss, wssid, devaddr); 301 - } 302 320 if (result < 0) { 303 321 dev_err(dev, "WLP: Unable to enroll into WSS %s, result %d \n", 304 322 buf, result); 305 323 goto error; 306 324 } 307 - d_printf(2, dev, "Successfully enrolled into WSS %s \n", buf); 325 + dev_dbg(dev, "Successfully enrolled into WSS %s \n", buf); 308 326 result = wlp_wss_sysfs_add(wss, buf); 309 327 if (result < 0) { 310 328 dev_err(dev, "WLP: Unable to set up sysfs for WSS kobject.\n"); ··· 335 363 u8 hash; /* only include one hash */ 336 364 } ie_data; 337 365 338 - d_fnstart(5, dev, "Activating WSS %p. \n", wss); 339 366 BUG_ON(wss->state != WLP_WSS_STATE_ENROLLED); 340 367 wss->hash = wlp_wss_comp_wssid_hash(&wss->wssid); 341 368 wss->tag = wss->hash; ··· 353 382 wss->state = WLP_WSS_STATE_ACTIVE; 354 383 result = 0; 355 384 error_wlp_ie: 356 - d_fnend(5, dev, "Activating WSS %p, result = %d \n", wss, result); 357 385 return result; 358 386 } 359 387 ··· 375 405 int result = 0; 376 406 char buf[WLP_WSS_UUID_STRSIZE]; 377 407 378 - d_fnstart(5, dev, "Enrollment and activation requested. \n"); 379 408 mutex_lock(&wss->mutex); 380 409 result = wlp_wss_enroll(wss, wssid, devaddr); 381 410 if (result < 0) { ··· 393 424 error_activate: 394 425 error_enroll: 395 426 mutex_unlock(&wss->mutex); 396 - d_fnend(5, dev, "Completed. result = %d \n", result); 397 427 return result; 398 428 } 399 429 ··· 415 447 struct device *dev = &wlp->rc->uwb_dev.dev; 416 448 int result = 0; 417 449 char buf[WLP_WSS_UUID_STRSIZE]; 418 - d_fnstart(5, dev, "Request to create new WSS.\n"); 450 + 419 451 result = wlp_wss_uuid_print(buf, sizeof(buf), wssid); 420 - d_printf(5, dev, "Request to create WSS: WSSID=%s, name=%s, " 421 - "sec_status=%u, accepting enrollment=%u \n", 422 - buf, name, sec_status, accept); 452 + 423 453 if (!mutex_trylock(&wss->mutex)) { 424 454 dev_err(dev, "WLP: WLP association session in progress.\n"); 425 455 return -EBUSY; ··· 464 498 result = 0; 465 499 out: 466 500 mutex_unlock(&wss->mutex); 467 - d_fnend(5, dev, "Completed. 
result = %d \n", result); 468 501 return result; 469 502 } 470 503 ··· 485 520 { 486 521 int result = 0; 487 522 struct device *dev = &wlp->rc->uwb_dev.dev; 488 - char buf[WLP_WSS_UUID_STRSIZE]; 489 523 DECLARE_COMPLETION_ONSTACK(completion); 490 524 struct wlp_session session; 491 525 struct sk_buff *skb; 492 526 struct wlp_frame_assoc *resp; 493 527 struct wlp_uuid wssid; 494 528 495 - wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); 496 - d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", 497 - wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); 498 529 mutex_lock(&wlp->mutex); 499 530 /* Send C1 association frame */ 500 531 result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C1); ··· 526 565 /* Parse message in session->data: it will be either C2 or F0 */ 527 566 skb = session.data; 528 567 resp = (void *) skb->data; 529 - d_printf(5, dev, "Received response to C1 frame. \n"); 530 - d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len); 531 568 if (resp->type == WLP_ASSOC_F0) { 532 569 result = wlp_parse_f0(wlp, skb); 533 570 if (result < 0) ··· 543 584 result = 0; 544 585 goto error_resp_parse; 545 586 } 546 - if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))) { 547 - d_printf(5, dev, "WSSID in C2 frame matches local " 548 - "active WSS.\n"); 587 + if (!memcmp(&wssid, &wss->wssid, sizeof(wssid))) 549 588 result = 1; 550 - } else { 589 + else { 551 590 dev_err(dev, "WLP: Received a C2 frame without matching " 552 591 "WSSID.\n"); 553 592 result = 0; ··· 555 598 out: 556 599 wlp->session = NULL; 557 600 mutex_unlock(&wlp->mutex); 558 - d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", 559 - wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); 560 601 return result; 561 602 } 562 603 ··· 575 620 { 576 621 struct device *dev = &wlp->rc->uwb_dev.dev; 577 622 int result = 0; 578 - char buf[WLP_WSS_UUID_STRSIZE]; 579 - wlp_wss_uuid_print(buf, sizeof(buf), wssid); 580 - d_fnstart(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual " 581 - "%02x:%02x:%02x:%02x:%02x:%02x \n", wlp, wss, buf, *tag, 582 - virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], 583 - virt_addr->data[3], virt_addr->data[4], virt_addr->data[5]); 584 623 585 624 if (!memcmp(wssid, &wss->wssid, sizeof(*wssid))) { 586 - d_printf(5, dev, "WSSID from neighbor frame matches local " 587 - "active WSS.\n"); 588 625 /* Update EDA cache */ 589 626 result = wlp_eda_update_node(&wlp->eda, dev_addr, wss, 590 627 (void *) virt_addr->data, *tag, ··· 585 638 dev_err(dev, "WLP: Unable to update EDA cache " 586 639 "with new connected neighbor information.\n"); 587 640 } else { 588 - dev_err(dev, "WLP: Neighbor does not have matching " 589 - "WSSID.\n"); 641 + dev_err(dev, "WLP: Neighbor does not have matching WSSID.\n"); 590 642 result = -EINVAL; 591 643 } 592 - 593 - d_fnend(5, dev, "wlp %p, wss %p, wssid %s, tag %u, virtual " 594 - "%02x:%02x:%02x:%02x:%02x:%02x, result = %d \n", 595 - wlp, wss, buf, *tag, 596 - virt_addr->data[0], virt_addr->data[1], virt_addr->data[2], 597 - virt_addr->data[3], virt_addr->data[4], virt_addr->data[5], 598 - result); 599 - 600 644 return result; 601 645 } 602 646 ··· 603 665 { 604 666 int result; 605 667 struct device *dev = &wlp->rc->uwb_dev.dev; 606 - char buf[WLP_WSS_UUID_STRSIZE]; 607 668 struct wlp_uuid wssid; 608 669 u8 tag; 609 670 struct uwb_mac_addr virt_addr; ··· 611 674 struct wlp_frame_assoc *resp; 612 675 struct sk_buff *skb; 613 676 614 - wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); 615 - d_fnstart(5, dev, "wlp %p, wss %p 
(wssid %s), neighbor %02x:%02x \n", 616 - wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); 617 677 mutex_lock(&wlp->mutex); 618 678 /* Send C3 association frame */ 619 679 result = wlp_send_assoc_frame(wlp, wss, dev_addr, WLP_ASSOC_C3); ··· 645 711 /* Parse message in session->data: it will be either C4 or F0 */ 646 712 skb = session.data; 647 713 resp = (void *) skb->data; 648 - d_printf(5, dev, "Received response to C3 frame. \n"); 649 - d_dump(5, dev, skb->data, skb->len > 72 ? 72 : skb->len); 650 714 if (resp->type == WLP_ASSOC_F0) { 651 715 result = wlp_parse_f0(wlp, skb); 652 716 if (result < 0) ··· 676 744 WLP_WSS_CONNECT_FAILED); 677 745 wlp->session = NULL; 678 746 mutex_unlock(&wlp->mutex); 679 - d_fnend(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", 680 - wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); 681 747 return result; 682 748 } 683 749 ··· 710 780 struct wlp_wss *wss = &wlp->wss; 711 781 int result; 712 782 struct device *dev = &wlp->rc->uwb_dev.dev; 713 - char buf[WLP_WSS_UUID_STRSIZE]; 714 783 715 784 mutex_lock(&wss->mutex); 716 - wlp_wss_uuid_print(buf, sizeof(buf), &wss->wssid); 717 - d_fnstart(5, dev, "wlp %p, wss %p (wssid %s), neighbor %02x:%02x \n", 718 - wlp, wss, buf, dev_addr->data[1], dev_addr->data[0]); 719 785 if (wss->state < WLP_WSS_STATE_ACTIVE) { 720 786 if (printk_ratelimit()) 721 787 dev_err(dev, "WLP: Attempting to connect with " ··· 762 836 BUG_ON(wlp->start_queue == NULL); 763 837 wlp->start_queue(wlp); 764 838 mutex_unlock(&wss->mutex); 765 - d_fnend(5, dev, "wlp %p, wss %p (wssid %s)\n", wlp, wss, buf); 766 839 } 767 840 768 841 /** ··· 780 855 struct sk_buff *skb = _skb; 781 856 struct wlp_frame_std_abbrv_hdr *std_hdr; 782 857 783 - d_fnstart(6, dev, "wlp %p \n", wlp); 784 858 if (eda_entry->state == WLP_WSS_CONNECTED) { 785 859 /* Add WLP header */ 786 860 BUG_ON(skb_headroom(skb) < sizeof(*std_hdr)); ··· 797 873 dev_addr->data[0]); 798 874 result = -EINVAL; 799 875 } 800 - d_fnend(6, dev, "wlp %p \n", wlp); 801 876 return result; 802 877 } 803 878 ··· 816 893 { 817 894 int result = 0; 818 895 struct device *dev = &wlp->rc->uwb_dev.dev; 819 - struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; 820 - unsigned char *eth_addr = eda_entry->eth_addr; 821 896 struct sk_buff *skb = _skb; 822 897 struct wlp_assoc_conn_ctx *conn_ctx; 823 898 824 - d_fnstart(5, dev, "wlp %p\n", wlp); 825 - d_printf(5, dev, "To neighbor %02x:%02x with eth " 826 - "%02x:%02x:%02x:%02x:%02x:%02x\n", dev_addr->data[1], 827 - dev_addr->data[0], eth_addr[0], eth_addr[1], eth_addr[2], 828 - eth_addr[3], eth_addr[4], eth_addr[5]); 829 899 if (eda_entry->state == WLP_WSS_UNCONNECTED) { 830 900 /* We don't want any more packets while we set up connection */ 831 901 BUG_ON(wlp->stop_queue == NULL); ··· 845 929 "previously. Not retrying. 
\n"); 846 930 result = -ENONET; 847 931 goto out; 848 - } else { /* eda_entry->state == WLP_WSS_CONNECTED */ 849 - d_printf(5, dev, "Neighbor is connected, preparing frame.\n"); 932 + } else /* eda_entry->state == WLP_WSS_CONNECTED */ 850 933 result = wlp_wss_prep_hdr(wlp, eda_entry, skb); 851 - } 852 934 out: 853 - d_fnend(5, dev, "wlp %p, result = %d \n", wlp, result); 854 935 return result; 855 936 } 856 937 ··· 870 957 struct sk_buff *copy; 871 958 struct uwb_dev_addr *dev_addr = &eda_entry->dev_addr; 872 959 873 - d_fnstart(5, dev, "to neighbor %02x:%02x, skb (%p) \n", 874 - dev_addr->data[1], dev_addr->data[0], skb); 875 960 copy = skb_copy(skb, GFP_ATOMIC); 876 961 if (copy == NULL) { 877 962 if (printk_ratelimit()) ··· 899 988 dev_kfree_skb_irq(copy);/*we need to free if tx fails */ 900 989 } 901 990 out: 902 - d_fnend(5, dev, "to neighbor %02x:%02x \n", dev_addr->data[1], 903 - dev_addr->data[0]); 904 991 return result; 905 992 } 906 993 ··· 914 1005 struct wlp *wlp = container_of(wss, struct wlp, wss); 915 1006 struct device *dev = &wlp->rc->uwb_dev.dev; 916 1007 int result = 0; 917 - d_fnstart(5, dev, "wss (%p) \n", wss); 1008 + 918 1009 mutex_lock(&wss->mutex); 919 1010 wss->kobj.parent = &net_dev->dev.kobj; 920 1011 if (!is_valid_ether_addr(net_dev->dev_addr)) { ··· 927 1018 sizeof(wss->virtual_addr.data)); 928 1019 out: 929 1020 mutex_unlock(&wss->mutex); 930 - d_fnend(5, dev, "wss (%p) \n", wss); 931 1021 return result; 932 1022 } 933 1023 EXPORT_SYMBOL_GPL(wlp_wss_setup); ··· 943 1035 void wlp_wss_remove(struct wlp_wss *wss) 944 1036 { 945 1037 struct wlp *wlp = container_of(wss, struct wlp, wss); 946 - struct device *dev = &wlp->rc->uwb_dev.dev; 947 - d_fnstart(5, dev, "wss (%p) \n", wss); 1038 + 948 1039 mutex_lock(&wss->mutex); 949 1040 if (wss->state == WLP_WSS_STATE_ACTIVE) 950 1041 uwb_rc_ie_rm(wlp->rc, UWB_IE_WLP); ··· 957 1050 wlp_eda_release(&wlp->eda); 958 1051 wlp_eda_init(&wlp->eda); 959 1052 mutex_unlock(&wss->mutex); 960 - d_fnend(5, dev, "wss (%p) \n", wss); 961 1053 } 962 1054 EXPORT_SYMBOL_GPL(wlp_wss_remove);
+1
include/linux/usb/wusb-wa.h
··· 51 51 WUSB_REQ_GET_TIME = 25, 52 52 WUSB_REQ_SET_STREAM_IDX = 26, 53 53 WUSB_REQ_SET_WUSB_MAS = 27, 54 + WUSB_REQ_CHAN_STOP = 28, 54 55 }; 55 56 56 57
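The new WUSB_REQ_CHAN_STOP request code gives the host a way to ask a wire adapter to stop its WUSB channel. A minimal sketch of issuing it over the default control pipe is below; the bRequestType, the use of wValue to carry a stop delay, and the wIndex interface number are assumptions made for illustration, not defined by this header.

    /* Sketch: ask a wire adapter interface to stop the WUSB channel.
     * The wValue/wIndex encodings here are assumed, not from wusb-wa.h. */
    #include <linux/usb.h>
    #include <linux/usb/wusb-wa.h>

    static int example_chan_stop(struct usb_device *udev, u16 ifnum, u16 delay)
    {
            return usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
                                   WUSB_REQ_CHAN_STOP,
                                   USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
                                   delay,   /* assumed: stop delay */
                                   ifnum,   /* assumed: target interface */
                                   NULL, 0, 1000 /* ms */);
    }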
+94 -29
include/linux/uwb.h
··· 30 30 #include <linux/device.h> 31 31 #include <linux/mutex.h> 32 32 #include <linux/timer.h> 33 + #include <linux/wait.h> 33 34 #include <linux/workqueue.h> 34 35 #include <linux/uwb/spec.h> 35 36 ··· 67 66 struct uwb_dev_addr dev_addr; 68 67 int beacon_slot; 69 68 DECLARE_BITMAP(streams, UWB_NUM_STREAMS); 69 + DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS); 70 70 }; 71 71 #define to_uwb_dev(d) container_of(d, struct uwb_dev, dev) 72 72 ··· 88 86 struct mutex mutex; 89 87 }; 90 88 89 + /* Beacon cache list */ 90 + struct uwb_beca { 91 + struct list_head list; 92 + size_t entries; 93 + struct mutex mutex; 94 + }; 95 + 96 + /* Event handling thread. */ 97 + struct uwbd { 98 + int pid; 99 + struct task_struct *task; 100 + wait_queue_head_t wq; 101 + struct list_head event_list; 102 + spinlock_t event_list_lock; 103 + }; 104 + 91 105 /** 92 106 * struct uwb_mas_bm - a bitmap of all MAS in a superframe 93 107 * @bm: a bitmap of length #UWB_NUM_MAS 94 108 */ 95 109 struct uwb_mas_bm { 96 110 DECLARE_BITMAP(bm, UWB_NUM_MAS); 111 + DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS); 112 + int safe; 113 + int unsafe; 97 114 }; 98 115 99 116 /** ··· 138 117 * FIXME: further target states TBD. 139 118 */ 140 119 enum uwb_rsv_state { 141 - UWB_RSV_STATE_NONE, 120 + UWB_RSV_STATE_NONE = 0, 142 121 UWB_RSV_STATE_O_INITIATED, 143 122 UWB_RSV_STATE_O_PENDING, 144 123 UWB_RSV_STATE_O_MODIFIED, 145 124 UWB_RSV_STATE_O_ESTABLISHED, 125 + UWB_RSV_STATE_O_TO_BE_MOVED, 126 + UWB_RSV_STATE_O_MOVE_EXPANDING, 127 + UWB_RSV_STATE_O_MOVE_COMBINING, 128 + UWB_RSV_STATE_O_MOVE_REDUCING, 146 129 UWB_RSV_STATE_T_ACCEPTED, 147 130 UWB_RSV_STATE_T_DENIED, 131 + UWB_RSV_STATE_T_CONFLICT, 148 132 UWB_RSV_STATE_T_PENDING, 133 + UWB_RSV_STATE_T_EXPANDING_ACCEPTED, 134 + UWB_RSV_STATE_T_EXPANDING_CONFLICT, 135 + UWB_RSV_STATE_T_EXPANDING_PENDING, 136 + UWB_RSV_STATE_T_EXPANDING_DENIED, 137 + UWB_RSV_STATE_T_RESIZED, 149 138 150 139 UWB_RSV_STATE_LAST, 151 140 }; ··· 178 147 struct uwb_dev *dev; 179 148 struct uwb_dev_addr devaddr; 180 149 }; 150 + }; 151 + 152 + struct uwb_rsv_move { 153 + struct uwb_mas_bm final_mas; 154 + struct uwb_ie_drp *companion_drp_ie; 155 + struct uwb_mas_bm companion_mas; 181 156 }; 182 157 183 158 /* ··· 223 186 * 224 187 * @status: negotiation status 225 188 * @stream: stream index allocated for this reservation 189 + * @tiebreaker: conflict tiebreaker for this reservation 226 190 * @mas: reserved MAS 227 191 * @drp_ie: the DRP IE 228 192 * @ie_valid: true iff the DRP IE matches the reservation parameters ··· 239 201 struct uwb_rc *rc; 240 202 struct list_head rc_node; 241 203 struct list_head pal_node; 204 + struct kref kref; 242 205 243 206 struct uwb_dev *owner; 244 207 struct uwb_rsv_target target; 245 208 enum uwb_drp_type type; 246 209 int max_mas; 247 210 int min_mas; 248 - int sparsity; 211 + int max_interval; 249 212 bool is_multicast; 250 213 251 214 uwb_rsv_cb_f callback; 252 215 void *pal_priv; 253 216 254 217 enum uwb_rsv_state state; 218 + bool needs_release_companion_mas; 255 219 u8 stream; 220 + u8 tiebreaker; 256 221 struct uwb_mas_bm mas; 257 222 struct uwb_ie_drp *drp_ie; 223 + struct uwb_rsv_move mv; 258 224 bool ie_valid; 259 225 struct timer_list timer; 260 - bool expired; 226 + struct work_struct handle_timeout_work; 261 227 }; 262 228 263 229 static const ··· 303 261 bool ie_valid; 304 262 }; 305 263 264 + struct uwb_drp_backoff_win { 265 + u8 window; 266 + u8 n; 267 + int total_expired; 268 + struct timer_list timer; 269 + bool can_reserve_extra_mases; 270 + }; 306 271 307 272 
const char *uwb_rsv_state_str(enum uwb_rsv_state state); 308 273 const char *uwb_rsv_type_str(enum uwb_drp_type type); ··· 324 275 void uwb_rsv_terminate(struct uwb_rsv *rsv); 325 276 326 277 void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); 278 + 279 + void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas); 327 280 328 281 /** 329 282 * Radio Control Interface instance ··· 388 337 u8 ctx_roll; 389 338 390 339 int beaconing; /* Beaconing state [channel number] */ 340 + int beaconing_forced; 391 341 int scanning; 392 342 enum uwb_scan_type scan_type:3; 393 343 unsigned ready:1; 394 344 struct uwb_notifs_chain notifs_chain; 345 + struct uwb_beca uwb_beca; 395 346 347 + struct uwbd uwbd; 348 + 349 + struct uwb_drp_backoff_win bow; 396 350 struct uwb_drp_avail drp_avail; 397 351 struct list_head reservations; 352 + struct list_head cnflt_alien_list; 353 + struct uwb_mas_bm cnflt_alien_bitmap; 398 354 struct mutex rsvs_mutex; 355 + spinlock_t rsvs_lock; 399 356 struct workqueue_struct *rsv_workq; 400 - struct work_struct rsv_update_work; 401 357 358 + struct delayed_work rsv_update_work; 359 + struct delayed_work rsv_alien_bp_work; 360 + int set_drp_ie_pending; 402 361 struct mutex ies_mutex; 403 362 struct uwb_rc_cmd_set_ie *ies; 404 363 size_t ies_capacity; 405 364 406 - spinlock_t pal_lock; 407 365 struct list_head pals; 366 + int active_pals; 408 367 409 368 struct uwb_dbg *dbg; 410 369 }; ··· 422 361 423 362 /** 424 363 * struct uwb_pal - a UWB PAL 425 - * @name: descriptive name for this PAL (wushc, wlp, etc.). 364 + * @name: descriptive name for this PAL (wusbhc, wlp, etc.). 426 365 * @device: a device for the PAL. Used to link the PAL and the radio 427 366 * controller in sysfs. 367 + * @rc: the radio controller the PAL uses. 368 + * @channel_changed: called when the channel used by the radio changes. 369 + * A channel of -1 means the channel has been stopped. 428 370 * @new_rsv: called when a peer requests a reservation (may be NULL if 429 371 * the PAL cannot accept reservation requests). 372 + * @channel: channel being used by the PAL; 0 if the PAL isn't using 373 + * the radio; -1 if the PAL wishes to use the radio but 374 + * cannot. 375 + * @debugfs_dir: a debugfs directory which the PAL can use for its own 376 + * debugfs files. 430 377 * 431 378 * A Protocol Adaptation Layer (PAL) is a user of the WiMedia UWB 432 379 * radio platform (e.g., WUSB, WLP or Bluetooth UWB AMP). 
··· 453 384 struct list_head node; 454 385 const char *name; 455 386 struct device *device; 456 - void (*new_rsv)(struct uwb_rsv *rsv); 387 + struct uwb_rc *rc; 388 + 389 + void (*channel_changed)(struct uwb_pal *pal, int channel); 390 + void (*new_rsv)(struct uwb_pal *pal, struct uwb_rsv *rsv); 391 + 392 + int channel; 393 + struct dentry *debugfs_dir; 457 394 }; 458 395 459 396 void uwb_pal_init(struct uwb_pal *pal); 460 - int uwb_pal_register(struct uwb_rc *rc, struct uwb_pal *pal); 461 - void uwb_pal_unregister(struct uwb_rc *rc, struct uwb_pal *pal); 397 + int uwb_pal_register(struct uwb_pal *pal); 398 + void uwb_pal_unregister(struct uwb_pal *pal); 399 + 400 + int uwb_radio_start(struct uwb_pal *pal); 401 + void uwb_radio_stop(struct uwb_pal *pal); 462 402 463 403 /* 464 404 * General public API ··· 521 443 struct uwb_rccb *cmd, size_t cmd_size, 522 444 u8 expected_type, u16 expected_event, 523 445 struct uwb_rceb **preply); 524 - ssize_t uwb_rc_get_ie(struct uwb_rc *, struct uwb_rc_evt_get_ie **); 525 - int uwb_bg_joined(struct uwb_rc *rc); 526 446 527 447 size_t __uwb_addr_print(char *, size_t, const unsigned char *, int); 528 448 ··· 596 520 void uwb_rc_neh_grok(struct uwb_rc *, void *, size_t); 597 521 void uwb_rc_neh_error(struct uwb_rc *, int); 598 522 void uwb_rc_reset_all(struct uwb_rc *rc); 523 + void uwb_rc_pre_reset(struct uwb_rc *rc); 524 + void uwb_rc_post_reset(struct uwb_rc *rc); 599 525 600 526 /** 601 527 * uwb_rsv_is_owner - is the owner of this reservation the RC? ··· 609 531 } 610 532 611 533 /** 612 - * Events generated by UWB that can be passed to any listeners 534 + * enum uwb_notifs - UWB events that can be passed to any listeners 535 + * @UWB_NOTIF_ONAIR: a new neighbour has joined the beacon group. 536 + * @UWB_NOTIF_OFFAIR: a neighbour has left the beacon group. 613 537 * 614 538 * Higher layers can register callback functions with the radio 615 539 * controller using uwb_notifs_register(). The radio controller ··· 619 539 * nodes when an event occurs. 620 540 */ 621 541 enum uwb_notifs { 622 - UWB_NOTIF_BG_JOIN = 0, /* radio controller joined a beacon group */ 623 - UWB_NOTIF_BG_LEAVE = 1, /* radio controller left a beacon group */ 624 542 UWB_NOTIF_ONAIR, 625 543 UWB_NOTIF_OFFAIR, 626 544 }; ··· 730 652 731 653 /* Information Element handling */ 732 654 733 - /* For representing the state of writing to a buffer when iterating */ 734 - struct uwb_buf_ctx { 735 - char *buf; 736 - size_t bytes, size; 737 - }; 738 - 739 - typedef int (*uwb_ie_f)(struct uwb_dev *, const struct uwb_ie_hdr *, 740 - size_t, void *); 741 655 struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); 742 - ssize_t uwb_ie_for_each(struct uwb_dev *uwb_dev, uwb_ie_f fn, void *data, 743 - const void *buf, size_t size); 744 - int uwb_ie_dump_hex(struct uwb_dev *, const struct uwb_ie_hdr *, 745 - size_t, void *); 746 - int uwb_rc_set_ie(struct uwb_rc *, struct uwb_rc_cmd_set_ie *); 747 - struct uwb_ie_hdr *uwb_ie_next(void **ptr, size_t *len); 748 - 656 + int uwb_rc_ie_add(struct uwb_rc *uwb_rc, const struct uwb_ie_hdr *ies, size_t size); 657 + int uwb_rc_ie_rm(struct uwb_rc *uwb_rc, enum uwb_ie element_id); 749 658 750 659 /* 751 660 * Transmission statistics
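struct uwb_pal now carries the radio controller pointer and the radio-manager hooks itself: a PAL fills in rc, channel_changed() and the two-argument new_rsv(), registers with uwb_pal_register(), and asks for an active channel with uwb_radio_start(). A sketch of how a PAL might wire this up; the my_pal container and the example_* names are illustrative only.

    struct my_pal {
            struct uwb_pal pal;
            /* PAL-private state */
    };

    static void example_channel_changed(struct uwb_pal *pal, int channel)
    {
            /* channel == -1: beaconing stopped; otherwise the channel in use */
    }

    static void example_new_rsv(struct uwb_pal *pal, struct uwb_rsv *rsv)
    {
            /* a peer requested a reservation targeted at this PAL */
    }

    static int example_pal_start(struct my_pal *mp, struct uwb_rc *rc)
    {
            int ret;

            uwb_pal_init(&mp->pal);
            mp->pal.name = "example";
            mp->pal.rc = rc;
            mp->pal.channel_changed = example_channel_changed;
            mp->pal.new_rsv = example_new_rsv;

            ret = uwb_pal_register(&mp->pal);
            if (ret < 0)
                    return ret;

            /* request the radio; channel_changed() reports the outcome */
            return uwb_radio_start(&mp->pal);
    }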
+12 -1
include/linux/uwb/debug-cmd.h
··· 32 32 enum uwb_dbg_cmd_type { 33 33 UWB_DBG_CMD_RSV_ESTABLISH = 1, 34 34 UWB_DBG_CMD_RSV_TERMINATE = 2, 35 + UWB_DBG_CMD_IE_ADD = 3, 36 + UWB_DBG_CMD_IE_RM = 4, 37 + UWB_DBG_CMD_RADIO_START = 5, 38 + UWB_DBG_CMD_RADIO_STOP = 6, 35 39 }; 36 40 37 41 struct uwb_dbg_cmd_rsv_establish { ··· 43 39 __u8 type; 44 40 __u16 max_mas; 45 41 __u16 min_mas; 46 - __u8 sparsity; 42 + __u8 max_interval; 47 43 }; 48 44 49 45 struct uwb_dbg_cmd_rsv_terminate { 50 46 int index; 47 + }; 48 + 49 + struct uwb_dbg_cmd_ie { 50 + __u8 data[128]; 51 + int len; 51 52 }; 52 53 53 54 struct uwb_dbg_cmd { ··· 60 51 union { 61 52 struct uwb_dbg_cmd_rsv_establish rsv_establish; 62 53 struct uwb_dbg_cmd_rsv_terminate rsv_terminate; 54 + struct uwb_dbg_cmd_ie ie_add; 55 + struct uwb_dbg_cmd_ie ie_rm; 63 56 }; 64 57 }; 65 58
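These commands are driven by writing a struct uwb_dbg_cmd to the radio controller's debug interface. A user-space sketch of adding an IE with the new UWB_DBG_CMD_IE_ADD command follows; the command-type selector field name and the debugfs path are assumptions here, only the command and payload structures come from this header.

    /* User-space sketch: add an IE through the uwb debug interface.
     * The debugfs path and the .type selector field are assumptions. */
    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <linux/uwb/debug-cmd.h>

    static int example_ie_add(const unsigned char *ie, int len)
    {
            struct uwb_dbg_cmd cmd;
            ssize_t ret;
            int fd;

            if (len < 0 || len > (int)sizeof(cmd.ie_add.data))
                    return -1;

            memset(&cmd, 0, sizeof(cmd));
            cmd.type = UWB_DBG_CMD_IE_ADD;          /* assumed field name */
            memcpy(cmd.ie_add.data, ie, len);
            cmd.ie_add.len = len;

            fd = open("/sys/kernel/debug/uwb/uwb0/command", O_WRONLY);
            if (fd < 0)
                    return -1;
            ret = write(fd, &cmd, sizeof(cmd));
            close(fd);
            return ret == (ssize_t)sizeof(cmd) ? 0 : -1;
    }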
-82
include/linux/uwb/debug.h
··· 1 - /* 2 - * Ultra Wide Band 3 - * Debug Support 4 - * 5 - * Copyright (C) 2005-2006 Intel Corporation 6 - * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> 7 - * 8 - * This program is free software; you can redistribute it and/or 9 - * modify it under the terms of the GNU General Public License version 10 - * 2 as published by the Free Software Foundation. 11 - * 12 - * This program is distributed in the hope that it will be useful, 13 - * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 - * GNU General Public License for more details. 16 - * 17 - * You should have received a copy of the GNU General Public License 18 - * along with this program; if not, write to the Free Software 19 - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 20 - * 02110-1301, USA. 21 - * 22 - * 23 - * FIXME: doc 24 - * Invoke like: 25 - * 26 - * #define D_LOCAL 4 27 - * #include <linux/uwb/debug.h> 28 - * 29 - * At the end of your include files. 30 - */ 31 - #include <linux/types.h> 32 - 33 - struct device; 34 - extern void dump_bytes(struct device *dev, const void *_buf, size_t rsize); 35 - 36 - /* Master debug switch; !0 enables, 0 disables */ 37 - #define D_MASTER (!0) 38 - 39 - /* Local (per-file) debug switch; #define before #including */ 40 - #ifndef D_LOCAL 41 - #define D_LOCAL 0 42 - #endif 43 - 44 - #undef __d_printf 45 - #undef d_fnstart 46 - #undef d_fnend 47 - #undef d_printf 48 - #undef d_dump 49 - 50 - #define __d_printf(l, _tag, _dev, f, a...) \ 51 - do { \ 52 - struct device *__dev = (_dev); \ 53 - if (D_MASTER && D_LOCAL >= (l)) { \ 54 - char __head[64] = ""; \ 55 - if (_dev != NULL) { \ 56 - if ((unsigned long)__dev < 4096) \ 57 - printk(KERN_ERR "E: Corrupt dev %p\n", \ 58 - __dev); \ 59 - else \ 60 - snprintf(__head, sizeof(__head), \ 61 - "%s %s: ", \ 62 - dev_driver_string(__dev), \ 63 - __dev->bus_id); \ 64 - } \ 65 - printk(KERN_ERR "%s%s" _tag ": " f, __head, \ 66 - __func__, ## a); \ 67 - } \ 68 - } while (0 && _dev) 69 - 70 - #define d_fnstart(l, _dev, f, a...) \ 71 - __d_printf(l, " FNSTART", _dev, f, ## a) 72 - #define d_fnend(l, _dev, f, a...) \ 73 - __d_printf(l, " FNEND", _dev, f, ## a) 74 - #define d_printf(l, _dev, f, a...) \ 75 - __d_printf(l, "", _dev, f, ## a) 76 - #define d_dump(l, _dev, ptr, size) \ 77 - do { \ 78 - struct device *__dev = _dev; \ 79 - if (D_MASTER && D_LOCAL >= (l)) \ 80 - dump_bytes(__dev, ptr, size); \ 81 - } while (0 && _dev) 82 - #define d_test(l) (D_MASTER && D_LOCAL >= (l))
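With the private d_printf()/d_dump() macros gone, call sites that still want this kind of output have the stock kernel helpers available. A rough sketch of the equivalent with dev_dbg() and print_hex_dump(), assuming a valid struct device and sk_buff; the prefix string and 72-byte cap are illustrative.

    /* Sketch: roughly what a removed d_printf()/d_dump() pair becomes. */
    #include <linux/kernel.h>
    #include <linux/device.h>
    #include <linux/printk.h>
    #include <linux/skbuff.h>

    static void example_dump_frame(struct device *dev, struct sk_buff *skb)
    {
            dev_dbg(dev, "received response frame, %u bytes\n", skb->len);
            print_hex_dump(KERN_DEBUG, "wlp: ", DUMP_PREFIX_OFFSET, 16, 1,
                           skb->data, min_t(size_t, skb->len, 72), false);
    }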
+53
include/linux/uwb/spec.h
··· 59 59 #define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES) 60 60 61 61 /* 62 + * Number of MAS required before a row can be considered available. 63 + */ 64 + #define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1) 65 + 66 + /* 62 67 * Number of streams per DRP reservation between a pair of devices. 63 68 * 64 69 * [ECMA-368] section 16.8.6. ··· 97 92 * [ECMA-368] section 17.16 98 93 */ 99 94 enum { UWB_MAX_LOST_BEACONS = 3 }; 95 + 96 + /* 97 + * mDRPBackOffWinMin 98 + * 99 + * The minimum number of superframes to wait before trying to reserve 100 + * extra MAS. 101 + * 102 + * [ECMA-368] section 17.16 103 + */ 104 + enum { UWB_DRP_BACKOFF_WIN_MIN = 2 }; 105 + 106 + /* 107 + * mDRPBackOffWinMax 108 + * 109 + * The maximum number of superframes to wait before trying to reserve 110 + * extra MAS. 111 + * 112 + * [ECMA-368] section 17.16 113 + */ 114 + enum { UWB_DRP_BACKOFF_WIN_MAX = 16 }; 100 115 101 116 /* 102 117 * Length of a superframe in microseconds. ··· 225 200 UWB_DRP_REASON_MODIFIED, 226 201 }; 227 202 203 + /** Relinquish Request Reason Codes ([ECMA-368] table 113) */ 204 + enum uwb_relinquish_req_reason { 205 + UWB_RELINQUISH_REQ_REASON_NON_SPECIFIC = 0, 206 + UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION, 207 + }; 208 + 228 209 /** 229 210 * DRP Notification Reason Codes (WHCI 0.95 [3.1.4.9]) 230 211 */ ··· 283 252 UWB_APP_SPEC_PROBE_IE = 15, 284 253 UWB_IDENTIFICATION_IE = 19, 285 254 UWB_MASTER_KEY_ID_IE = 20, 255 + UWB_RELINQUISH_REQUEST_IE = 21, 286 256 UWB_IE_WLP = 250, /* WiMedia Logical Link Control Protocol WLP 0.99 */ 287 257 UWB_APP_SPEC_IE = 255, 288 258 }; ··· 396 364 struct uwb_ie_hdr hdr; 397 365 DECLARE_BITMAP(bmp, UWB_NUM_MAS); 398 366 } __attribute__((packed)); 367 + 368 + /* Relinqish Request IE ([ECMA-368] section 16.8.19). */ 369 + struct uwb_relinquish_request_ie { 370 + struct uwb_ie_hdr hdr; 371 + __le16 relinquish_req_control; 372 + struct uwb_dev_addr dev_addr; 373 + struct uwb_drp_alloc allocs[]; 374 + } __attribute__((packed)); 375 + 376 + static inline int uwb_ie_relinquish_req_reason_code(struct uwb_relinquish_request_ie *ie) 377 + { 378 + return (le16_to_cpu(ie->relinquish_req_control) >> 0) & 0xf; 379 + } 380 + 381 + static inline void uwb_ie_relinquish_req_set_reason_code(struct uwb_relinquish_request_ie *ie, 382 + int reason_code) 383 + { 384 + u16 ctrl = le16_to_cpu(ie->relinquish_req_control); 385 + ctrl = (ctrl & ~(0xf << 0)) | (reason_code << 0); 386 + ie->relinquish_req_control = cpu_to_le16(ctrl); 387 + } 399 388 400 389 /** 401 390 * The Vendor ID is set to an OUI that indicates the vendor of the device.
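The relinquish request control word keeps its reason code in the low four bits, which is what the two new inline helpers pack and unpack. A sketch of filling in the fixed part of such an IE (no allocation entries); the element_id/length field names of struct uwb_ie_hdr are assumed from the rest of this header rather than shown in this hunk, and the caller is expected to have zeroed the IE first.

    static void example_fill_relinquish_req(struct uwb_relinquish_request_ie *ie,
                                            const struct uwb_dev_addr *target)
    {
            ie->hdr.element_id = UWB_RELINQUISH_REQUEST_IE;   /* assumed field names */
            ie->hdr.length = sizeof(*ie) - sizeof(ie->hdr);   /* no allocs[] entries */
            uwb_ie_relinquish_req_set_reason_code(ie,
                            UWB_RELINQUISH_REQ_REASON_OVER_ALLOCATION);
            ie->dev_addr = *target;
    }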
+2
include/linux/uwb/umc.h
··· 89 89 void (*remove)(struct umc_dev *); 90 90 int (*suspend)(struct umc_dev *, pm_message_t state); 91 91 int (*resume)(struct umc_dev *); 92 + int (*pre_reset)(struct umc_dev *); 93 + int (*post_reset)(struct umc_dev *); 92 94 93 95 struct device_driver driver; 94 96 };
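The two new hooks let a UMC client driver quiesce its part of the multi-function device before the radio controller is reset and reinitialise it afterwards. A sketch of a driver supplying them; only the two hook slots come from this header, the rest of the driver structure is filled in as before.

    static int example_pre_reset(struct umc_dev *umc)
    {
            /* stop DMA and interrupt activity before the RC reset */
            return 0;
    }

    static int example_post_reset(struct umc_dev *umc)
    {
            /* reprogram the hardware once the reset has completed */
            return 0;
    }

    static struct umc_driver example_umc_driver = {
            /* .name, .probe, .remove, ... as before */
            .pre_reset  = example_pre_reset,
            .post_reset = example_post_reset,
    };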
+2 -1
include/linux/wlp.h
··· 646 646 struct wlp { 647 647 struct mutex mutex; 648 648 struct uwb_rc *rc; /* UWB radio controller */ 649 + struct net_device *ndev; 649 650 struct uwb_pal pal; 650 651 struct wlp_eda eda; 651 652 struct wlp_uuid uuid; ··· 676 675 static struct wlp_wss_attribute wss_attr_##_name = __ATTR(_name, _mode, \ 677 676 _show, _store) 678 677 679 - extern int wlp_setup(struct wlp *, struct uwb_rc *); 678 + extern int wlp_setup(struct wlp *, struct uwb_rc *, struct net_device *ndev); 680 679 extern void wlp_remove(struct wlp *); 681 680 extern ssize_t wlp_neighborhood_show(struct wlp *, char *); 682 681 extern int wlp_wss_setup(struct net_device *, struct wlp_wss *);
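wlp_setup() now takes the network device so the WLP core can keep the new ndev back-pointer in struct wlp. A sketch of the updated call from a driver's setup path; the example_wlp_dev container and the error handling are illustrative, only the prototypes in this header are taken as given.

    struct example_wlp_dev {
            struct wlp        wlp;
            struct uwb_rc     *rc;
            struct net_device *ndev;
    };

    static int example_wlp_attach(struct example_wlp_dev *wd)
    {
            int ret;

            ret = wlp_setup(&wd->wlp, wd->rc, wd->ndev);   /* new third argument */
            if (ret < 0)
                    return ret;
            return wlp_wss_setup(wd->ndev, &wd->wlp.wss);
    }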