Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

uwb: improved MAS allocator and reservation conflict handling

Greatly enhance the MAS allocator:
- Handle row and column reservations.
- Permit all the available MAS to be allocated.
- Follow the WiMedia rules on MAS selection.

Take appropriate action when reservation conflicts are detected.
- Correctly identify which reservation wins the conflict.
- Protect alien BP reservations.
- If an owned reservation loses, resize/move it.
- Follow the backoff procedure before requesting additional MAS.

When reservations are terminated, move the remaining reservations (if
necessary) so they keep following the MAS allocation rules.

Signed-off-by: Stefano Panella <stefano.panella@csr.com>
Signed-off-by: David Vrabel <david.vrabel@csr.com>

authored by

Stefano Panella and committed by
David Vrabel
5b37717a c35fa3ea

+1605 -329
+7 -6
drivers/usb/wusbcore/reservation.c
··· 48 48 { 49 49 struct wusbhc *wusbhc = rsv->pal_priv; 50 50 struct device *dev = wusbhc->dev; 51 + struct uwb_mas_bm mas; 51 52 char buf[72]; 52 53 53 54 switch (rsv->state) { 54 55 case UWB_RSV_STATE_O_ESTABLISHED: 55 - bitmap_scnprintf(buf, sizeof(buf), rsv->mas.bm, UWB_NUM_MAS); 56 + uwb_rsv_get_usable_mas(rsv, &mas); 57 + bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS); 56 58 dev_dbg(dev, "established reservation: %s\n", buf); 57 - wusbhc_bwa_set(wusbhc, rsv->stream, &rsv->mas); 59 + wusbhc_bwa_set(wusbhc, rsv->stream, &mas); 58 60 break; 59 61 case UWB_RSV_STATE_NONE: 60 62 dev_dbg(dev, "removed reservation\n"); ··· 87 85 bcid.data[0] = wusbhc->cluster_id; 88 86 bcid.data[1] = 0; 89 87 90 - rsv->owner = &rc->uwb_dev; 91 88 rsv->target.type = UWB_RSV_TARGET_DEVADDR; 92 89 rsv->target.devaddr = bcid; 93 90 rsv->type = UWB_DRP_TYPE_PRIVATE; 94 - rsv->max_mas = 256; 95 - rsv->min_mas = 16; /* one MAS per zone? */ 96 - rsv->sparsity = 16; /* at least one MAS in each zone? */ 91 + rsv->max_mas = 256; /* try to get as much as possible */ 92 + rsv->min_mas = 15; /* one MAS per zone */ 93 + rsv->max_interval = 1; /* max latency is one zone */ 97 94 rsv->is_multicast = true; 98 95 99 96 ret = uwb_rsv_establish(rsv);
+1
drivers/uwb/Makefile
··· 6 6 7 7 uwb-objs := \ 8 8 address.o \ 9 + allocator.o \ 9 10 beacon.o \ 10 11 driver.o \ 11 12 drp.o \
+386
drivers/uwb/allocator.c
··· 1 + /* 2 + * UWB reservation management. 3 + * 4 + * Copyright (C) 2008 Cambridge Silicon Radio Ltd. 5 + * 6 + * This program is free software; you can redistribute it and/or 7 + * modify it under the terms of the GNU General Public License version 8 + * 2 as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope that it will be useful, 11 + * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 + * GNU General Public License for more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + #include <linux/version.h> 19 + #include <linux/kernel.h> 20 + #include <linux/uwb.h> 21 + 22 + #include "uwb-internal.h" 23 + 24 + static void uwb_rsv_fill_column_alloc(struct uwb_rsv_alloc_info *ai) 25 + { 26 + int col, mas, safe_mas, unsafe_mas; 27 + unsigned char *bm = ai->bm; 28 + struct uwb_rsv_col_info *ci = ai->ci; 29 + unsigned char c; 30 + 31 + for (col = ci->csi.start_col; col < UWB_NUM_ZONES; col += ci->csi.interval) { 32 + 33 + safe_mas = ci->csi.safe_mas_per_col; 34 + unsafe_mas = ci->csi.unsafe_mas_per_col; 35 + 36 + for (mas = 0; mas < UWB_MAS_PER_ZONE; mas++ ) { 37 + if (bm[col * UWB_MAS_PER_ZONE + mas] == 0) { 38 + 39 + if (safe_mas > 0) { 40 + safe_mas--; 41 + c = UWB_RSV_MAS_SAFE; 42 + } else if (unsafe_mas > 0) { 43 + unsafe_mas--; 44 + c = UWB_RSV_MAS_UNSAFE; 45 + } else { 46 + break; 47 + } 48 + bm[col * UWB_MAS_PER_ZONE + mas] = c; 49 + } 50 + } 51 + } 52 + } 53 + 54 + static void uwb_rsv_fill_row_alloc(struct uwb_rsv_alloc_info *ai) 55 + { 56 + int mas, col, rows; 57 + unsigned char *bm = ai->bm; 58 + struct uwb_rsv_row_info *ri = &ai->ri; 59 + unsigned char c; 60 + 61 + rows = 1; 62 + c = UWB_RSV_MAS_SAFE; 63 + for (mas = UWB_MAS_PER_ZONE - 1; mas >= 0; mas--) { 64 + if (ri->avail[mas] == 1) { 65 + 66 + if 
(rows > ri->used_rows) { 67 + break; 68 + } else if (rows > 7) { 69 + c = UWB_RSV_MAS_UNSAFE; 70 + } 71 + 72 + for (col = 0; col < UWB_NUM_ZONES; col++) { 73 + if (bm[col * UWB_NUM_ZONES + mas] != UWB_RSV_MAS_NOT_AVAIL) { 74 + bm[col * UWB_NUM_ZONES + mas] = c; 75 + if(c == UWB_RSV_MAS_SAFE) 76 + ai->safe_allocated_mases++; 77 + else 78 + ai->unsafe_allocated_mases++; 79 + } 80 + } 81 + rows++; 82 + } 83 + } 84 + ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases; 85 + } 86 + 87 + /* 88 + * Find the best column set for a given availability, interval, num safe mas and 89 + * num unsafe mas. 90 + * 91 + * The different sets are tried in order as shown below, depending on the interval. 92 + * 93 + * interval = 16 94 + * deep = 0 95 + * set 1 -> { 8 } 96 + * deep = 1 97 + * set 1 -> { 4 } 98 + * set 2 -> { 12 } 99 + * deep = 2 100 + * set 1 -> { 2 } 101 + * set 2 -> { 6 } 102 + * set 3 -> { 10 } 103 + * set 4 -> { 14 } 104 + * deep = 3 105 + * set 1 -> { 1 } 106 + * set 2 -> { 3 } 107 + * set 3 -> { 5 } 108 + * set 4 -> { 7 } 109 + * set 5 -> { 9 } 110 + * set 6 -> { 11 } 111 + * set 7 -> { 13 } 112 + * set 8 -> { 15 } 113 + * 114 + * interval = 8 115 + * deep = 0 116 + * set 1 -> { 4 12 } 117 + * deep = 1 118 + * set 1 -> { 2 10 } 119 + * set 2 -> { 6 14 } 120 + * deep = 2 121 + * set 1 -> { 1 9 } 122 + * set 2 -> { 3 11 } 123 + * set 3 -> { 5 13 } 124 + * set 4 -> { 7 15 } 125 + * 126 + * interval = 4 127 + * deep = 0 128 + * set 1 -> { 2 6 10 14 } 129 + * deep = 1 130 + * set 1 -> { 1 5 9 13 } 131 + * set 2 -> { 3 7 11 15 } 132 + * 133 + * interval = 2 134 + * deep = 0 135 + * set 1 -> { 1 3 5 7 9 11 13 15 } 136 + */ 137 + static int uwb_rsv_find_best_column_set(struct uwb_rsv_alloc_info *ai, int interval, 138 + int num_safe_mas, int num_unsafe_mas) 139 + { 140 + struct uwb_rsv_col_info *ci = ai->ci; 141 + struct uwb_rsv_col_set_info *csi = &ci->csi; 142 + struct uwb_rsv_col_set_info tmp_csi; 143 + int deep, set, col, start_col_deep, 
col_start_set; 144 + int start_col, max_mas_in_set, lowest_max_mas_in_deep; 145 + int n_mas; 146 + int found = UWB_RSV_ALLOC_NOT_FOUND; 147 + 148 + tmp_csi.start_col = 0; 149 + start_col_deep = interval; 150 + n_mas = num_unsafe_mas + num_safe_mas; 151 + 152 + for (deep = 0; ((interval >> deep) & 0x1) == 0; deep++) { 153 + start_col_deep /= 2; 154 + col_start_set = 0; 155 + lowest_max_mas_in_deep = UWB_MAS_PER_ZONE; 156 + 157 + for (set = 1; set <= (1 << deep); set++) { 158 + max_mas_in_set = 0; 159 + start_col = start_col_deep + col_start_set; 160 + for (col = start_col; col < UWB_NUM_ZONES; col += interval) { 161 + 162 + if (ci[col].max_avail_safe >= num_safe_mas && 163 + ci[col].max_avail_unsafe >= n_mas) { 164 + if (ci[col].highest_mas[n_mas] > max_mas_in_set) 165 + max_mas_in_set = ci[col].highest_mas[n_mas]; 166 + } else { 167 + max_mas_in_set = 0; 168 + break; 169 + } 170 + } 171 + if ((lowest_max_mas_in_deep > max_mas_in_set) && max_mas_in_set) { 172 + lowest_max_mas_in_deep = max_mas_in_set; 173 + 174 + tmp_csi.start_col = start_col; 175 + } 176 + col_start_set += (interval >> deep); 177 + } 178 + 179 + if (lowest_max_mas_in_deep < 8) { 180 + csi->start_col = tmp_csi.start_col; 181 + found = UWB_RSV_ALLOC_FOUND; 182 + break; 183 + } else if ((lowest_max_mas_in_deep > 8) && 184 + (lowest_max_mas_in_deep != UWB_MAS_PER_ZONE) && 185 + (found == UWB_RSV_ALLOC_NOT_FOUND)) { 186 + csi->start_col = tmp_csi.start_col; 187 + found = UWB_RSV_ALLOC_FOUND; 188 + } 189 + } 190 + 191 + if (found == UWB_RSV_ALLOC_FOUND) { 192 + csi->interval = interval; 193 + csi->safe_mas_per_col = num_safe_mas; 194 + csi->unsafe_mas_per_col = num_unsafe_mas; 195 + 196 + ai->safe_allocated_mases = (UWB_NUM_ZONES / interval) * num_safe_mas; 197 + ai->unsafe_allocated_mases = (UWB_NUM_ZONES / interval) * num_unsafe_mas; 198 + ai->total_allocated_mases = ai->safe_allocated_mases + ai->unsafe_allocated_mases; 199 + ai->interval = interval; 200 + } 201 + return found; 202 + } 203 + 204 + 
static void get_row_descriptors(struct uwb_rsv_alloc_info *ai) 205 + { 206 + unsigned char *bm = ai->bm; 207 + struct uwb_rsv_row_info *ri = &ai->ri; 208 + int col, mas; 209 + 210 + ri->free_rows = 16; 211 + for (mas = 0; mas < UWB_MAS_PER_ZONE; mas ++) { 212 + ri->avail[mas] = 1; 213 + for (col = 1; col < UWB_NUM_ZONES; col++) { 214 + if (bm[col * UWB_NUM_ZONES + mas] == UWB_RSV_MAS_NOT_AVAIL) { 215 + ri->free_rows--; 216 + ri->avail[mas]=0; 217 + break; 218 + } 219 + } 220 + } 221 + } 222 + 223 + static void uwb_rsv_fill_column_info(unsigned char *bm, int column, struct uwb_rsv_col_info *rci) 224 + { 225 + int mas; 226 + int block_count = 0, start_block = 0; 227 + int previous_avail = 0; 228 + int available = 0; 229 + int safe_mas_in_row[UWB_MAS_PER_ZONE] = { 230 + 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, 231 + }; 232 + 233 + rci->max_avail_safe = 0; 234 + 235 + for (mas = 0; mas < UWB_MAS_PER_ZONE; mas ++) { 236 + if (!bm[column * UWB_NUM_ZONES + mas]) { 237 + available++; 238 + rci->max_avail_unsafe = available; 239 + 240 + rci->highest_mas[available] = mas; 241 + 242 + if (previous_avail) { 243 + block_count++; 244 + if ((block_count > safe_mas_in_row[start_block]) && 245 + (!rci->max_avail_safe)) 246 + rci->max_avail_safe = available - 1; 247 + } else { 248 + previous_avail = 1; 249 + start_block = mas; 250 + block_count = 1; 251 + } 252 + } else { 253 + previous_avail = 0; 254 + } 255 + } 256 + if (!rci->max_avail_safe) 257 + rci->max_avail_safe = rci->max_avail_unsafe; 258 + } 259 + 260 + static void get_column_descriptors(struct uwb_rsv_alloc_info *ai) 261 + { 262 + unsigned char *bm = ai->bm; 263 + struct uwb_rsv_col_info *ci = ai->ci; 264 + int col; 265 + 266 + for (col = 1; col < UWB_NUM_ZONES; col++) { 267 + uwb_rsv_fill_column_info(bm, col, &ci[col]); 268 + } 269 + } 270 + 271 + static int uwb_rsv_find_best_row_alloc(struct uwb_rsv_alloc_info *ai) 272 + { 273 + int n_rows; 274 + int max_rows = ai->max_mas / UWB_USABLE_MAS_PER_ROW; 275 + int 
min_rows = ai->min_mas / UWB_USABLE_MAS_PER_ROW; 276 + if (ai->min_mas % UWB_USABLE_MAS_PER_ROW) 277 + min_rows++; 278 + for (n_rows = max_rows; n_rows >= min_rows; n_rows--) { 279 + if (n_rows <= ai->ri.free_rows) { 280 + ai->ri.used_rows = n_rows; 281 + ai->interval = 1; /* row reservation */ 282 + uwb_rsv_fill_row_alloc(ai); 283 + return UWB_RSV_ALLOC_FOUND; 284 + } 285 + } 286 + return UWB_RSV_ALLOC_NOT_FOUND; 287 + } 288 + 289 + static int uwb_rsv_find_best_col_alloc(struct uwb_rsv_alloc_info *ai, int interval) 290 + { 291 + int n_safe, n_unsafe, n_mas; 292 + int n_column = UWB_NUM_ZONES / interval; 293 + int max_per_zone = ai->max_mas / n_column; 294 + int min_per_zone = ai->min_mas / n_column; 295 + 296 + if (ai->min_mas % n_column) 297 + min_per_zone++; 298 + 299 + if (min_per_zone > UWB_MAS_PER_ZONE) { 300 + return UWB_RSV_ALLOC_NOT_FOUND; 301 + } 302 + 303 + if (max_per_zone > UWB_MAS_PER_ZONE) { 304 + max_per_zone = UWB_MAS_PER_ZONE; 305 + } 306 + 307 + for (n_mas = max_per_zone; n_mas >= min_per_zone; n_mas--) { 308 + if (uwb_rsv_find_best_column_set(ai, interval, 0, n_mas) == UWB_RSV_ALLOC_NOT_FOUND) 309 + continue; 310 + for (n_safe = n_mas; n_safe >= 0; n_safe--) { 311 + n_unsafe = n_mas - n_safe; 312 + if (uwb_rsv_find_best_column_set(ai, interval, n_safe, n_unsafe) == UWB_RSV_ALLOC_FOUND) { 313 + uwb_rsv_fill_column_alloc(ai); 314 + return UWB_RSV_ALLOC_FOUND; 315 + } 316 + } 317 + } 318 + return UWB_RSV_ALLOC_NOT_FOUND; 319 + } 320 + 321 + int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available, 322 + struct uwb_mas_bm *result) 323 + { 324 + struct uwb_rsv_alloc_info *ai; 325 + int interval; 326 + int bit_index; 327 + 328 + ai = kzalloc(sizeof(struct uwb_rsv_alloc_info), GFP_KERNEL); 329 + 330 + ai->min_mas = rsv->min_mas; 331 + ai->max_mas = rsv->max_mas; 332 + ai->max_interval = rsv->max_interval; 333 + 334 + 335 + /* fill the not available vector from the available bm */ 336 + for (bit_index = 0; bit_index < 
UWB_NUM_MAS; bit_index++) { 337 + if (!test_bit(bit_index, available->bm)) 338 + ai->bm[bit_index] = UWB_RSV_MAS_NOT_AVAIL; 339 + } 340 + 341 + if (ai->max_interval == 1) { 342 + get_row_descriptors(ai); 343 + if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND) 344 + goto alloc_found; 345 + else 346 + goto alloc_not_found; 347 + } 348 + 349 + get_column_descriptors(ai); 350 + 351 + for (interval = 16; interval >= 2; interval>>=1) { 352 + if (interval > ai->max_interval) 353 + continue; 354 + if (uwb_rsv_find_best_col_alloc(ai, interval) == UWB_RSV_ALLOC_FOUND) 355 + goto alloc_found; 356 + } 357 + 358 + /* try row reservation if no column is found */ 359 + get_row_descriptors(ai); 360 + if (uwb_rsv_find_best_row_alloc(ai) == UWB_RSV_ALLOC_FOUND) 361 + goto alloc_found; 362 + else 363 + goto alloc_not_found; 364 + 365 + alloc_found: 366 + bitmap_zero(result->bm, UWB_NUM_MAS); 367 + bitmap_zero(result->unsafe_bm, UWB_NUM_MAS); 368 + /* fill the safe and unsafe bitmaps */ 369 + for (bit_index = 0; bit_index < UWB_NUM_MAS; bit_index++) { 370 + if (ai->bm[bit_index] == UWB_RSV_MAS_SAFE) 371 + set_bit(bit_index, result->bm); 372 + else if (ai->bm[bit_index] == UWB_RSV_MAS_UNSAFE) 373 + set_bit(bit_index, result->unsafe_bm); 374 + } 375 + bitmap_or(result->bm, result->bm, result->unsafe_bm, UWB_NUM_MAS); 376 + 377 + result->safe = ai->safe_allocated_mases; 378 + result->unsafe = ai->unsafe_allocated_mases; 379 + 380 + kfree(ai); 381 + return UWB_RSV_ALLOC_FOUND; 382 + 383 + alloc_not_found: 384 + kfree(ai); 385 + return UWB_RSV_ALLOC_NOT_FOUND; 386 + }
+3 -1
drivers/uwb/drp-avail.c
··· 58 58 * 59 59 * avail = global & local & pending 60 60 */ 61 - static void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail) 61 + void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail) 62 62 { 63 63 bitmap_and(avail->bm, rc->drp_avail.global, rc->drp_avail.local, UWB_NUM_MAS); 64 64 bitmap_and(avail->bm, avail->bm, rc->drp_avail.pending, UWB_NUM_MAS); ··· 105 105 bitmap_or(rc->drp_avail.local, rc->drp_avail.local, mas->bm, UWB_NUM_MAS); 106 106 bitmap_or(rc->drp_avail.pending, rc->drp_avail.pending, mas->bm, UWB_NUM_MAS); 107 107 rc->drp_avail.ie_valid = false; 108 + uwb_rsv_handle_drp_avail_change(rc); 108 109 } 109 110 110 111 /** ··· 281 280 mutex_lock(&rc->rsvs_mutex); 282 281 bitmap_copy(rc->drp_avail.global, bmp, UWB_NUM_MAS); 283 282 rc->drp_avail.ie_valid = false; 283 + uwb_rsv_handle_drp_avail_change(rc); 284 284 mutex_unlock(&rc->rsvs_mutex); 285 285 286 286 uwb_rsv_sched_update(rc);
+123 -37
drivers/uwb/drp-ie.c
··· 22 22 23 23 #include "uwb-internal.h" 24 24 25 + 26 + /* 27 + * Return the reason code for a reservations's DRP IE. 28 + */ 29 + int uwb_rsv_reason_code(struct uwb_rsv *rsv) 30 + { 31 + static const int reason_codes[] = { 32 + [UWB_RSV_STATE_O_INITIATED] = UWB_DRP_REASON_ACCEPTED, 33 + [UWB_RSV_STATE_O_PENDING] = UWB_DRP_REASON_ACCEPTED, 34 + [UWB_RSV_STATE_O_MODIFIED] = UWB_DRP_REASON_MODIFIED, 35 + [UWB_RSV_STATE_O_ESTABLISHED] = UWB_DRP_REASON_ACCEPTED, 36 + [UWB_RSV_STATE_O_TO_BE_MOVED] = UWB_DRP_REASON_ACCEPTED, 37 + [UWB_RSV_STATE_O_MOVE_COMBINING] = UWB_DRP_REASON_MODIFIED, 38 + [UWB_RSV_STATE_O_MOVE_REDUCING] = UWB_DRP_REASON_MODIFIED, 39 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, 40 + [UWB_RSV_STATE_T_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, 41 + [UWB_RSV_STATE_T_CONFLICT] = UWB_DRP_REASON_CONFLICT, 42 + [UWB_RSV_STATE_T_PENDING] = UWB_DRP_REASON_PENDING, 43 + [UWB_RSV_STATE_T_DENIED] = UWB_DRP_REASON_DENIED, 44 + [UWB_RSV_STATE_T_RESIZED] = UWB_DRP_REASON_ACCEPTED, 45 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, 46 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, 47 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, 48 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, 49 + }; 50 + 51 + return reason_codes[rsv->state]; 52 + } 53 + 54 + /* 55 + * Return the reason code for a reservations's companion DRP IE . 
56 + */ 57 + int uwb_rsv_companion_reason_code(struct uwb_rsv *rsv) 58 + { 59 + static const int companion_reason_codes[] = { 60 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = UWB_DRP_REASON_ACCEPTED, 61 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = UWB_DRP_REASON_ACCEPTED, 62 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = UWB_DRP_REASON_CONFLICT, 63 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = UWB_DRP_REASON_PENDING, 64 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = UWB_DRP_REASON_DENIED, 65 + }; 66 + 67 + return companion_reason_codes[rsv->state]; 68 + } 69 + 70 + /* 71 + * Return the status bit for a reservations's DRP IE. 72 + */ 73 + int uwb_rsv_status(struct uwb_rsv *rsv) 74 + { 75 + static const int statuses[] = { 76 + [UWB_RSV_STATE_O_INITIATED] = 0, 77 + [UWB_RSV_STATE_O_PENDING] = 0, 78 + [UWB_RSV_STATE_O_MODIFIED] = 1, 79 + [UWB_RSV_STATE_O_ESTABLISHED] = 1, 80 + [UWB_RSV_STATE_O_TO_BE_MOVED] = 0, 81 + [UWB_RSV_STATE_O_MOVE_COMBINING] = 1, 82 + [UWB_RSV_STATE_O_MOVE_REDUCING] = 1, 83 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = 1, 84 + [UWB_RSV_STATE_T_ACCEPTED] = 1, 85 + [UWB_RSV_STATE_T_CONFLICT] = 0, 86 + [UWB_RSV_STATE_T_PENDING] = 0, 87 + [UWB_RSV_STATE_T_DENIED] = 0, 88 + [UWB_RSV_STATE_T_RESIZED] = 1, 89 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, 90 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 1, 91 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = 1, 92 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = 1, 93 + 94 + }; 95 + 96 + return statuses[rsv->state]; 97 + } 98 + 99 + /* 100 + * Return the status bit for a reservations's companion DRP IE . 101 + */ 102 + int uwb_rsv_companion_status(struct uwb_rsv *rsv) 103 + { 104 + static const int companion_statuses[] = { 105 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = 0, 106 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = 1, 107 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = 0, 108 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = 0, 109 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = 0, 110 + }; 111 + 112 + return companion_statuses[rsv->state]; 113 + } 114 + 25 115 /* 26 116 * Allocate a DRP IE. 
27 117 * ··· 123 33 static struct uwb_ie_drp *uwb_drp_ie_alloc(void) 124 34 { 125 35 struct uwb_ie_drp *drp_ie; 126 - unsigned tiebreaker; 127 36 128 37 drp_ie = kzalloc(sizeof(struct uwb_ie_drp) + 129 38 UWB_NUM_ZONES * sizeof(struct uwb_drp_alloc), 130 39 GFP_KERNEL); 131 40 if (drp_ie) { 132 41 drp_ie->hdr.element_id = UWB_IE_DRP; 133 - 134 - get_random_bytes(&tiebreaker, sizeof(unsigned)); 135 - uwb_ie_drp_set_tiebreaker(drp_ie, tiebreaker & 1); 136 42 } 137 43 return drp_ie; 138 44 } ··· 189 103 */ 190 104 int uwb_drp_ie_update(struct uwb_rsv *rsv) 191 105 { 192 - struct device *dev = &rsv->rc->uwb_dev.dev; 193 106 struct uwb_ie_drp *drp_ie; 194 - int reason_code, status; 107 + struct uwb_rsv_move *mv; 108 + int unsafe; 195 109 196 - switch (rsv->state) { 197 - case UWB_RSV_STATE_NONE: 110 + if (rsv->state == UWB_RSV_STATE_NONE) { 198 111 kfree(rsv->drp_ie); 199 112 rsv->drp_ie = NULL; 200 113 return 0; 201 - case UWB_RSV_STATE_O_INITIATED: 202 - reason_code = UWB_DRP_REASON_ACCEPTED; 203 - status = 0; 204 - break; 205 - case UWB_RSV_STATE_O_PENDING: 206 - reason_code = UWB_DRP_REASON_ACCEPTED; 207 - status = 0; 208 - break; 209 - case UWB_RSV_STATE_O_MODIFIED: 210 - reason_code = UWB_DRP_REASON_MODIFIED; 211 - status = 1; 212 - break; 213 - case UWB_RSV_STATE_O_ESTABLISHED: 214 - reason_code = UWB_DRP_REASON_ACCEPTED; 215 - status = 1; 216 - break; 217 - case UWB_RSV_STATE_T_ACCEPTED: 218 - reason_code = UWB_DRP_REASON_ACCEPTED; 219 - status = 1; 220 - break; 221 - case UWB_RSV_STATE_T_DENIED: 222 - reason_code = UWB_DRP_REASON_DENIED; 223 - status = 0; 224 - break; 225 - default: 226 - dev_dbg(dev, "rsv with unhandled state (%d)\n", rsv->state); 227 - return -EINVAL; 228 114 } 115 + 116 + unsafe = rsv->mas.unsafe ? 
1 : 0; 229 117 230 118 if (rsv->drp_ie == NULL) { 231 119 rsv->drp_ie = uwb_drp_ie_alloc(); ··· 208 148 } 209 149 drp_ie = rsv->drp_ie; 210 150 151 + uwb_ie_drp_set_unsafe(drp_ie, unsafe); 152 + uwb_ie_drp_set_tiebreaker(drp_ie, rsv->tiebreaker); 211 153 uwb_ie_drp_set_owner(drp_ie, uwb_rsv_is_owner(rsv)); 212 - uwb_ie_drp_set_status(drp_ie, status); 213 - uwb_ie_drp_set_reason_code(drp_ie, reason_code); 154 + uwb_ie_drp_set_status(drp_ie, uwb_rsv_status(rsv)); 155 + uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_reason_code(rsv)); 214 156 uwb_ie_drp_set_stream_index(drp_ie, rsv->stream); 215 157 uwb_ie_drp_set_type(drp_ie, rsv->type); 216 158 ··· 229 167 drp_ie->dev_addr = rsv->owner->dev_addr; 230 168 231 169 uwb_drp_ie_from_bm(drp_ie, &rsv->mas); 170 + 171 + if (uwb_rsv_has_two_drp_ies(rsv)) { 172 + mv = &rsv->mv; 173 + if (mv->companion_drp_ie == NULL) { 174 + mv->companion_drp_ie = uwb_drp_ie_alloc(); 175 + if (mv->companion_drp_ie == NULL) 176 + return -ENOMEM; 177 + } 178 + drp_ie = mv->companion_drp_ie; 179 + 180 + /* keep all the same configuration of the main drp_ie */ 181 + memcpy(drp_ie, rsv->drp_ie, sizeof(struct uwb_ie_drp)); 182 + 183 + 184 + /* FIXME: handle properly the unsafe bit */ 185 + uwb_ie_drp_set_unsafe(drp_ie, 1); 186 + uwb_ie_drp_set_status(drp_ie, uwb_rsv_companion_status(rsv)); 187 + uwb_ie_drp_set_reason_code(drp_ie, uwb_rsv_companion_reason_code(rsv)); 188 + 189 + uwb_drp_ie_from_bm(drp_ie, &mv->companion_mas); 190 + } 232 191 233 192 rsv->ie_valid = true; 234 193 return 0; ··· 301 218 u8 zone; 302 219 u16 zone_mask; 303 220 221 + bitmap_zero(bm->bm, UWB_NUM_MAS); 222 + 304 223 for (cnt = 0; cnt < numallocs; cnt++) { 305 224 alloc = &drp_ie->allocs[cnt]; 306 225 zone_bm = le16_to_cpu(alloc->zone_bm); ··· 314 229 } 315 230 } 316 231 } 232 +
+529 -156
drivers/uwb/drp.c
··· 23 23 #include <linux/delay.h> 24 24 #include "uwb-internal.h" 25 25 26 + 27 + /* DRP Conflict Actions ([ECMA-368 2nd Edition] 17.4.6) */ 28 + enum uwb_drp_conflict_action { 29 + /* Reservation is mantained, no action needed */ 30 + UWB_DRP_CONFLICT_MANTAIN = 0, 31 + 32 + /* the device shall not transmit frames in conflicting MASs in 33 + * the following superframe. If the device is the reservation 34 + * target, it shall also set the Reason Code in its DRP IE to 35 + * Conflict in its beacon in the following superframe. 36 + */ 37 + UWB_DRP_CONFLICT_ACT1, 38 + 39 + /* the device shall not set the Reservation Status bit to ONE 40 + * and shall not transmit frames in conflicting MASs. If the 41 + * device is the reservation target, it shall also set the 42 + * Reason Code in its DRP IE to Conflict. 43 + */ 44 + UWB_DRP_CONFLICT_ACT2, 45 + 46 + /* the device shall not transmit frames in conflicting MASs in 47 + * the following superframe. It shall remove the conflicting 48 + * MASs from the reservation or set the Reservation Status to 49 + * ZERO in its beacon in the following superframe. If the 50 + * device is the reservation target, it shall also set the 51 + * Reason Code in its DRP IE to Conflict. 
52 + */ 53 + UWB_DRP_CONFLICT_ACT3, 54 + }; 55 + 56 + 57 + static void uwb_rc_set_drp_cmd_done(struct uwb_rc *rc, void *arg, 58 + struct uwb_rceb *reply, ssize_t reply_size) 59 + { 60 + struct uwb_rc_evt_set_drp_ie *r = (struct uwb_rc_evt_set_drp_ie *)reply; 61 + 62 + if (r != NULL) { 63 + if (r->bResultCode != UWB_RC_RES_SUCCESS) 64 + dev_err(&rc->uwb_dev.dev, "SET-DRP-IE failed: %s (%d)\n", 65 + uwb_rc_strerror(r->bResultCode), r->bResultCode); 66 + } else 67 + dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: timeout\n"); 68 + 69 + spin_lock(&rc->rsvs_lock); 70 + if (rc->set_drp_ie_pending > 1) { 71 + rc->set_drp_ie_pending = 0; 72 + uwb_rsv_queue_update(rc); 73 + } else { 74 + rc->set_drp_ie_pending = 0; 75 + } 76 + spin_unlock(&rc->rsvs_lock); 77 + } 78 + 26 79 /** 27 80 * Construct and send the SET DRP IE 28 81 * ··· 99 46 int uwb_rc_send_all_drp_ie(struct uwb_rc *rc) 100 47 { 101 48 int result; 102 - struct device *dev = &rc->uwb_dev.dev; 103 49 struct uwb_rc_cmd_set_drp_ie *cmd; 104 - struct uwb_rc_evt_set_drp_ie reply; 105 50 struct uwb_rsv *rsv; 51 + struct uwb_rsv_move *mv; 106 52 int num_bytes = 0; 107 53 u8 *IEDataptr; 108 54 109 55 result = -ENOMEM; 110 56 /* First traverse all reservations to determine memory needed. 
*/ 111 57 list_for_each_entry(rsv, &rc->reservations, rc_node) { 112 - if (rsv->drp_ie != NULL) 58 + if (rsv->drp_ie != NULL) { 113 59 num_bytes += rsv->drp_ie->hdr.length + 2; 60 + if (uwb_rsv_has_two_drp_ies(rsv) && 61 + (rsv->mv.companion_drp_ie != NULL)) { 62 + mv = &rsv->mv; 63 + num_bytes += mv->companion_drp_ie->hdr.length + 2; 64 + } 65 + } 114 66 } 115 67 num_bytes += sizeof(rc->drp_avail.ie); 116 68 cmd = kzalloc(sizeof(*cmd) + num_bytes, GFP_KERNEL); ··· 126 68 cmd->wIELength = num_bytes; 127 69 IEDataptr = (u8 *)&cmd->IEData[0]; 128 70 71 + /* FIXME: DRV avail IE is not always needed */ 72 + /* put DRP avail IE first */ 73 + memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); 74 + IEDataptr += sizeof(struct uwb_ie_drp_avail); 75 + 129 76 /* Next traverse all reservations to place IEs in allocated memory. */ 130 77 list_for_each_entry(rsv, &rc->reservations, rc_node) { 131 78 if (rsv->drp_ie != NULL) { 132 79 memcpy(IEDataptr, rsv->drp_ie, 133 80 rsv->drp_ie->hdr.length + 2); 134 81 IEDataptr += rsv->drp_ie->hdr.length + 2; 82 + 83 + if (uwb_rsv_has_two_drp_ies(rsv) && 84 + (rsv->mv.companion_drp_ie != NULL)) { 85 + mv = &rsv->mv; 86 + memcpy(IEDataptr, mv->companion_drp_ie, 87 + mv->companion_drp_ie->hdr.length + 2); 88 + IEDataptr += mv->companion_drp_ie->hdr.length + 2; 89 + } 135 90 } 136 91 } 137 - memcpy(IEDataptr, &rc->drp_avail.ie, sizeof(rc->drp_avail.ie)); 138 92 139 - reply.rceb.bEventType = UWB_RC_CET_GENERAL; 140 - reply.rceb.wEvent = UWB_RC_CMD_SET_DRP_IE; 141 - result = uwb_rc_cmd(rc, "SET-DRP-IE", &cmd->rccb, 142 - sizeof(*cmd) + num_bytes, &reply.rceb, 143 - sizeof(reply)); 144 - if (result < 0) 145 - goto error_cmd; 146 - result = le16_to_cpu(reply.wRemainingSpace); 147 - if (reply.bResultCode != UWB_RC_RES_SUCCESS) { 148 - dev_err(&rc->uwb_dev.dev, "SET-DRP-IE: command execution " 149 - "failed: %s (%d). 
RemainingSpace in beacon " 150 - "= %d\n", uwb_rc_strerror(reply.bResultCode), 151 - reply.bResultCode, result); 152 - result = -EIO; 153 - } else { 154 - dev_dbg(dev, "SET-DRP-IE sent. RemainingSpace in beacon " 155 - "= %d.\n", result); 156 - result = 0; 157 - } 158 - error_cmd: 93 + result = uwb_rc_cmd_async(rc, "SET-DRP-IE", &cmd->rccb, sizeof(*cmd) + num_bytes, 94 + UWB_RC_CET_GENERAL, UWB_RC_CMD_SET_DRP_IE, 95 + uwb_rc_set_drp_cmd_done, NULL); 96 + 97 + rc->set_drp_ie_pending = 1; 98 + 159 99 kfree(cmd); 160 100 error: 161 101 return result; 162 102 } 163 103 164 - void uwb_drp_handle_timeout(struct uwb_rsv *rsv) 104 + /* 105 + * Evaluate the action to perform using conflict resolution rules 106 + * 107 + * Return a uwb_drp_conflict_action. 108 + */ 109 + static int evaluate_conflict_action(struct uwb_ie_drp *ext_drp_ie, int ext_beacon_slot, 110 + struct uwb_rsv *rsv, int our_status) 165 111 { 166 - struct device *dev = &rsv->rc->uwb_dev.dev; 112 + int our_tie_breaker = rsv->tiebreaker; 113 + int our_type = rsv->type; 114 + int our_beacon_slot = rsv->rc->uwb_dev.beacon_slot; 167 115 168 - dev_dbg(dev, "reservation timeout in state %s (%d)\n", 169 - uwb_rsv_state_str(rsv->state), rsv->state); 170 - 171 - switch (rsv->state) { 172 - case UWB_RSV_STATE_O_INITIATED: 173 - if (rsv->is_multicast) { 174 - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 175 - return; 176 - } 177 - break; 178 - case UWB_RSV_STATE_O_ESTABLISHED: 179 - if (rsv->is_multicast) 180 - return; 181 - break; 182 - default: 183 - break; 116 + int ext_tie_breaker = uwb_ie_drp_tiebreaker(ext_drp_ie); 117 + int ext_status = uwb_ie_drp_status(ext_drp_ie); 118 + int ext_type = uwb_ie_drp_type(ext_drp_ie); 119 + 120 + 121 + /* [ECMA-368 2nd Edition] 17.4.6 */ 122 + if (ext_type == UWB_DRP_TYPE_PCA && our_type == UWB_DRP_TYPE_PCA) { 123 + return UWB_DRP_CONFLICT_MANTAIN; 184 124 } 185 - uwb_rsv_remove(rsv); 125 + 126 + /* [ECMA-368 2nd Edition] 17.4.6-1 */ 127 + if (our_type == 
UWB_DRP_TYPE_ALIEN_BP) { 128 + return UWB_DRP_CONFLICT_MANTAIN; 129 + } 130 + 131 + /* [ECMA-368 2nd Edition] 17.4.6-2 */ 132 + if (ext_type == UWB_DRP_TYPE_ALIEN_BP) { 133 + /* here we know our_type != UWB_DRP_TYPE_ALIEN_BP */ 134 + return UWB_DRP_CONFLICT_ACT1; 135 + } 136 + 137 + /* [ECMA-368 2nd Edition] 17.4.6-3 */ 138 + if (our_status == 0 && ext_status == 1) { 139 + return UWB_DRP_CONFLICT_ACT2; 140 + } 141 + 142 + /* [ECMA-368 2nd Edition] 17.4.6-4 */ 143 + if (our_status == 1 && ext_status == 0) { 144 + return UWB_DRP_CONFLICT_MANTAIN; 145 + } 146 + 147 + /* [ECMA-368 2nd Edition] 17.4.6-5a */ 148 + if (our_tie_breaker == ext_tie_breaker && 149 + our_beacon_slot < ext_beacon_slot) { 150 + return UWB_DRP_CONFLICT_MANTAIN; 151 + } 152 + 153 + /* [ECMA-368 2nd Edition] 17.4.6-5b */ 154 + if (our_tie_breaker != ext_tie_breaker && 155 + our_beacon_slot > ext_beacon_slot) { 156 + return UWB_DRP_CONFLICT_MANTAIN; 157 + } 158 + 159 + if (our_status == 0) { 160 + if (our_tie_breaker == ext_tie_breaker) { 161 + /* [ECMA-368 2nd Edition] 17.4.6-6a */ 162 + if (our_beacon_slot > ext_beacon_slot) { 163 + return UWB_DRP_CONFLICT_ACT2; 164 + } 165 + } else { 166 + /* [ECMA-368 2nd Edition] 17.4.6-6b */ 167 + if (our_beacon_slot < ext_beacon_slot) { 168 + return UWB_DRP_CONFLICT_ACT2; 169 + } 170 + } 171 + } else { 172 + if (our_tie_breaker == ext_tie_breaker) { 173 + /* [ECMA-368 2nd Edition] 17.4.6-7a */ 174 + if (our_beacon_slot > ext_beacon_slot) { 175 + return UWB_DRP_CONFLICT_ACT3; 176 + } 177 + } else { 178 + /* [ECMA-368 2nd Edition] 17.4.6-7b */ 179 + if (our_beacon_slot < ext_beacon_slot) { 180 + return UWB_DRP_CONFLICT_ACT3; 181 + } 182 + } 183 + } 184 + return UWB_DRP_CONFLICT_MANTAIN; 186 185 } 187 186 187 + static void handle_conflict_normal(struct uwb_ie_drp *drp_ie, 188 + int ext_beacon_slot, 189 + struct uwb_rsv *rsv, 190 + struct uwb_mas_bm *conflicting_mas) 191 + { 192 + struct uwb_rc *rc = rsv->rc; 193 + struct uwb_rsv_move *mv = &rsv->mv; 194 + struct 
uwb_drp_backoff_win *bow = &rc->bow; 195 + int action; 196 + 197 + action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, uwb_rsv_status(rsv)); 198 + 199 + if (uwb_rsv_is_owner(rsv)) { 200 + switch(action) { 201 + case UWB_DRP_CONFLICT_ACT2: 202 + /* try move */ 203 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_TO_BE_MOVED); 204 + if (bow->can_reserve_extra_mases == false) 205 + uwb_rsv_backoff_win_increment(rc); 206 + 207 + break; 208 + case UWB_DRP_CONFLICT_ACT3: 209 + uwb_rsv_backoff_win_increment(rc); 210 + /* drop some mases with reason modified */ 211 + /* put in the companion the mases to be dropped */ 212 + bitmap_and(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); 213 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); 214 + default: 215 + break; 216 + } 217 + } else { 218 + switch(action) { 219 + case UWB_DRP_CONFLICT_ACT2: 220 + case UWB_DRP_CONFLICT_ACT3: 221 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); 222 + default: 223 + break; 224 + } 225 + 226 + } 227 + 228 + } 229 + 230 + static void handle_conflict_expanding(struct uwb_ie_drp *drp_ie, int ext_beacon_slot, 231 + struct uwb_rsv *rsv, bool companion_only, 232 + struct uwb_mas_bm *conflicting_mas) 233 + { 234 + struct uwb_rc *rc = rsv->rc; 235 + struct uwb_drp_backoff_win *bow = &rc->bow; 236 + struct uwb_rsv_move *mv = &rsv->mv; 237 + int action; 238 + 239 + if (companion_only) { 240 + /* status of companion is 0 at this point */ 241 + action = evaluate_conflict_action(drp_ie, ext_beacon_slot, rsv, 0); 242 + if (uwb_rsv_is_owner(rsv)) { 243 + switch(action) { 244 + case UWB_DRP_CONFLICT_ACT2: 245 + case UWB_DRP_CONFLICT_ACT3: 246 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 247 + rsv->needs_release_companion_mas = false; 248 + if (bow->can_reserve_extra_mases == false) 249 + uwb_rsv_backoff_win_increment(rc); 250 + uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); 251 + } 252 + } else { /* rsv is target */ 253 + switch(action) { 254 + case 
UWB_DRP_CONFLICT_ACT2: 255 + case UWB_DRP_CONFLICT_ACT3: 256 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_CONFLICT); 257 + /* send_drp_avail_ie = true; */ 258 + } 259 + } 260 + } else { /* also base part of the reservation is conflicting */ 261 + if (uwb_rsv_is_owner(rsv)) { 262 + uwb_rsv_backoff_win_increment(rc); 263 + /* remove companion part */ 264 + uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); 265 + 266 + /* drop some mases with reason modified */ 267 + 268 + /* put in the companion the mases to be dropped */ 269 + bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS); 270 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); 271 + } else { /* it is a target rsv */ 272 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); 273 + /* send_drp_avail_ie = true; */ 274 + } 275 + } 276 + } 277 + 278 + static void uwb_drp_handle_conflict_rsv(struct uwb_rc *rc, struct uwb_rsv *rsv, 279 + struct uwb_rc_evt_drp *drp_evt, 280 + struct uwb_ie_drp *drp_ie, 281 + struct uwb_mas_bm *conflicting_mas) 282 + { 283 + struct uwb_rsv_move *mv; 284 + 285 + /* check if the conflicting reservation has two drp_ies */ 286 + if (uwb_rsv_has_two_drp_ies(rsv)) { 287 + mv = &rsv->mv; 288 + if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { 289 + handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number, 290 + rsv, false, conflicting_mas); 291 + } else { 292 + if (bitmap_intersects(mv->companion_mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { 293 + handle_conflict_expanding(drp_ie, drp_evt->beacon_slot_number, 294 + rsv, true, conflicting_mas); 295 + } 296 + } 297 + } else if (bitmap_intersects(rsv->mas.bm, conflicting_mas->bm, UWB_NUM_MAS)) { 298 + handle_conflict_normal(drp_ie, drp_evt->beacon_slot_number, rsv, conflicting_mas); 299 + } 300 + } 301 + 302 + static void uwb_drp_handle_all_conflict_rsv(struct uwb_rc *rc, 303 + struct uwb_rc_evt_drp *drp_evt, 304 + struct uwb_ie_drp *drp_ie, 305 + struct uwb_mas_bm 
*conflicting_mas) 306 + { 307 + struct uwb_rsv *rsv; 308 + 309 + list_for_each_entry(rsv, &rc->reservations, rc_node) { 310 + uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, conflicting_mas); 311 + } 312 + } 313 + 188 314 /* 189 315 * Based on the DRP IE, transition a target reservation to a new 190 316 * state. 191 317 */ 192 318 static void uwb_drp_process_target(struct uwb_rc *rc, struct uwb_rsv *rsv, 193 - struct uwb_ie_drp *drp_ie) 319 + struct uwb_ie_drp *drp_ie, struct uwb_rc_evt_drp *drp_evt) 194 320 { 195 321 struct device *dev = &rc->uwb_dev.dev; 322 + struct uwb_rsv_move *mv = &rsv->mv; 196 323 int status; 197 324 enum uwb_drp_reason reason_code; 198 - 325 + struct uwb_mas_bm mas; 326 + 199 327 status = uwb_ie_drp_status(drp_ie); 200 328 reason_code = uwb_ie_drp_reason_code(drp_ie); 329 + uwb_drp_ie_to_bm(&mas, drp_ie); 201 330 202 - if (status) { 203 - switch (reason_code) { 204 - case UWB_DRP_REASON_ACCEPTED: 331 + switch (reason_code) { 332 + case UWB_DRP_REASON_ACCEPTED: 333 + 334 + if (rsv->state == UWB_RSV_STATE_T_CONFLICT) { 335 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_CONFLICT); 336 + break; 337 + } 338 + 339 + if (rsv->state == UWB_RSV_STATE_T_EXPANDING_ACCEPTED) { 340 + /* drp_ie is companion */ 341 + if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) 342 + /* stroke companion */ 343 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); 344 + } else { 345 + if (!bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { 346 + if (uwb_drp_avail_reserve_pending(rc, &mas) == -EBUSY) { 347 + /* FIXME: there is a conflict, find 348 + * the conflicting reservations and 349 + * take a sensible action. 
Consider 350 + * that in drp_ie there is the 351 + * "neighbour" */ 352 + uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); 353 + } else { 354 + /* accept the extra reservation */ 355 + bitmap_copy(mv->companion_mas.bm, mas.bm, UWB_NUM_MAS); 356 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); 357 + } 358 + } else { 359 + if (status) { 360 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); 361 + } 362 + } 363 + 364 + } 365 + break; 366 + 367 + case UWB_DRP_REASON_MODIFIED: 368 + /* check to see if we have already modified the reservation */ 369 + if (bitmap_equal(rsv->mas.bm, mas.bm, UWB_NUM_MAS)) { 205 370 uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); 206 371 break; 207 - case UWB_DRP_REASON_MODIFIED: 208 - dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", 209 - reason_code, status); 210 - break; 211 - default: 212 - dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", 213 - reason_code, status); 214 372 } 215 - } else { 216 - switch (reason_code) { 217 - case UWB_DRP_REASON_ACCEPTED: 218 - /* New reservations are handled in uwb_rsv_find(). 
*/ 219 - break; 220 - case UWB_DRP_REASON_DENIED: 221 - uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 222 - break; 223 - case UWB_DRP_REASON_CONFLICT: 224 - case UWB_DRP_REASON_MODIFIED: 225 - dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", 226 - reason_code, status); 227 - break; 228 - default: 229 - dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", 230 - reason_code, status); 373 + 374 + /* find if the owner wants to expand or reduce */ 375 + if (bitmap_subset(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { 376 + /* owner is reducing */ 377 + bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mas.bm, UWB_NUM_MAS); 378 + uwb_drp_avail_release(rsv->rc, &mv->companion_mas); 231 379 } 380 + 381 + bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); 382 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_RESIZED); 383 + break; 384 + default: 385 + dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", 386 + reason_code, status); 232 387 } 233 388 } 234 389 ··· 450 179 * state. 451 180 */ 452 181 static void uwb_drp_process_owner(struct uwb_rc *rc, struct uwb_rsv *rsv, 453 - struct uwb_ie_drp *drp_ie) 182 + struct uwb_dev *src, struct uwb_ie_drp *drp_ie, 183 + struct uwb_rc_evt_drp *drp_evt) 454 184 { 455 185 struct device *dev = &rc->uwb_dev.dev; 186 + struct uwb_rsv_move *mv = &rsv->mv; 456 187 int status; 457 188 enum uwb_drp_reason reason_code; 189 + struct uwb_mas_bm mas; 458 190 459 191 status = uwb_ie_drp_status(drp_ie); 460 192 reason_code = uwb_ie_drp_reason_code(drp_ie); 193 + uwb_drp_ie_to_bm(&mas, drp_ie); 461 194 462 195 if (status) { 463 196 switch (reason_code) { 464 197 case UWB_DRP_REASON_ACCEPTED: 465 - uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 466 - break; 467 - case UWB_DRP_REASON_MODIFIED: 468 - dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", 469 - reason_code, status); 198 + switch (rsv->state) { 199 + case UWB_RSV_STATE_O_PENDING: 200 + case UWB_RSV_STATE_O_INITIATED: 201 + case UWB_RSV_STATE_O_ESTABLISHED: 202 + uwb_rsv_set_state(rsv, 
UWB_RSV_STATE_O_ESTABLISHED); 203 + break; 204 + case UWB_RSV_STATE_O_MODIFIED: 205 + if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { 206 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 207 + } else { 208 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MODIFIED); 209 + } 210 + break; 211 + 212 + case UWB_RSV_STATE_O_MOVE_REDUCING: /* shouldn' t be a problem */ 213 + if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) { 214 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 215 + } else { 216 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); 217 + } 218 + break; 219 + case UWB_RSV_STATE_O_MOVE_EXPANDING: 220 + if (bitmap_equal(mas.bm, mv->companion_mas.bm, UWB_NUM_MAS)) { 221 + /* Companion reservation accepted */ 222 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); 223 + } else { 224 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); 225 + } 226 + break; 227 + case UWB_RSV_STATE_O_MOVE_COMBINING: 228 + if (bitmap_equal(mas.bm, rsv->mas.bm, UWB_NUM_MAS)) 229 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); 230 + else 231 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); 232 + break; 233 + default: 234 + break; 235 + } 470 236 break; 471 237 default: 472 238 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", ··· 518 210 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 519 211 break; 520 212 case UWB_DRP_REASON_CONFLICT: 521 - case UWB_DRP_REASON_MODIFIED: 522 - dev_err(dev, "FIXME: unhandled reason code (%d/%d)\n", 523 - reason_code, status); 213 + /* resolve the conflict */ 214 + bitmap_complement(mas.bm, src->last_availability_bm, 215 + UWB_NUM_MAS); 216 + uwb_drp_handle_conflict_rsv(rc, rsv, drp_evt, drp_ie, &mas); 524 217 break; 525 218 default: 526 219 dev_warn(dev, "ignoring invalid DRP IE state (%d/%d)\n", ··· 530 221 } 531 222 } 532 223 224 + static void uwb_cnflt_alien_stroke_timer(struct uwb_cnflt_alien *cnflt) 225 + { 226 + unsigned timeout_us = UWB_MAX_LOST_BEACONS * UWB_SUPERFRAME_LENGTH_US; 227 + 
mod_timer(&cnflt->timer, jiffies + usecs_to_jiffies(timeout_us)); 228 + } 229 + 230 + static void uwb_cnflt_update_work(struct work_struct *work) 231 + { 232 + struct uwb_cnflt_alien *cnflt = container_of(work, 233 + struct uwb_cnflt_alien, 234 + cnflt_update_work); 235 + struct uwb_cnflt_alien *c; 236 + struct uwb_rc *rc = cnflt->rc; 237 + 238 + unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; 239 + 240 + mutex_lock(&rc->rsvs_mutex); 241 + 242 + list_del(&cnflt->rc_node); 243 + 244 + /* update rc global conflicting alien bitmap */ 245 + bitmap_zero(rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); 246 + 247 + list_for_each_entry(c, &rc->cnflt_alien_list, rc_node) { 248 + bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, c->mas.bm, UWB_NUM_MAS); 249 + } 250 + 251 + queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); 252 + 253 + kfree(cnflt); 254 + mutex_unlock(&rc->rsvs_mutex); 255 + } 256 + 257 + static void uwb_cnflt_timer(unsigned long arg) 258 + { 259 + struct uwb_cnflt_alien *cnflt = (struct uwb_cnflt_alien *)arg; 260 + 261 + queue_work(cnflt->rc->rsv_workq, &cnflt->cnflt_update_work); 262 + } 263 + 533 264 /* 534 - * Process a received DRP IE, it's either for a reservation owned by 535 - * the RC or targeted at it (or it's for a WUSB cluster reservation). 265 + * We have received an DRP_IE of type Alien BP and we need to make 266 + * sure we do not transmit in conflicting MASs. 
536 267 */ 537 - static void uwb_drp_process(struct uwb_rc *rc, struct uwb_dev *src, 538 - struct uwb_ie_drp *drp_ie) 268 + static void uwb_drp_handle_alien_drp(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) 269 + { 270 + struct device *dev = &rc->uwb_dev.dev; 271 + struct uwb_mas_bm mas; 272 + struct uwb_cnflt_alien *cnflt; 273 + char buf[72]; 274 + unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; 275 + 276 + uwb_drp_ie_to_bm(&mas, drp_ie); 277 + bitmap_scnprintf(buf, sizeof(buf), mas.bm, UWB_NUM_MAS); 278 + 279 + list_for_each_entry(cnflt, &rc->cnflt_alien_list, rc_node) { 280 + if (bitmap_equal(cnflt->mas.bm, mas.bm, UWB_NUM_MAS)) { 281 + /* Existing alien BP reservation conflicting 282 + * bitmap, just reset the timer */ 283 + uwb_cnflt_alien_stroke_timer(cnflt); 284 + return; 285 + } 286 + } 287 + 288 + /* New alien BP reservation conflicting bitmap */ 289 + 290 + /* alloc and initialize new uwb_cnflt_alien */ 291 + cnflt = kzalloc(sizeof(struct uwb_cnflt_alien), GFP_KERNEL); 292 + if (!cnflt) 293 + dev_err(dev, "failed to alloc uwb_cnflt_alien struct\n"); 294 + INIT_LIST_HEAD(&cnflt->rc_node); 295 + init_timer(&cnflt->timer); 296 + cnflt->timer.function = uwb_cnflt_timer; 297 + cnflt->timer.data = (unsigned long)cnflt; 298 + 299 + cnflt->rc = rc; 300 + INIT_WORK(&cnflt->cnflt_update_work, uwb_cnflt_update_work); 301 + 302 + bitmap_copy(cnflt->mas.bm, mas.bm, UWB_NUM_MAS); 303 + 304 + list_add_tail(&cnflt->rc_node, &rc->cnflt_alien_list); 305 + 306 + /* update rc global conflicting alien bitmap */ 307 + bitmap_or(rc->cnflt_alien_bitmap.bm, rc->cnflt_alien_bitmap.bm, mas.bm, UWB_NUM_MAS); 308 + 309 + queue_delayed_work(rc->rsv_workq, &rc->rsv_alien_bp_work, usecs_to_jiffies(delay_us)); 310 + 311 + /* start the timer */ 312 + uwb_cnflt_alien_stroke_timer(cnflt); 313 + } 314 + 315 + static void uwb_drp_process_not_involved(struct uwb_rc *rc, 316 + struct uwb_rc_evt_drp *drp_evt, 317 + struct uwb_ie_drp *drp_ie) 318 + { 319 + struct uwb_mas_bm mas; 
320 + 321 + uwb_drp_ie_to_bm(&mas, drp_ie); 322 + uwb_drp_handle_all_conflict_rsv(rc, drp_evt, drp_ie, &mas); 323 + } 324 + 325 + static void uwb_drp_process_involved(struct uwb_rc *rc, struct uwb_dev *src, 326 + struct uwb_rc_evt_drp *drp_evt, 327 + struct uwb_ie_drp *drp_ie) 539 328 { 540 329 struct uwb_rsv *rsv; 541 330 ··· 646 239 */ 647 240 return; 648 241 } 649 - 242 + 650 243 /* 651 244 * Do nothing with DRP IEs for reservations that have been 652 245 * terminated. ··· 655 248 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 656 249 return; 657 250 } 658 - 251 + 659 252 if (uwb_ie_drp_owner(drp_ie)) 660 - uwb_drp_process_target(rc, rsv, drp_ie); 253 + uwb_drp_process_target(rc, rsv, drp_ie, drp_evt); 661 254 else 662 - uwb_drp_process_owner(rc, rsv, drp_ie); 255 + uwb_drp_process_owner(rc, rsv, src, drp_ie, drp_evt); 256 + 663 257 } 664 258 259 + 260 + static bool uwb_drp_involves_us(struct uwb_rc *rc, struct uwb_ie_drp *drp_ie) 261 + { 262 + return uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, &drp_ie->dev_addr) == 0; 263 + } 264 + 265 + /* 266 + * Process a received DRP IE. 
267 + */ 268 + static void uwb_drp_process(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, 269 + struct uwb_dev *src, struct uwb_ie_drp *drp_ie) 270 + { 271 + if (uwb_ie_drp_type(drp_ie) == UWB_DRP_TYPE_ALIEN_BP) 272 + uwb_drp_handle_alien_drp(rc, drp_ie); 273 + else if (uwb_drp_involves_us(rc, drp_ie)) 274 + uwb_drp_process_involved(rc, src, drp_evt, drp_ie); 275 + else 276 + uwb_drp_process_not_involved(rc, drp_evt, drp_ie); 277 + } 278 + 279 + /* 280 + * Process a received DRP Availability IE 281 + */ 282 + static void uwb_drp_availability_process(struct uwb_rc *rc, struct uwb_dev *src, 283 + struct uwb_ie_drp_avail *drp_availability_ie) 284 + { 285 + bitmap_copy(src->last_availability_bm, 286 + drp_availability_ie->bmp, UWB_NUM_MAS); 287 + } 665 288 666 289 /* 667 290 * Process all the DRP IEs (both DRP IEs and the DRP Availability IE) ··· 713 276 714 277 switch (ie_hdr->element_id) { 715 278 case UWB_IE_DRP_AVAILABILITY: 716 - /* FIXME: does something need to be done with this? */ 279 + uwb_drp_availability_process(rc, src_dev, (struct uwb_ie_drp_avail *)ie_hdr); 717 280 break; 718 281 case UWB_IE_DRP: 719 - uwb_drp_process(rc, src_dev, (struct uwb_ie_drp *)ie_hdr); 282 + uwb_drp_process(rc, drp_evt, src_dev, (struct uwb_ie_drp *)ie_hdr); 720 283 break; 721 284 default: 722 285 dev_warn(dev, "unexpected IE in DRP notification\n"); ··· 728 291 dev_warn(dev, "%d octets remaining in DRP notification\n", 729 292 (int)ielen); 730 293 } 731 - 732 - 733 - /* 734 - * Go through all the DRP IEs and find the ones that conflict with our 735 - * reservations. 736 - * 737 - * FIXME: must resolve the conflict according the the rules in 738 - * [ECMA-368]. 
739 - */ 740 - static 741 - void uwb_drp_process_conflict_all(struct uwb_rc *rc, struct uwb_rc_evt_drp *drp_evt, 742 - size_t ielen, struct uwb_dev *src_dev) 743 - { 744 - struct device *dev = &rc->uwb_dev.dev; 745 - struct uwb_ie_hdr *ie_hdr; 746 - struct uwb_ie_drp *drp_ie; 747 - void *ptr; 748 - 749 - ptr = drp_evt->ie_data; 750 - for (;;) { 751 - ie_hdr = uwb_ie_next(&ptr, &ielen); 752 - if (!ie_hdr) 753 - break; 754 - 755 - drp_ie = container_of(ie_hdr, struct uwb_ie_drp, hdr); 756 - 757 - /* FIXME: check if this DRP IE conflicts. */ 758 - } 759 - 760 - if (ielen > 0) 761 - dev_warn(dev, "%d octets remaining in DRP notification\n", 762 - (int)ielen); 763 - } 764 - 765 - 766 - /* 767 - * Terminate all reservations owned by, or targeted at, 'uwb_dev'. 768 - */ 769 - static void uwb_drp_terminate_all(struct uwb_rc *rc, struct uwb_dev *uwb_dev) 770 - { 771 - struct uwb_rsv *rsv; 772 - 773 - list_for_each_entry(rsv, &rc->reservations, rc_node) { 774 - if (rsv->owner == uwb_dev 775 - || (rsv->target.type == UWB_RSV_TARGET_DEV && rsv->target.dev == uwb_dev)) 776 - uwb_rsv_remove(rsv); 777 - } 778 - } 779 - 780 294 781 295 /** 782 296 * uwbd_evt_handle_rc_drp - handle a DRP_IE event ··· 769 381 size_t ielength, bytes_left; 770 382 struct uwb_dev_addr src_addr; 771 383 struct uwb_dev *src_dev; 772 - int reason; 773 384 774 385 /* Is there enough data to decode the event (and any IEs in 775 386 its payload)? 
*/ ··· 804 417 805 418 mutex_lock(&rc->rsvs_mutex); 806 419 807 - reason = uwb_rc_evt_drp_reason(drp_evt); 808 - 809 - switch (reason) { 810 - case UWB_DRP_NOTIF_DRP_IE_RCVD: 811 - uwb_drp_process_all(rc, drp_evt, ielength, src_dev); 812 - break; 813 - case UWB_DRP_NOTIF_CONFLICT: 814 - uwb_drp_process_conflict_all(rc, drp_evt, ielength, src_dev); 815 - break; 816 - case UWB_DRP_NOTIF_TERMINATE: 817 - uwb_drp_terminate_all(rc, src_dev); 818 - break; 819 - default: 820 - dev_warn(dev, "ignored DRP event with reason code: %d\n", reason); 821 - break; 822 - } 420 + /* We do not distinguish from the reason */ 421 + uwb_drp_process_all(rc, drp_evt, ielength, src_dev); 823 422 824 423 mutex_unlock(&rc->rsvs_mutex); 825 424
+382 -100
drivers/uwb/rsv.c
··· 17 17 */ 18 18 #include <linux/kernel.h> 19 19 #include <linux/uwb.h> 20 + #include <linux/random.h> 20 21 21 22 #include "uwb-internal.h" 22 23 23 24 static void uwb_rsv_timer(unsigned long arg); 24 25 25 26 static const char *rsv_states[] = { 26 - [UWB_RSV_STATE_NONE] = "none", 27 - [UWB_RSV_STATE_O_INITIATED] = "initiated", 28 - [UWB_RSV_STATE_O_PENDING] = "pending", 29 - [UWB_RSV_STATE_O_MODIFIED] = "modified", 30 - [UWB_RSV_STATE_O_ESTABLISHED] = "established", 31 - [UWB_RSV_STATE_T_ACCEPTED] = "accepted", 32 - [UWB_RSV_STATE_T_DENIED] = "denied", 33 - [UWB_RSV_STATE_T_PENDING] = "pending", 27 + [UWB_RSV_STATE_NONE] = "none ", 28 + [UWB_RSV_STATE_O_INITIATED] = "o initiated ", 29 + [UWB_RSV_STATE_O_PENDING] = "o pending ", 30 + [UWB_RSV_STATE_O_MODIFIED] = "o modified ", 31 + [UWB_RSV_STATE_O_ESTABLISHED] = "o established ", 32 + [UWB_RSV_STATE_O_TO_BE_MOVED] = "o to be moved ", 33 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = "o move expanding", 34 + [UWB_RSV_STATE_O_MOVE_COMBINING] = "o move combining", 35 + [UWB_RSV_STATE_O_MOVE_REDUCING] = "o move reducing ", 36 + [UWB_RSV_STATE_T_ACCEPTED] = "t accepted ", 37 + [UWB_RSV_STATE_T_CONFLICT] = "t conflict ", 38 + [UWB_RSV_STATE_T_PENDING] = "t pending ", 39 + [UWB_RSV_STATE_T_DENIED] = "t denied ", 40 + [UWB_RSV_STATE_T_RESIZED] = "t resized ", 41 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ", 42 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf", 43 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = "t expanding pend", 44 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = "t expanding den ", 34 45 }; 35 46 36 47 static const char *rsv_types[] = { ··· 51 40 [UWB_DRP_TYPE_PRIVATE] = "private", 52 41 [UWB_DRP_TYPE_PCA] = "pca", 53 42 }; 43 + 44 + bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv) 45 + { 46 + static const bool has_two_drp_ies[] = { 47 + [UWB_RSV_STATE_O_INITIATED] = false, 48 + [UWB_RSV_STATE_O_PENDING] = false, 49 + [UWB_RSV_STATE_O_MODIFIED] = false, 50 + [UWB_RSV_STATE_O_ESTABLISHED] = false, 51 + 
[UWB_RSV_STATE_O_TO_BE_MOVED] = false, 52 + [UWB_RSV_STATE_O_MOVE_COMBINING] = false, 53 + [UWB_RSV_STATE_O_MOVE_REDUCING] = false, 54 + [UWB_RSV_STATE_O_MOVE_EXPANDING] = true, 55 + [UWB_RSV_STATE_T_ACCEPTED] = false, 56 + [UWB_RSV_STATE_T_CONFLICT] = false, 57 + [UWB_RSV_STATE_T_PENDING] = false, 58 + [UWB_RSV_STATE_T_DENIED] = false, 59 + [UWB_RSV_STATE_T_RESIZED] = false, 60 + [UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = true, 61 + [UWB_RSV_STATE_T_EXPANDING_CONFLICT] = true, 62 + [UWB_RSV_STATE_T_EXPANDING_PENDING] = true, 63 + [UWB_RSV_STATE_T_EXPANDING_DENIED] = true, 64 + }; 65 + 66 + return has_two_drp_ies[rsv->state]; 67 + } 54 68 55 69 /** 56 70 * uwb_rsv_state_str - return a string for a reservation state ··· 101 65 } 102 66 EXPORT_SYMBOL_GPL(uwb_rsv_type_str); 103 67 104 - static void uwb_rsv_dump(struct uwb_rsv *rsv) 68 + void uwb_rsv_dump(char *text, struct uwb_rsv *rsv) 105 69 { 106 70 struct device *dev = &rsv->rc->uwb_dev.dev; 107 71 struct uwb_dev_addr devaddr; ··· 124 88 kfree(rsv); 125 89 } 126 90 127 - static void uwb_rsv_get(struct uwb_rsv *rsv) 91 + void uwb_rsv_get(struct uwb_rsv *rsv) 128 92 { 129 93 kref_get(&rsv->kref); 130 94 } 131 95 132 - static void uwb_rsv_put(struct uwb_rsv *rsv) 96 + void uwb_rsv_put(struct uwb_rsv *rsv) 133 97 { 134 98 kref_put(&rsv->kref, uwb_rsv_release); 135 99 } ··· 144 108 static int uwb_rsv_get_stream(struct uwb_rsv *rsv) 145 109 { 146 110 struct uwb_rc *rc = rsv->rc; 111 + struct device *dev = &rc->uwb_dev.dev; 147 112 unsigned long *streams_bm; 148 113 int stream; 149 114 ··· 166 129 rsv->stream = stream; 167 130 set_bit(stream, streams_bm); 168 131 132 + dev_dbg(dev, "get stream %d\n", rsv->stream); 133 + 169 134 return 0; 170 135 } 171 136 172 137 static void uwb_rsv_put_stream(struct uwb_rsv *rsv) 173 138 { 174 139 struct uwb_rc *rc = rsv->rc; 140 + struct device *dev = &rc->uwb_dev.dev; 175 141 unsigned long *streams_bm; 176 142 177 143 switch (rsv->target.type) { ··· 189 149 } 190 150 191 151 
clear_bit(rsv->stream, streams_bm); 152 + 153 + dev_dbg(dev, "put stream %d\n", rsv->stream); 192 154 } 193 155 194 - /* 195 - * Generate a MAS allocation with a single row component. 196 - */ 197 - static void uwb_rsv_gen_alloc_row(struct uwb_mas_bm *mas, 198 - int first_mas, int mas_per_zone, 199 - int zs, int ze) 156 + void uwb_rsv_backoff_win_timer(unsigned long arg) 200 157 { 201 - struct uwb_mas_bm col; 202 - int z; 158 + struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg; 159 + struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow); 160 + struct device *dev = &rc->uwb_dev.dev; 203 161 204 - bitmap_zero(mas->bm, UWB_NUM_MAS); 205 - bitmap_zero(col.bm, UWB_NUM_MAS); 206 - bitmap_fill(col.bm, mas_per_zone); 207 - bitmap_shift_left(col.bm, col.bm, first_mas + zs * UWB_MAS_PER_ZONE, UWB_NUM_MAS); 208 - 209 - for (z = zs; z <= ze; z++) { 210 - bitmap_or(mas->bm, mas->bm, col.bm, UWB_NUM_MAS); 211 - bitmap_shift_left(col.bm, col.bm, UWB_MAS_PER_ZONE, UWB_NUM_MAS); 162 + bow->can_reserve_extra_mases = true; 163 + if (bow->total_expired <= 4) { 164 + bow->total_expired++; 165 + } else { 166 + /* after 4 backoff window has expired we can exit from 167 + * the backoff procedure */ 168 + bow->total_expired = 0; 169 + bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1; 212 170 } 171 + dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n: ", bow->total_expired, bow->n); 172 + 173 + /* try to relocate all the "to be moved" relocations */ 174 + uwb_rsv_handle_drp_avail_change(rc); 213 175 } 214 176 215 - /* 216 - * Allocate some MAS for this reservation based on current local 217 - * availability, the reservation parameters (max_mas, min_mas, 218 - * sparsity), and the WiMedia rules for MAS allocations. 219 - * 220 - * Returns -EBUSY is insufficient free MAS are available. 
221 - * 222 - * FIXME: to simplify this, only safe reservations with a single row 223 - * component in zones 1 to 15 are tried (zone 0 is skipped to avoid 224 - * problems with the MAS reserved for the BP). 225 - * 226 - * [ECMA-368] section B.2. 227 - */ 228 - static int uwb_rsv_alloc_mas(struct uwb_rsv *rsv) 177 + void uwb_rsv_backoff_win_increment(struct uwb_rc *rc) 229 178 { 230 - static const int safe_mas_in_row[UWB_NUM_ZONES] = { 231 - 8, 7, 6, 5, 4, 4, 4, 4, 4, 4, 4, 4, 4, 3, 2, 1, 232 - }; 233 - int n, r; 234 - struct uwb_mas_bm mas; 235 - bool found = false; 179 + struct uwb_drp_backoff_win *bow = &rc->bow; 180 + struct device *dev = &rc->uwb_dev.dev; 181 + unsigned timeout_us; 236 182 237 - /* 238 - * Search all valid safe allocations until either: too few MAS 239 - * are available; or the smallest allocation with sufficient 240 - * MAS is found. 241 - * 242 - * The top of the zones are preferred, so space for larger 243 - * allocations is available in the bottom of the zone (e.g., a 244 - * 15 MAS allocation should start in row 14 leaving space for 245 - * a 120 MAS allocation at row 0). 
246 - */ 247 - for (n = safe_mas_in_row[0]; n >= 1; n--) { 248 - int num_mas; 183 + dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window); 249 184 250 - num_mas = n * (UWB_NUM_ZONES - 1); 251 - if (num_mas < rsv->min_mas) 252 - break; 253 - if (found && num_mas < rsv->max_mas) 254 - break; 185 + bow->can_reserve_extra_mases = false; 255 186 256 - for (r = UWB_MAS_PER_ZONE-1; r >= 0; r--) { 257 - if (safe_mas_in_row[r] < n) 258 - continue; 259 - uwb_rsv_gen_alloc_row(&mas, r, n, 1, UWB_NUM_ZONES); 260 - if (uwb_drp_avail_reserve_pending(rsv->rc, &mas) == 0) { 261 - found = true; 262 - break; 263 - } 264 - } 265 - } 187 + if((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX) 188 + return; 266 189 267 - if (!found) 268 - return -EBUSY; 190 + bow->window <<= 1; 191 + bow->n = random32() & (bow->window - 1); 192 + dev_dbg(dev, "new_window=%d, n=%d\n: ", bow->window, bow->n); 269 193 270 - bitmap_copy(rsv->mas.bm, mas.bm, UWB_NUM_MAS); 271 - return 0; 194 + /* reset the timer associated variables */ 195 + timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US; 196 + bow->total_expired = 0; 197 + mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us)); 272 198 } 273 199 274 200 static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv) ··· 247 241 * received. 
248 242 */ 249 243 if (rsv->is_multicast) { 250 - if (rsv->state == UWB_RSV_STATE_O_INITIATED) 244 + if (rsv->state == UWB_RSV_STATE_O_INITIATED 245 + || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING 246 + || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING 247 + || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) 251 248 sframes = 1; 252 249 if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED) 253 250 sframes = 0; 251 + 254 252 } 255 253 256 - rsv->expired = false; 257 254 if (sframes > 0) { 258 255 /* 259 256 * Add an additional 2 superframes to account for the ··· 278 269 rsv->state = new_state; 279 270 rsv->ie_valid = false; 280 271 281 - uwb_rsv_dump(rsv); 272 + uwb_rsv_dump("SU", rsv); 282 273 283 274 uwb_rsv_stroke_timer(rsv); 284 275 uwb_rsv_sched_update(rsv->rc); ··· 292 283 293 284 void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state) 294 285 { 286 + struct uwb_rsv_move *mv = &rsv->mv; 287 + 295 288 if (rsv->state == new_state) { 296 289 switch (rsv->state) { 297 290 case UWB_RSV_STATE_O_ESTABLISHED: 291 + case UWB_RSV_STATE_O_MOVE_EXPANDING: 292 + case UWB_RSV_STATE_O_MOVE_COMBINING: 293 + case UWB_RSV_STATE_O_MOVE_REDUCING: 298 294 case UWB_RSV_STATE_T_ACCEPTED: 295 + case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: 296 + case UWB_RSV_STATE_T_RESIZED: 299 297 case UWB_RSV_STATE_NONE: 300 298 uwb_rsv_stroke_timer(rsv); 301 299 break; ··· 314 298 return; 315 299 } 316 300 301 + uwb_rsv_dump("SC", rsv); 302 + 317 303 switch (new_state) { 318 304 case UWB_RSV_STATE_NONE: 319 - uwb_drp_avail_release(rsv->rc, &rsv->mas); 320 - if (uwb_rsv_is_owner(rsv)) 321 - uwb_rsv_put_stream(rsv); 322 305 uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE); 323 306 uwb_rsv_callback(rsv); 324 307 break; ··· 327 312 case UWB_RSV_STATE_O_PENDING: 328 313 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING); 329 314 break; 315 + case UWB_RSV_STATE_O_MODIFIED: 316 + /* in the companion there are the MASes to drop */ 317 + bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, 
UWB_NUM_MAS); 318 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED); 319 + break; 330 320 case UWB_RSV_STATE_O_ESTABLISHED: 321 + if (rsv->state == UWB_RSV_STATE_O_MODIFIED 322 + || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) { 323 + uwb_drp_avail_release(rsv->rc, &mv->companion_mas); 324 + rsv->needs_release_companion_mas = false; 325 + } 331 326 uwb_drp_avail_reserve(rsv->rc, &rsv->mas); 332 327 uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED); 333 328 uwb_rsv_callback(rsv); 334 329 break; 330 + case UWB_RSV_STATE_O_MOVE_EXPANDING: 331 + rsv->needs_release_companion_mas = true; 332 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); 333 + break; 334 + case UWB_RSV_STATE_O_MOVE_COMBINING: 335 + rsv->needs_release_companion_mas = false; 336 + uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); 337 + bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS); 338 + rsv->mas.safe += mv->companion_mas.safe; 339 + rsv->mas.unsafe += mv->companion_mas.unsafe; 340 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); 341 + break; 342 + case UWB_RSV_STATE_O_MOVE_REDUCING: 343 + bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); 344 + rsv->needs_release_companion_mas = true; 345 + rsv->mas.safe = mv->final_mas.safe; 346 + rsv->mas.unsafe = mv->final_mas.unsafe; 347 + bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS); 348 + bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS); 349 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); 350 + break; 335 351 case UWB_RSV_STATE_T_ACCEPTED: 352 + case UWB_RSV_STATE_T_RESIZED: 353 + rsv->needs_release_companion_mas = false; 336 354 uwb_drp_avail_reserve(rsv->rc, &rsv->mas); 337 355 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED); 338 356 uwb_rsv_callback(rsv); ··· 373 325 case UWB_RSV_STATE_T_DENIED: 374 326 uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED); 375 327 break; 328 + case UWB_RSV_STATE_T_CONFLICT: 329 + 
uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT); 330 + break; 331 + case UWB_RSV_STATE_T_PENDING: 332 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING); 333 + break; 334 + case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: 335 + rsv->needs_release_companion_mas = true; 336 + uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas); 337 + uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED); 338 + break; 376 339 default: 377 340 dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n", 378 341 uwb_rsv_state_str(new_state), new_state); 379 342 } 343 + } 344 + 345 + static void uwb_rsv_handle_timeout_work(struct work_struct *work) 346 + { 347 + struct uwb_rsv *rsv = container_of(work, struct uwb_rsv, 348 + handle_timeout_work); 349 + struct uwb_rc *rc = rsv->rc; 350 + 351 + mutex_lock(&rc->rsvs_mutex); 352 + 353 + uwb_rsv_dump("TO", rsv); 354 + 355 + switch (rsv->state) { 356 + case UWB_RSV_STATE_O_INITIATED: 357 + if (rsv->is_multicast) { 358 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 359 + goto unlock; 360 + } 361 + break; 362 + case UWB_RSV_STATE_O_MOVE_EXPANDING: 363 + if (rsv->is_multicast) { 364 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING); 365 + goto unlock; 366 + } 367 + break; 368 + case UWB_RSV_STATE_O_MOVE_COMBINING: 369 + if (rsv->is_multicast) { 370 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING); 371 + goto unlock; 372 + } 373 + break; 374 + case UWB_RSV_STATE_O_MOVE_REDUCING: 375 + if (rsv->is_multicast) { 376 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED); 377 + goto unlock; 378 + } 379 + break; 380 + case UWB_RSV_STATE_O_ESTABLISHED: 381 + if (rsv->is_multicast) 382 + goto unlock; 383 + break; 384 + case UWB_RSV_STATE_T_EXPANDING_ACCEPTED: 385 + /* 386 + * The time out could be for the main or of the 387 + * companion DRP, assume it's for the companion and 388 + * drop that first. A further time out is required to 389 + * drop the main. 
390 + */ 391 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED); 392 + uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); 393 + goto unlock; 394 + default: 395 + break; 396 + } 397 + 398 + uwb_rsv_remove(rsv); 399 + 400 + unlock: 401 + mutex_unlock(&rc->rsvs_mutex); 380 402 } 381 403 382 404 static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc) ··· 465 347 rsv->timer.data = (unsigned long)rsv; 466 348 467 349 rsv->rc = rc; 350 + INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work); 468 351 469 352 return rsv; 470 353 } ··· 500 381 501 382 void uwb_rsv_remove(struct uwb_rsv *rsv) 502 383 { 384 + uwb_rsv_dump("RM", rsv); 385 + 503 386 if (rsv->state != UWB_RSV_STATE_NONE) 504 387 uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE); 388 + 389 + if (rsv->needs_release_companion_mas) 390 + uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas); 391 + uwb_drp_avail_release(rsv->rc, &rsv->mas); 392 + 393 + if (uwb_rsv_is_owner(rsv)) 394 + uwb_rsv_put_stream(rsv); 395 + 505 396 del_timer_sync(&rsv->timer); 506 397 uwb_dev_put(rsv->owner); 507 398 if (rsv->target.type == UWB_RSV_TARGET_DEV) ··· 538 409 * @rsv: the reservation 539 410 * 540 411 * The PAL should fill in @rsv's owner, target, type, max_mas, 541 - * min_mas, sparsity and is_multicast fields. If the target is a 412 + * min_mas, max_interval and is_multicast fields. If the target is a 542 413 * uwb_dev it must be referenced. 
543 414 * 544 415 * The reservation's callback will be called when the reservation is ··· 547 418 int uwb_rsv_establish(struct uwb_rsv *rsv) 548 419 { 549 420 struct uwb_rc *rc = rsv->rc; 421 + struct uwb_mas_bm available; 550 422 int ret; 551 423 552 424 mutex_lock(&rc->rsvs_mutex); 553 - 554 425 ret = uwb_rsv_get_stream(rsv); 555 426 if (ret) 556 427 goto out; 557 428 558 - ret = uwb_rsv_alloc_mas(rsv); 559 - if (ret) { 429 + rsv->tiebreaker = random32() & 1; 430 + /* get available mas bitmap */ 431 + uwb_drp_available(rc, &available); 432 + 433 + ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas); 434 + if (ret == UWB_RSV_ALLOC_NOT_FOUND) { 435 + ret = -EBUSY; 436 + uwb_rsv_put_stream(rsv); 437 + goto out; 438 + } 439 + 440 + ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas); 441 + if (ret != 0) { 560 442 uwb_rsv_put_stream(rsv); 561 443 goto out; 562 444 } ··· 588 448 * @rsv: the reservation to modify 589 449 * @max_mas: new maximum MAS to reserve 590 450 * @min_mas: new minimum MAS to reserve 591 - * @sparsity: new sparsity to use 451 + * @max_interval: new max_interval to use 592 452 * 593 453 * FIXME: implement this once there are PALs that use it. 
594 454 */ 595 - int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int sparsity) 455 + int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval) 596 456 { 597 457 return -ENOSYS; 598 458 } 599 459 EXPORT_SYMBOL_GPL(uwb_rsv_modify); 460 + 461 + /* 462 + * move an already established reservation (rc->rsvs_mutex must to be 463 + * taken when tis function is called) 464 + */ 465 + int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available) 466 + { 467 + struct uwb_rc *rc = rsv->rc; 468 + struct uwb_drp_backoff_win *bow = &rc->bow; 469 + struct device *dev = &rc->uwb_dev.dev; 470 + struct uwb_rsv_move *mv; 471 + int ret = 0; 472 + 473 + if (bow->can_reserve_extra_mases == false) 474 + return -EBUSY; 475 + 476 + mv = &rsv->mv; 477 + 478 + if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) { 479 + 480 + if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) { 481 + /* We want to move the reservation */ 482 + bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS); 483 + uwb_drp_avail_reserve_pending(rc, &mv->companion_mas); 484 + uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING); 485 + } 486 + } else { 487 + dev_dbg(dev, "new allocation not found\n"); 488 + } 489 + 490 + return ret; 491 + } 492 + 493 + /* It will try to move every reservation in state O_ESTABLISHED giving 494 + * to the MAS allocator algorithm an availability that is the real one 495 + * plus the allocation already established from the reservation. 
*/ 496 + void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc) 497 + { 498 + struct uwb_drp_backoff_win *bow = &rc->bow; 499 + struct uwb_rsv *rsv; 500 + struct uwb_mas_bm mas; 501 + 502 + if (bow->can_reserve_extra_mases == false) 503 + return; 504 + 505 + list_for_each_entry(rsv, &rc->reservations, rc_node) { 506 + if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED || 507 + rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) { 508 + uwb_drp_available(rc, &mas); 509 + bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS); 510 + uwb_rsv_try_move(rsv, &mas); 511 + } 512 + } 513 + 514 + } 600 515 601 516 /** 602 517 * uwb_rsv_terminate - terminate an established reservation ··· 741 546 uwb_dev_get(rsv->owner); 742 547 rsv->target.type = UWB_RSV_TARGET_DEV; 743 548 rsv->target.dev = &rc->uwb_dev; 549 + uwb_dev_get(&rc->uwb_dev); 744 550 rsv->type = uwb_ie_drp_type(drp_ie); 745 551 rsv->stream = uwb_ie_drp_stream_index(drp_ie); 746 552 uwb_drp_ie_to_bm(&rsv->mas, drp_ie); ··· 763 567 list_add_tail(&rsv->rc_node, &rc->reservations); 764 568 state = rsv->state; 765 569 rsv->state = UWB_RSV_STATE_NONE; 766 - uwb_rsv_set_state(rsv, state); 570 + 571 + /* FIXME: do something sensible here */ 572 + if (state == UWB_RSV_STATE_T_ACCEPTED 573 + && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) { 574 + /* FIXME: do something sensible here */ 575 + } else { 576 + uwb_rsv_set_state(rsv, state); 577 + } 767 578 768 579 return rsv; 769 580 } 581 + 582 + /** 583 + * uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservations 584 + * @rsv: the reservation. 585 + * @mas: returns the available MAS. 586 + * 587 + * The usable MAS of a reservation may be less than the negotiated MAS 588 + * if alien BPs are present. 
589 + */ 590 + void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas) 591 + { 592 + bitmap_zero(mas->bm, UWB_NUM_MAS); 593 + bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS); 594 + } 595 + EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas); 770 596 771 597 /** 772 598 * uwb_rsv_find - find a reservation for a received DRP IE. ··· 829 611 bool ie_updated = false; 830 612 831 613 list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { 832 - if (rsv->expired) 833 - uwb_drp_handle_timeout(rsv); 834 614 if (!rsv->ie_valid) { 835 615 uwb_drp_ie_update(rsv); 836 616 ie_updated = true; ··· 838 622 return ie_updated; 839 623 } 840 624 625 + void uwb_rsv_queue_update(struct uwb_rc *rc) 626 + { 627 + unsigned long delay_us = UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE; 628 + 629 + queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, usecs_to_jiffies(delay_us)); 630 + } 631 + 632 + /** 633 + * uwb_rsv_sched_update - schedule an update of the DRP IEs 634 + * @rc: the radio controller. 635 + * 636 + * To improve performance and ensure correctness with [ECMA-368] the 637 + * number of SET-DRP-IE commands that are done are limited. 638 + * 639 + * DRP IEs update come from two sources: DRP events from the hardware 640 + * which all occur at the beginning of the superframe ('syncronous' 641 + * events) and reservation establishment/termination requests from 642 + * PALs or timers ('asynchronous' events). 643 + * 644 + * A delayed work ensures that all the synchronous events result in 645 + * one SET-DRP-IE command. 646 + * 647 + * Additional logic (the set_drp_ie_pending and rsv_updated_postponed 648 + * flags) will prevent an asynchrous event starting a SET-DRP-IE 649 + * command if one is currently awaiting a response. 650 + * 651 + * FIXME: this does leave a window where an asynchrous event can delay 652 + * the SET-DRP-IE for a synchronous event by one superframe. 
653 + */ 841 654 void uwb_rsv_sched_update(struct uwb_rc *rc) 842 655 { 843 - queue_work(rc->rsv_workq, &rc->rsv_update_work); 656 + spin_lock(&rc->rsvs_lock); 657 + if (!delayed_work_pending(&rc->rsv_update_work)) { 658 + if (rc->set_drp_ie_pending > 0) { 659 + rc->set_drp_ie_pending++; 660 + goto unlock; 661 + } 662 + uwb_rsv_queue_update(rc); 663 + } 664 + unlock: 665 + spin_unlock(&rc->rsvs_lock); 844 666 } 845 667 846 668 /* ··· 887 633 */ 888 634 static void uwb_rsv_update_work(struct work_struct *work) 889 635 { 890 - struct uwb_rc *rc = container_of(work, struct uwb_rc, rsv_update_work); 636 + struct uwb_rc *rc = container_of(work, struct uwb_rc, 637 + rsv_update_work.work); 891 638 bool ie_updated; 892 639 893 640 mutex_lock(&rc->rsvs_mutex); ··· 900 645 ie_updated = true; 901 646 } 902 647 903 - if (ie_updated) 648 + if (ie_updated && (rc->set_drp_ie_pending == 0)) 904 649 uwb_rc_send_all_drp_ie(rc); 650 + 651 + mutex_unlock(&rc->rsvs_mutex); 652 + } 653 + 654 + static void uwb_rsv_alien_bp_work(struct work_struct *work) 655 + { 656 + struct uwb_rc *rc = container_of(work, struct uwb_rc, 657 + rsv_alien_bp_work.work); 658 + struct uwb_rsv *rsv; 659 + 660 + mutex_lock(&rc->rsvs_mutex); 661 + 662 + list_for_each_entry(rsv, &rc->reservations, rc_node) { 663 + if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) { 664 + rsv->callback(rsv); 665 + } 666 + } 905 667 906 668 mutex_unlock(&rc->rsvs_mutex); 907 669 } ··· 927 655 { 928 656 struct uwb_rsv *rsv = (struct uwb_rsv *)arg; 929 657 930 - rsv->expired = true; 931 - uwb_rsv_sched_update(rsv->rc); 658 + queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work); 932 659 } 933 660 934 661 /** ··· 944 673 list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) { 945 674 uwb_rsv_remove(rsv); 946 675 } 676 + /* Cancel any postponed update. 
*/ 677 + rc->set_drp_ie_pending = 0; 947 678 mutex_unlock(&rc->rsvs_mutex); 948 679 949 - cancel_work_sync(&rc->rsv_update_work); 680 + cancel_delayed_work_sync(&rc->rsv_update_work); 950 681 } 951 682 952 683 void uwb_rsv_init(struct uwb_rc *rc) 953 684 { 954 685 INIT_LIST_HEAD(&rc->reservations); 686 + INIT_LIST_HEAD(&rc->cnflt_alien_list); 955 687 mutex_init(&rc->rsvs_mutex); 956 - INIT_WORK(&rc->rsv_update_work, uwb_rsv_update_work); 688 + spin_lock_init(&rc->rsvs_lock); 689 + INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work); 690 + INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work); 691 + rc->bow.can_reserve_extra_mases = true; 692 + rc->bow.total_expired = 0; 693 + rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1; 694 + init_timer(&rc->bow.timer); 695 + rc->bow.timer.function = uwb_rsv_backoff_win_timer; 696 + rc->bow.timer.data = (unsigned long)&rc->bow; 957 697 958 698 bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS); 959 699 }
+26 -23
drivers/uwb/uwb-debug.c
··· 82 82 struct dentry *reservations_f; 83 83 struct dentry *accept_f; 84 84 struct dentry *drp_avail_f; 85 + spinlock_t list_lock; 85 86 }; 86 87 87 88 static struct dentry *root_dir; 88 89 89 90 static void uwb_dbg_rsv_cb(struct uwb_rsv *rsv) 90 91 { 91 - struct uwb_rc *rc = rsv->rc; 92 - struct device *dev = &rc->uwb_dev.dev; 93 - struct uwb_dev_addr devaddr; 94 - char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE]; 92 + struct uwb_dbg *dbg = rsv->pal_priv; 95 93 96 - uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr); 97 - if (rsv->target.type == UWB_RSV_TARGET_DEV) 98 - devaddr = rsv->target.dev->dev_addr; 99 - else 100 - devaddr = rsv->target.devaddr; 101 - uwb_dev_addr_print(target, sizeof(target), &devaddr); 102 - 103 - dev_dbg(dev, "debug: rsv %s -> %s: %s\n", 104 - owner, target, uwb_rsv_state_str(rsv->state)); 94 + uwb_rsv_dump("debug", rsv); 105 95 106 96 if (rsv->state == UWB_RSV_STATE_NONE) { 97 + spin_lock(&dbg->list_lock); 107 98 list_del(&rsv->pal_node); 99 + spin_unlock(&dbg->list_lock); 108 100 uwb_rsv_destroy(rsv); 109 101 } 110 102 } ··· 120 128 return -ENOMEM; 121 129 } 122 130 123 - rsv->owner = &rc->uwb_dev; 124 - rsv->target.type = UWB_RSV_TARGET_DEV; 125 - rsv->target.dev = target; 126 - rsv->type = cmd->type; 127 - rsv->max_mas = cmd->max_mas; 128 - rsv->min_mas = cmd->min_mas; 129 - rsv->sparsity = cmd->sparsity; 131 + rsv->target.type = UWB_RSV_TARGET_DEV; 132 + rsv->target.dev = target; 133 + rsv->type = cmd->type; 134 + rsv->max_mas = cmd->max_mas; 135 + rsv->min_mas = cmd->min_mas; 136 + rsv->max_interval = cmd->max_interval; 130 137 131 138 ret = uwb_rsv_establish(rsv); 132 139 if (ret) 133 140 uwb_rsv_destroy(rsv); 134 - else 141 + else { 142 + spin_lock(&(rc->dbg)->list_lock); 135 143 list_add_tail(&rsv->pal_node, &rc->dbg->rsvs); 136 - 144 + spin_unlock(&(rc->dbg)->list_lock); 145 + } 137 146 return ret; 138 147 } 139 148 ··· 144 151 struct uwb_rsv *rsv, *found = NULL; 145 152 int i = 0; 146 153 154 + 
spin_lock(&(rc->dbg)->list_lock); 155 + 147 156 list_for_each_entry(rsv, &rc->dbg->rsvs, pal_node) { 148 157 if (i == cmd->index) { 149 158 found = rsv; 159 + uwb_rsv_get(found); 150 160 break; 151 161 } 152 162 i++; 153 163 } 164 + 165 + spin_unlock(&(rc->dbg)->list_lock); 166 + 154 167 if (!found) 155 168 return -EINVAL; 156 169 157 170 uwb_rsv_terminate(found); 171 + uwb_rsv_put(found); 158 172 159 173 return 0; 160 174 } ··· 191 191 struct uwb_rc *rc = file->private_data; 192 192 struct uwb_dbg_cmd cmd; 193 193 int ret = 0; 194 - 194 + 195 195 if (len != sizeof(struct uwb_dbg_cmd)) 196 196 return -EINVAL; 197 197 ··· 325 325 struct uwb_dbg *dbg = container_of(pal, struct uwb_dbg, pal); 326 326 327 327 if (dbg->accept) { 328 + spin_lock(&dbg->list_lock); 328 329 list_add_tail(&rsv->pal_node, &dbg->rsvs); 330 + spin_unlock(&dbg->list_lock); 329 331 uwb_rsv_accept(rsv, uwb_dbg_rsv_cb, dbg); 330 332 } 331 333 } ··· 343 341 return; 344 342 345 343 INIT_LIST_HEAD(&rc->dbg->rsvs); 344 + spin_lock_init(&(rc->dbg)->list_lock); 346 345 347 346 uwb_pal_init(&rc->dbg->pal); 348 347 rc->dbg->pal.rc = rc;
+79 -1
drivers/uwb/uwb-internal.h
··· 92 92 93 93 struct uwb_rc_neh; 94 94 95 + extern int uwb_rc_cmd_async(struct uwb_rc *rc, const char *cmd_name, 96 + struct uwb_rccb *cmd, size_t cmd_size, 97 + u8 expected_type, u16 expected_event, 98 + uwb_rc_cmd_cb_f cb, void *arg); 99 + 100 + 95 101 void uwb_rc_neh_create(struct uwb_rc *rc); 96 102 void uwb_rc_neh_destroy(struct uwb_rc *rc); 97 103 ··· 112 106 extern int uwb_est_create(void); 113 107 extern void uwb_est_destroy(void); 114 108 109 + /* 110 + * UWB conflicting alien reservations 111 + */ 112 + struct uwb_cnflt_alien { 113 + struct uwb_rc *rc; 114 + struct list_head rc_node; 115 + struct uwb_mas_bm mas; 116 + struct timer_list timer; 117 + struct work_struct cnflt_update_work; 118 + }; 115 119 120 + enum uwb_uwb_rsv_alloc_result { 121 + UWB_RSV_ALLOC_FOUND = 0, 122 + UWB_RSV_ALLOC_NOT_FOUND, 123 + }; 124 + 125 + enum uwb_rsv_mas_status { 126 + UWB_RSV_MAS_NOT_AVAIL = 1, 127 + UWB_RSV_MAS_SAFE, 128 + UWB_RSV_MAS_UNSAFE, 129 + }; 130 + 131 + struct uwb_rsv_col_set_info { 132 + unsigned char start_col; 133 + unsigned char interval; 134 + unsigned char safe_mas_per_col; 135 + unsigned char unsafe_mas_per_col; 136 + }; 137 + 138 + struct uwb_rsv_col_info { 139 + unsigned char max_avail_safe; 140 + unsigned char max_avail_unsafe; 141 + unsigned char highest_mas[UWB_MAS_PER_ZONE]; 142 + struct uwb_rsv_col_set_info csi; 143 + }; 144 + 145 + struct uwb_rsv_row_info { 146 + unsigned char avail[UWB_MAS_PER_ZONE]; 147 + unsigned char free_rows; 148 + unsigned char used_rows; 149 + }; 150 + 151 + /* 152 + * UWB find allocation 153 + */ 154 + struct uwb_rsv_alloc_info { 155 + unsigned char bm[UWB_MAS_PER_ZONE * UWB_NUM_ZONES]; 156 + struct uwb_rsv_col_info ci[UWB_NUM_ZONES]; 157 + struct uwb_rsv_row_info ri; 158 + struct uwb_mas_bm *not_available; 159 + struct uwb_mas_bm *result; 160 + int min_mas; 161 + int max_mas; 162 + int max_interval; 163 + int total_allocated_mases; 164 + int safe_allocated_mases; 165 + int unsafe_allocated_mases; 166 + int interval; 
167 + }; 168 + 169 + int uwb_rsv_find_best_allocation(struct uwb_rsv *rsv, struct uwb_mas_bm *available, 170 + struct uwb_mas_bm *result); 171 + void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc); 116 172 /* 117 173 * UWB Events & management daemon 118 174 */ ··· 322 254 int uwb_rsv_setup(struct uwb_rc *rc); 323 255 void uwb_rsv_cleanup(struct uwb_rc *rc); 324 256 void uwb_rsv_remove_all(struct uwb_rc *rc); 257 + void uwb_rsv_get(struct uwb_rsv *rsv); 258 + void uwb_rsv_put(struct uwb_rsv *rsv); 259 + bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv); 260 + void uwb_rsv_dump(char *text, struct uwb_rsv *rsv); 261 + int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available); 262 + void uwb_rsv_backoff_win_timer(unsigned long arg); 263 + void uwb_rsv_backoff_win_increment(struct uwb_rc *rc); 264 + int uwb_rsv_status(struct uwb_rsv *rsv); 265 + int uwb_rsv_companion_status(struct uwb_rsv *rsv); 325 266 326 267 void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state); 327 268 void uwb_rsv_remove(struct uwb_rsv *rsv); 328 269 struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src, 329 270 struct uwb_ie_drp *drp_ie); 330 271 void uwb_rsv_sched_update(struct uwb_rc *rc); 272 + void uwb_rsv_queue_update(struct uwb_rc *rc); 331 273 332 - void uwb_drp_handle_timeout(struct uwb_rsv *rsv); 333 274 int uwb_drp_ie_update(struct uwb_rsv *rsv); 334 275 void uwb_drp_ie_to_bm(struct uwb_mas_bm *bm, const struct uwb_ie_drp *drp_ie); 335 276 336 277 void uwb_drp_avail_init(struct uwb_rc *rc); 278 + void uwb_drp_available(struct uwb_rc *rc, struct uwb_mas_bm *avail); 337 279 int uwb_drp_avail_reserve_pending(struct uwb_rc *rc, struct uwb_mas_bm *mas); 338 280 void uwb_drp_avail_reserve(struct uwb_rc *rc, struct uwb_mas_bm *mas); 339 281 void uwb_drp_avail_release(struct uwb_rc *rc, struct uwb_mas_bm *mas);
+43 -4
include/linux/uwb.h
··· 67 67 struct uwb_dev_addr dev_addr; 68 68 int beacon_slot; 69 69 DECLARE_BITMAP(streams, UWB_NUM_STREAMS); 70 + DECLARE_BITMAP(last_availability_bm, UWB_NUM_MAS); 70 71 }; 71 72 #define to_uwb_dev(d) container_of(d, struct uwb_dev, dev) 72 73 ··· 110 109 */ 111 110 struct uwb_mas_bm { 112 111 DECLARE_BITMAP(bm, UWB_NUM_MAS); 112 + DECLARE_BITMAP(unsafe_bm, UWB_NUM_MAS); 113 + int safe; 114 + int unsafe; 113 115 }; 114 116 115 117 /** ··· 138 134 * FIXME: further target states TBD. 139 135 */ 140 136 enum uwb_rsv_state { 141 - UWB_RSV_STATE_NONE, 137 + UWB_RSV_STATE_NONE = 0, 142 138 UWB_RSV_STATE_O_INITIATED, 143 139 UWB_RSV_STATE_O_PENDING, 144 140 UWB_RSV_STATE_O_MODIFIED, 145 141 UWB_RSV_STATE_O_ESTABLISHED, 142 + UWB_RSV_STATE_O_TO_BE_MOVED, 143 + UWB_RSV_STATE_O_MOVE_EXPANDING, 144 + UWB_RSV_STATE_O_MOVE_COMBINING, 145 + UWB_RSV_STATE_O_MOVE_REDUCING, 146 146 UWB_RSV_STATE_T_ACCEPTED, 147 147 UWB_RSV_STATE_T_DENIED, 148 + UWB_RSV_STATE_T_CONFLICT, 148 149 UWB_RSV_STATE_T_PENDING, 150 + UWB_RSV_STATE_T_EXPANDING_ACCEPTED, 151 + UWB_RSV_STATE_T_EXPANDING_CONFLICT, 152 + UWB_RSV_STATE_T_EXPANDING_PENDING, 153 + UWB_RSV_STATE_T_EXPANDING_DENIED, 154 + UWB_RSV_STATE_T_RESIZED, 149 155 150 156 UWB_RSV_STATE_LAST, 151 157 }; ··· 178 164 struct uwb_dev *dev; 179 165 struct uwb_dev_addr devaddr; 180 166 }; 167 + }; 168 + 169 + struct uwb_rsv_move { 170 + struct uwb_mas_bm final_mas; 171 + struct uwb_ie_drp *companion_drp_ie; 172 + struct uwb_mas_bm companion_mas; 181 173 }; 182 174 183 175 /* ··· 223 203 * 224 204 * @status: negotiation status 225 205 * @stream: stream index allocated for this reservation 206 + * @tiebreaker: conflict tiebreaker for this reservation 226 207 * @mas: reserved MAS 227 208 * @drp_ie: the DRP IE 228 209 * @ie_valid: true iff the DRP IE matches the reservation parameters ··· 246 225 enum uwb_drp_type type; 247 226 int max_mas; 248 227 int min_mas; 249 - int sparsity; 228 + int max_interval; 250 229 bool is_multicast; 251 230 252 231 
uwb_rsv_cb_f callback; 253 232 void *pal_priv; 254 233 255 234 enum uwb_rsv_state state; 235 + bool needs_release_companion_mas; 256 236 u8 stream; 237 + u8 tiebreaker; 257 238 struct uwb_mas_bm mas; 258 239 struct uwb_ie_drp *drp_ie; 240 + struct uwb_rsv_move mv; 259 241 bool ie_valid; 260 242 struct timer_list timer; 261 - bool expired; 243 + struct work_struct handle_timeout_work; 262 244 }; 263 245 264 246 static const ··· 303 279 bool ie_valid; 304 280 }; 305 281 282 + struct uwb_drp_backoff_win { 283 + u8 window; 284 + u8 n; 285 + int total_expired; 286 + struct timer_list timer; 287 + bool can_reserve_extra_mases; 288 + }; 306 289 307 290 const char *uwb_rsv_state_str(enum uwb_rsv_state state); 308 291 const char *uwb_rsv_type_str(enum uwb_drp_type type); ··· 324 293 void uwb_rsv_terminate(struct uwb_rsv *rsv); 325 294 326 295 void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv); 296 + 297 + void uwb_rsv_get_usable_mas(struct uwb_rsv *orig_rsv, struct uwb_mas_bm *mas); 327 298 328 299 /** 329 300 * Radio Control Interface instance ··· 397 364 398 365 struct uwbd uwbd; 399 366 367 + struct uwb_drp_backoff_win bow; 400 368 struct uwb_drp_avail drp_avail; 401 369 struct list_head reservations; 370 + struct list_head cnflt_alien_list; 371 + struct uwb_mas_bm cnflt_alien_bitmap; 402 372 struct mutex rsvs_mutex; 373 + spinlock_t rsvs_lock; 403 374 struct workqueue_struct *rsv_workq; 404 - struct work_struct rsv_update_work; 405 375 376 + struct delayed_work rsv_update_work; 377 + struct delayed_work rsv_alien_bp_work; 378 + int set_drp_ie_pending; 406 379 struct mutex ies_mutex; 407 380 struct uwb_rc_cmd_set_ie *ies; 408 381 size_t ies_capacity;
+1 -1
include/linux/uwb/debug-cmd.h
··· 43 43 __u8 type; 44 44 __u16 max_mas; 45 45 __u16 min_mas; 46 - __u8 sparsity; 46 + __u8 max_interval; 47 47 }; 48 48 49 49 struct uwb_dbg_cmd_rsv_terminate {
+25
include/linux/uwb/spec.h
··· 59 59 #define UWB_MAS_PER_ZONE (UWB_NUM_MAS / UWB_NUM_ZONES) 60 60 61 61 /* 62 + * Number of MAS required before a row can be considered available. 63 + */ 64 + #define UWB_USABLE_MAS_PER_ROW (UWB_NUM_ZONES - 1) 65 + 66 + /* 62 67 * Number of streams per DRP reservation between a pair of devices. 63 68 * 64 69 * [ECMA-368] section 16.8.6. ··· 97 92 * [ECMA-368] section 17.16 98 93 */ 99 94 enum { UWB_MAX_LOST_BEACONS = 3 }; 95 + 96 + /* 97 + * mDRPBackOffWinMin 98 + * 99 + * The minimum number of superframes to wait before trying to reserve 100 + * extra MAS. 101 + * 102 + * [ECMA-368] section 17.16 103 + */ 104 + enum { UWB_DRP_BACKOFF_WIN_MIN = 2 }; 105 + 106 + /* 107 + * mDRPBackOffWinMax 108 + * 109 + * The maximum number of superframes to wait before trying to reserve 110 + * extra MAS. 111 + * 112 + * [ECMA-368] section 17.16 113 + */ 114 + enum { UWB_DRP_BACKOFF_WIN_MAX = 16 }; 100 115 101 116 /* 102 117 * Length of a superframe in microseconds.