Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

net: ipa: generalize register offset functions

Rename ipa_reg_offset() to be reg_offset() and move its definition
to "reg.h". Rename ipa_reg_n_offset() to be reg_n_offset() also.

Signed-off-by: Alex Elder <elder@linaro.org>
Signed-off-by: David S. Miller <davem@davemloft.net>

Authored by Alex Elder; committed by David S. Miller.
fc4cecf7 81772e44

+66 -68
+2 -2
drivers/net/ipa/ipa_cmd.c
··· 300 300 else 301 301 reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH); 302 302 303 - offset = ipa_reg_offset(reg); 303 + offset = reg_offset(reg); 304 304 name = "filter/route hash flush"; 305 305 if (!ipa_cmd_register_write_offset_valid(ipa, name, offset)) 306 306 return false; ··· 314 314 * fits in the register write command field(s) that must hold it. 315 315 */ 316 316 reg = ipa_reg(ipa, ENDP_STATUS); 317 - offset = ipa_reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1); 317 + offset = reg_n_offset(reg, IPA_ENDPOINT_COUNT - 1); 318 318 name = "maximal endpoint status"; 319 319 if (!ipa_cmd_register_write_offset_valid(ipa, name, offset)) 320 320 return false;
+19 -19
drivers/net/ipa/ipa_endpoint.c
··· 460 460 WARN_ON(ipa->version >= IPA_VERSION_4_0); 461 461 462 462 reg = ipa_reg(ipa, ENDP_INIT_CTRL); 463 - offset = ipa_reg_n_offset(reg, endpoint->endpoint_id); 463 + offset = reg_n_offset(reg, endpoint->endpoint_id); 464 464 val = ioread32(ipa->reg_virt + offset); 465 465 466 466 field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND; ··· 499 499 WARN_ON(!test_bit(endpoint_id, ipa->available)); 500 500 501 501 reg = ipa_reg(ipa, STATE_AGGR_ACTIVE); 502 - val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit)); 502 + val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit)); 503 503 504 504 return !!(val & BIT(endpoint_id % 32)); 505 505 } ··· 515 515 WARN_ON(!test_bit(endpoint_id, ipa->available)); 516 516 517 517 reg = ipa_reg(ipa, AGGR_FORCE_CLOSE); 518 - iowrite32(mask, ipa->reg_virt + ipa_reg_n_offset(reg, unit)); 518 + iowrite32(mask, ipa->reg_virt + reg_n_offset(reg, unit)); 519 519 } 520 520 521 521 /** ··· 622 622 continue; 623 623 624 624 reg = ipa_reg(ipa, ENDP_STATUS); 625 - offset = ipa_reg_n_offset(reg, endpoint_id); 625 + offset = reg_n_offset(reg, endpoint_id); 626 626 627 627 /* Value written is 0, and all bits are updated. 
That 628 628 * means status is disabled on the endpoint, and as a ··· 674 674 val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled); 675 675 /* CS_GEN_QMB_MASTER_SEL is 0 */ 676 676 677 - iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); 677 + iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); 678 678 } 679 679 680 680 static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint) ··· 690 690 reg = ipa_reg(ipa, ENDP_INIT_NAT); 691 691 val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_TYPE_BYPASS); 692 692 693 - iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); 693 + iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); 694 694 } 695 695 696 696 static u32 ··· 820 820 /* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */ 821 821 } 822 822 823 - iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); 823 + iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); 824 824 } 825 825 826 826 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint) ··· 872 872 } 873 873 } 874 874 875 - iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); 875 + iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); 876 876 } 877 877 878 878 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint) ··· 887 887 return; /* Register not valid for TX endpoints */ 888 888 889 889 reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK); 890 - offset = ipa_reg_n_offset(reg, endpoint_id); 890 + offset = reg_n_offset(reg, endpoint_id); 891 891 892 892 /* Note that HDR_ENDIANNESS indicates big endian header fields */ 893 893 if (endpoint->config.qmap) ··· 918 918 } 919 919 /* All other bits unspecified (and 0) */ 920 920 921 - offset = ipa_reg_n_offset(reg, endpoint->endpoint_id); 921 + offset = reg_n_offset(reg, endpoint->endpoint_id); 922 922 iowrite32(val, ipa->reg_virt + offset); 923 923 } 924 924 ··· 1032 1032 /* other fields ignored */ 1033 1033 } 1034 1034 1035 - 
iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); 1035 + iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); 1036 1036 } 1037 1037 1038 1038 /* The head-of-line blocking timer is defined as a tick count. For ··· 1116 1116 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER); 1117 1117 val = hol_block_timer_encode(ipa, reg, microseconds); 1118 1118 1119 - iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); 1119 + iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); 1120 1120 } 1121 1121 1122 1122 static void ··· 1129 1129 u32 val; 1130 1130 1131 1131 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN); 1132 - offset = ipa_reg_n_offset(reg, endpoint_id); 1132 + offset = reg_n_offset(reg, endpoint_id); 1133 1133 val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0; 1134 1134 1135 1135 iowrite32(val, ipa->reg_virt + offset); ··· 1183 1183 /* PACKET_OFFSET_LOCATION is ignored (not valid) */ 1184 1184 /* MAX_PACKET_LEN is 0 (not enforced) */ 1185 1185 1186 - iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); 1186 + iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); 1187 1187 } 1188 1188 1189 1189 static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint) ··· 1197 1197 reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP); 1198 1198 val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group); 1199 1199 1200 - iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); 1200 + iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); 1201 1201 } 1202 1202 1203 1203 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint) ··· 1220 1220 val |= ipa_reg_encode(reg, SEQ_REP_TYPE, 1221 1221 endpoint->config.tx.seq_rep_type); 1222 1222 1223 - iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); 1223 + iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); 1224 1224 } 1225 1225 1226 1226 /** ··· 1292 1292 /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */ 1293 1293 } 
1294 1294 1295 - iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id)); 1295 + iowrite32(val, ipa->reg_virt + reg_n_offset(reg, endpoint_id)); 1296 1296 } 1297 1297 1298 1298 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint, ··· 1647 1647 val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id); 1648 1648 val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR); 1649 1649 1650 - iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg)); 1650 + iowrite32(val, ipa->reg_virt + reg_offset(reg)); 1651 1651 } 1652 1652 1653 1653 void ipa_endpoint_default_route_clear(struct ipa *ipa) ··· 2019 2019 * the highest one doesn't exceed the number supported by software. 2020 2020 */ 2021 2021 reg = ipa_reg(ipa, FLAVOR_0); 2022 - val = ioread32(ipa->reg_virt + ipa_reg_offset(reg)); 2022 + val = ioread32(ipa->reg_virt + reg_offset(reg)); 2023 2023 2024 2024 /* Our RX is an IPA producer; our TX is an IPA consumer. */ 2025 2025 tx_count = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
+8 -9
drivers/net/ipa/ipa_interrupt.c
··· 52 52 u32 offset; 53 53 54 54 reg = ipa_reg(ipa, IPA_IRQ_CLR); 55 - offset = ipa_reg_offset(reg); 55 + offset = reg_offset(reg); 56 56 57 57 switch (irq_id) { 58 58 case IPA_IRQ_UC_0: ··· 102 102 * only the enabled ones. 103 103 */ 104 104 reg = ipa_reg(ipa, IPA_IRQ_STTS); 105 - offset = ipa_reg_offset(reg); 105 + offset = reg_offset(reg); 106 106 pending = ioread32(ipa->reg_virt + offset); 107 107 while ((mask = pending & enabled)) { 108 108 do { ··· 120 120 dev_dbg(dev, "clearing disabled IPA interrupts 0x%08x\n", 121 121 pending); 122 122 reg = ipa_reg(ipa, IPA_IRQ_CLR); 123 - offset = ipa_reg_offset(reg); 124 - iowrite32(pending, ipa->reg_virt + offset); 123 + iowrite32(pending, ipa->reg_virt + reg_offset(reg)); 125 124 } 126 125 out_power_put: 127 126 pm_runtime_mark_last_busy(dev); ··· 133 134 { 134 135 const struct reg *reg = ipa_reg(ipa, IPA_IRQ_EN); 135 136 136 - iowrite32(ipa->interrupt->enabled, ipa->reg_virt + ipa_reg_offset(reg)); 137 + iowrite32(ipa->interrupt->enabled, ipa->reg_virt + reg_offset(reg)); 137 138 } 138 139 139 140 /* Enable an IPA interrupt type */ ··· 180 181 return; 181 182 182 183 reg = ipa_reg(ipa, IRQ_SUSPEND_EN); 183 - offset = ipa_reg_n_offset(reg, unit); 184 + offset = reg_n_offset(reg, unit); 184 185 val = ioread32(ipa->reg_virt + offset); 185 186 186 187 if (enable) ··· 218 219 u32 val; 219 220 220 221 reg = ipa_reg(ipa, IRQ_SUSPEND_INFO); 221 - val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit)); 222 + val = ioread32(ipa->reg_virt + reg_n_offset(reg, unit)); 222 223 223 224 /* SUSPEND interrupt status isn't cleared on IPA version 3.0 */ 224 225 if (ipa->version == IPA_VERSION_3_0) 225 226 continue; 226 227 227 228 reg = ipa_reg(ipa, IRQ_SUSPEND_CLR); 228 - iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, unit)); 229 + iowrite32(val, ipa->reg_virt + reg_n_offset(reg, unit)); 229 230 } 230 231 } 231 232 ··· 260 261 261 262 /* Start with all IPA interrupts disabled */ 262 263 reg = ipa_reg(ipa, IPA_IRQ_EN); 263 - 
iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg)); 264 + iowrite32(0, ipa->reg_virt + reg_offset(reg)); 264 265 265 266 ret = request_threaded_irq(irq, NULL, ipa_isr_thread, IRQF_ONESHOT, 266 267 "ipa", interrupt);
+15 -13
drivers/net/ipa/ipa_main.c
··· 212 212 213 213 reg = ipa_reg(ipa, IPA_BCR); 214 214 val = data->backward_compat; 215 - iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg)); 215 + iowrite32(val, ipa->reg_virt + reg_offset(reg)); 216 216 } 217 217 218 218 static void ipa_hardware_config_tx(struct ipa *ipa) ··· 227 227 228 228 /* Disable PA mask to allow HOLB drop */ 229 229 reg = ipa_reg(ipa, IPA_TX_CFG); 230 - offset = ipa_reg_offset(reg); 230 + offset = reg_offset(reg); 231 231 232 232 val = ioread32(ipa->reg_virt + offset); 233 233 ··· 259 259 val |= ipa_reg_bit(reg, GLOBAL_2X_CLK); 260 260 } 261 261 262 - iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg)); 262 + iowrite32(val, ipa->reg_virt + reg_offset(reg)); 263 263 } 264 264 265 265 /* Configure bus access behavior for IPA components */ ··· 274 274 return; 275 275 276 276 reg = ipa_reg(ipa, COMP_CFG); 277 - offset = ipa_reg_offset(reg); 277 + offset = reg_offset(reg); 278 + 278 279 val = ioread32(ipa->reg_virt + offset); 279 280 280 281 if (ipa->version == IPA_VERSION_4_0) { ··· 316 315 val |= ipa_reg_encode(reg, GEN_QMB_1_MAX_WRITES, 317 316 data1->max_writes); 318 317 319 - iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg)); 318 + iowrite32(val, ipa->reg_virt + reg_offset(reg)); 320 319 321 320 /* Max outstanding read accesses for QSB masters */ 322 321 reg = ipa_reg(ipa, QSB_MAX_READS); ··· 333 332 data1->max_reads_beats); 334 333 } 335 334 336 - iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg)); 335 + iowrite32(val, ipa->reg_virt + reg_offset(reg)); 337 336 } 338 337 339 338 /* The internal inactivity timer clock is used for the aggregation timer */ ··· 375 374 376 375 /* Timer clock divider must be disabled when we change the rate */ 377 376 reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG); 378 - iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg)); 377 + iowrite32(0, ipa->reg_virt + reg_offset(reg)); 379 378 380 379 reg = ipa_reg(ipa, QTIME_TIMESTAMP_CFG); 381 380 /* Set DPL time stamp resolution to use Qtime (instead of 1 msec) */ ··· 
385 384 val = ipa_reg_encode(reg, TAG_TIMESTAMP_LSB, TAG_TIMESTAMP_SHIFT); 386 385 val = ipa_reg_encode(reg, NAT_TIMESTAMP_LSB, NAT_TIMESTAMP_SHIFT); 387 386 388 - iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg)); 387 + iowrite32(val, ipa->reg_virt + reg_offset(reg)); 389 388 390 389 /* Set granularity of pulse generators used for other timers */ 391 390 reg = ipa_reg(ipa, TIMERS_PULSE_GRAN_CFG); ··· 398 397 val |= ipa_reg_encode(reg, PULSE_GRAN_2, IPA_GRAN_1_MS); 399 398 } 400 399 401 - iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg)); 400 + iowrite32(val, ipa->reg_virt + reg_offset(reg)); 402 401 403 402 /* Actual divider is 1 more than value supplied here */ 404 403 reg = ipa_reg(ipa, TIMERS_XO_CLK_DIV_CFG); 405 - offset = ipa_reg_offset(reg); 404 + offset = reg_offset(reg); 405 + 406 406 val = ipa_reg_encode(reg, DIV_VALUE, IPA_XO_CLOCK_DIVIDER - 1); 407 407 408 408 iowrite32(val, ipa->reg_virt + offset); ··· 424 422 reg = ipa_reg(ipa, COUNTER_CFG); 425 423 /* If defined, EOT_COAL_GRANULARITY is 0 */ 426 424 val = ipa_reg_encode(reg, AGGR_GRANULARITY, granularity); 427 - iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg)); 425 + iowrite32(val, ipa->reg_virt + reg_offset(reg)); 428 426 } 429 427 430 428 static void ipa_hardware_config_timing(struct ipa *ipa) ··· 453 451 /* IPV6_ROUTER_HASH, IPV6_FILTER_HASH, IPV4_ROUTER_HASH, 454 452 * IPV4_FILTER_HASH are all zero. 455 453 */ 456 - iowrite32(0, ipa->reg_virt + ipa_reg_offset(reg)); 454 + iowrite32(0, ipa->reg_virt + reg_offset(reg)); 457 455 } 458 456 459 457 static void ipa_idle_indication_cfg(struct ipa *ipa, ··· 472 470 if (const_non_idle_enable) 473 471 val |= ipa_reg_bit(reg, CONST_NON_IDLE_ENABLE); 474 472 475 - iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg)); 473 + iowrite32(val, ipa->reg_virt + reg_offset(reg)); 476 474 } 477 475 478 476 /**
+2 -2
drivers/net/ipa/ipa_mem.c
··· 116 116 117 117 reg = ipa_reg(ipa, LOCAL_PKT_PROC_CNTXT); 118 118 val = ipa_reg_encode(reg, IPA_BASE_ADDR, offset); 119 - iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg)); 119 + iowrite32(val, ipa->reg_virt + reg_offset(reg)); 120 120 121 121 return 0; 122 122 } ··· 328 328 329 329 /* Check the advertised location and size of the shared memory area */ 330 330 reg = ipa_reg(ipa, SHARED_MEM_SIZE); 331 - val = ioread32(ipa->reg_virt + ipa_reg_offset(reg)); 331 + val = ioread32(ipa->reg_virt + reg_offset(reg)); 332 332 333 333 /* The fields in the register are in 8 byte units */ 334 334 ipa->mem_offset = 8 * ipa_reg_decode(reg, MEM_BADDR, val);
+1 -13
drivers/net/ipa/ipa_reg.h
··· 36 36 * by register ID. Each entry in the array specifies the base offset and 37 37 * (for parameterized registers) a non-zero stride value. Not all versions 38 38 * of IPA define all registers. The offset for a register is returned by 39 - * ipa_reg_offset() when the register's ipa_reg structure is supplied; 39 + * reg_offset() when the register's ipa_reg structure is supplied; 40 40 * zero is returned for an undefined register (this should never happen). 41 41 * 42 42 * Some registers encode multiple fields within them. Each field in ··· 699 699 } 700 700 701 701 const struct reg *ipa_reg(struct ipa *ipa, enum ipa_reg_id reg_id); 702 - 703 - /* Returns 0 for NULL reg; warning will have already been issued */ 704 - static inline u32 ipa_reg_offset(const struct reg *reg) 705 - { 706 - return reg ? reg->offset : 0; 707 - } 708 - 709 - /* Returns 0 for NULL reg; warning will have already been issued */ 710 - static inline u32 ipa_reg_n_offset(const struct reg *reg, u32 n) 711 - { 712 - return reg ? reg->offset + n * reg->stride : 0; 713 - } 714 702 715 703 int ipa_reg_init(struct ipa *ipa); 716 704 void ipa_reg_exit(struct ipa *ipa);
+1 -1
drivers/net/ipa/ipa_resource.c
··· 83 83 val |= ipa_reg_encode(reg, Y_MAX_LIM, ylimits->max); 84 84 } 85 85 86 - iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, resource_type)); 86 + iowrite32(val, ipa->reg_virt + reg_n_offset(reg, resource_type)); 87 87 } 88 88 89 89 static void ipa_resource_config_src(struct ipa *ipa, u32 resource_type,
+5 -8
drivers/net/ipa/ipa_table.c
··· 347 347 { 348 348 struct gsi_trans *trans; 349 349 const struct reg *reg; 350 - u32 offset; 351 350 u32 val; 352 351 353 352 if (!ipa_table_hash_support(ipa)) ··· 360 361 361 362 if (ipa->version < IPA_VERSION_5_0) { 362 363 reg = ipa_reg(ipa, FILT_ROUT_HASH_FLUSH); 363 - offset = ipa_reg_offset(reg); 364 364 365 365 val = ipa_reg_bit(reg, IPV6_ROUTER_HASH); 366 366 val |= ipa_reg_bit(reg, IPV6_FILTER_HASH); ··· 367 369 val |= ipa_reg_bit(reg, IPV4_FILTER_HASH); 368 370 } else { 369 371 reg = ipa_reg(ipa, FILT_ROUT_CACHE_FLUSH); 370 - offset = ipa_reg_offset(reg); 371 372 372 373 /* IPA v5.0+ uses a unified cache (both IPv4 and IPv6) */ 373 374 val = ipa_reg_bit(reg, ROUTER_CACHE); 374 375 val |= ipa_reg_bit(reg, FILTER_CACHE); 375 376 } 376 377 377 - ipa_cmd_register_write_add(trans, offset, val, val, false); 378 + ipa_cmd_register_write_add(trans, reg_offset(reg), val, val, false); 378 379 379 380 gsi_trans_commit_wait(trans); 380 381 ··· 499 502 if (ipa->version < IPA_VERSION_5_0) { 500 503 reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG); 501 504 502 - offset = ipa_reg_n_offset(reg, endpoint_id); 505 + offset = reg_n_offset(reg, endpoint_id); 503 506 val = ioread32(endpoint->ipa->reg_virt + offset); 504 507 505 508 /* Zero all filter-related fields, preserving the rest */ ··· 507 510 } else { 508 511 /* IPA v5.0 separates filter and router cache configuration */ 509 512 reg = ipa_reg(ipa, ENDP_FILTER_CACHE_CFG); 510 - offset = ipa_reg_n_offset(reg, endpoint_id); 513 + offset = reg_n_offset(reg, endpoint_id); 511 514 512 515 /* Zero all filter-related fields */ 513 516 val = 0; ··· 557 560 558 561 if (ipa->version < IPA_VERSION_5_0) { 559 562 reg = ipa_reg(ipa, ENDP_FILTER_ROUTER_HSH_CFG); 560 - offset = ipa_reg_n_offset(reg, route_id); 563 + offset = reg_n_offset(reg, route_id); 561 564 562 565 val = ioread32(ipa->reg_virt + offset); 563 566 ··· 566 569 } else { 567 570 /* IPA v5.0 separates filter and router cache configuration */ 568 571 reg = ipa_reg(ipa, 
ENDP_ROUTER_CACHE_CFG); 569 - offset = ipa_reg_n_offset(reg, route_id); 572 + offset = reg_n_offset(reg, route_id); 570 573 571 574 /* Zero all route-related fields */ 572 575 val = 0;
+1 -1
drivers/net/ipa/ipa_uc.c
··· 245 245 reg = ipa_reg(ipa, IPA_IRQ_UC); 246 246 val = ipa_reg_bit(reg, UC_INTR); 247 247 248 - iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg)); 248 + iowrite32(val, ipa->reg_virt + reg_offset(reg)); 249 249 } 250 250 251 251 /* Tell the microcontroller the AP is shutting down */
+12
drivers/net/ipa/reg.h
··· 67 67 return regs->reg[reg_id]; 68 68 } 69 69 70 + /* Returns 0 for NULL reg; warning should have already been issued */ 71 + static inline u32 reg_offset(const struct reg *reg) 72 + { 73 + return reg ? reg->offset : 0; 74 + } 75 + 76 + /* Returns 0 for NULL reg; warning should have already been issued */ 77 + static inline u32 reg_n_offset(const struct reg *reg, u32 n) 78 + { 79 + return reg ? reg->offset + n * reg->stride : 0; 80 + } 81 + 70 82 #endif /* _REG_H_ */