Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

Revert "firewire: core: move workqueue handler from 1394 OHCI driver to core function"

This reverts commit 767bfb9ef27ebf760290d9f8bc303828b018c312. It appears
that the call of ohci_flush_iso_completions() in the work item — scheduled
by the hardIRQ of the 1394 OHCI controller for any isochronous context —
changes the timing of event queueing as observed by user-space applications.

Link: https://lore.kernel.org/r/20240912133038.238786-3-o-takashi@sakamocchi.jp
Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>

+64 -12
+16 -10
drivers/firewire/core-iso.c
··· 131 131 return 0; 132 132 } 133 133 134 - static void flush_completions_work(struct work_struct *work) 135 - { 136 - struct fw_iso_context *ctx = container_of(work, struct fw_iso_context, work); 137 - 138 - fw_iso_context_flush_completions(ctx); 139 - } 140 - 141 134 struct fw_iso_context *fw_iso_context_create(struct fw_card *card, 142 135 int type, int channel, int speed, size_t header_size, 143 136 fw_iso_callback_t callback, void *callback_data) ··· 149 156 ctx->header_size = header_size; 150 157 ctx->callback.sc = callback; 151 158 ctx->callback_data = callback_data; 152 - INIT_WORK(&ctx->work, flush_completions_work); 153 159 154 160 trace_isoc_outbound_allocate(ctx, channel, speed); 155 161 trace_isoc_inbound_single_allocate(ctx, channel, header_size); ··· 218 226 * to process the context asynchronously, fw_iso_context_schedule_flush_completions() is available 219 227 * instead. 220 228 * 221 - * Context: Process context. 229 + * Context: Process context. May sleep due to disable_work_sync(). 222 230 */ 223 231 int fw_iso_context_flush_completions(struct fw_iso_context *ctx) 224 232 { 233 + int err; 234 + 225 235 trace_isoc_outbound_flush_completions(ctx); 226 236 trace_isoc_inbound_single_flush_completions(ctx); 227 237 trace_isoc_inbound_multiple_flush_completions(ctx); 228 238 229 - return ctx->card->driver->flush_iso_completions(ctx); 239 + might_sleep(); 240 + 241 + // Avoid dead lock due to programming mistake. 242 + if (WARN_ON_ONCE(current_work() == &ctx->work)) 243 + return 0; 244 + 245 + disable_work_sync(&ctx->work); 246 + 247 + err = ctx->card->driver->flush_iso_completions(ctx); 248 + 249 + enable_work(&ctx->work); 250 + 251 + return err; 230 252 } 231 253 EXPORT_SYMBOL(fw_iso_context_flush_completions); 232 254
+5
drivers/firewire/core.h
··· 159 159 int fw_iso_buffer_map_dma(struct fw_iso_buffer *buffer, struct fw_card *card, 160 160 enum dma_data_direction direction); 161 161 162 + static inline void fw_iso_context_init_work(struct fw_iso_context *ctx, work_func_t func) 163 + { 164 + INIT_WORK(&ctx->work, func); 165 + } 166 + 162 167 163 168 /* -topology */ 164 169
+43 -2
drivers/firewire/ohci.c
··· 1182 1182 } 1183 1183 } 1184 1184 1185 + static void ohci_isoc_context_work(struct work_struct *work) 1186 + { 1187 + struct fw_iso_context *base = container_of(work, struct fw_iso_context, work); 1188 + struct iso_context *isoc_ctx = container_of(base, struct iso_context, base); 1189 + struct context *ctx = &isoc_ctx->context; 1190 + struct descriptor *d, *last; 1191 + u32 address; 1192 + int z; 1193 + struct descriptor_buffer *desc; 1194 + 1195 + desc = list_entry(ctx->buffer_list.next, struct descriptor_buffer, list); 1196 + last = ctx->last; 1197 + while (last->branch_address != 0) { 1198 + struct descriptor_buffer *old_desc = desc; 1199 + 1200 + address = le32_to_cpu(last->branch_address); 1201 + z = address & 0xf; 1202 + address &= ~0xf; 1203 + ctx->current_bus = address; 1204 + 1205 + // If the branch address points to a buffer outside of the current buffer, advance 1206 + // to the next buffer. 1207 + if (address < desc->buffer_bus || address >= desc->buffer_bus + desc->used) 1208 + desc = list_entry(desc->list.next, struct descriptor_buffer, list); 1209 + d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d); 1210 + last = find_branch_descriptor(d, z); 1211 + 1212 + if (!ctx->callback(ctx, d, last)) 1213 + break; 1214 + 1215 + if (old_desc != desc) { 1216 + // If we've advanced to the next buffer, move the previous buffer to the 1217 + // free list. 1218 + old_desc->used = 0; 1219 + guard(spinlock_irqsave)(&ctx->ohci->lock); 1220 + list_move_tail(&old_desc->list, &ctx->buffer_list); 1221 + } 1222 + ctx->last = last; 1223 + } 1224 + } 1225 + 1185 1226 /* 1186 1227 * Allocate a new buffer and add it to the list of free buffers for this 1187 1228 * context. Must be called with ohci->lock held. 
··· 3169 3128 ret = context_init(&ctx->context, ohci, regs, callback); 3170 3129 if (ret < 0) 3171 3130 goto out_with_header; 3131 + fw_iso_context_init_work(&ctx->base, ohci_isoc_context_work); 3172 3132 3173 3133 if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) { 3174 3134 set_multichannel_mask(ohci, 0); ··· 3624 3582 int ret = 0; 3625 3583 3626 3584 if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { 3627 - // Note that tasklet softIRQ is not used to process isochronous context anymore. 3628 - context_tasklet((unsigned long)&ctx->context); 3585 + ohci_isoc_context_work(&base->work); 3629 3586 3630 3587 switch (base->type) { 3631 3588 case FW_ISO_CONTEXT_TRANSMIT: