Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

mei: bus: split RX and async notification callbacks

Split the callbacks for RX and async notification events on the mei bus to
eliminate synchronization problems and to open the way for RX optimizations.

Signed-off-by: Alexander Usyskin <alexander.usyskin@intel.com>
Signed-off-by: Tomas Winkler <tomas.winkler@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Alexander Usyskin; committed by Greg Kroah-Hartman.
7c7a6077 c110cdb1

+131 -125
+83 -60
drivers/misc/mei/bus.c
··· 209 209 EXPORT_SYMBOL_GPL(mei_cldev_recv); 210 210 211 211 /** 212 - * mei_cl_bus_event_work - dispatch rx event for a bus device 213 - * and schedule new work 212 + * mei_cl_bus_rx_work - dispatch rx event for a bus device 214 213 * 215 214 * @work: work 216 215 */ 217 - static void mei_cl_bus_event_work(struct work_struct *work) 216 + static void mei_cl_bus_rx_work(struct work_struct *work) 218 217 { 219 218 struct mei_cl_device *cldev; 220 219 struct mei_device *bus; 221 220 222 - cldev = container_of(work, struct mei_cl_device, event_work); 221 + cldev = container_of(work, struct mei_cl_device, rx_work); 223 222 224 223 bus = cldev->bus; 225 224 226 - if (cldev->event_cb) 227 - cldev->event_cb(cldev, cldev->events); 225 + if (cldev->rx_cb) 226 + cldev->rx_cb(cldev); 228 227 229 - cldev->events = 0; 228 + mutex_lock(&bus->device_lock); 229 + mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); 230 + mutex_unlock(&bus->device_lock); 231 + } 230 232 231 - /* Prepare for the next read */ 232 - if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) { 233 - mutex_lock(&bus->device_lock); 234 - mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); 235 - mutex_unlock(&bus->device_lock); 236 - } 233 + /** 234 + * mei_cl_bus_notif_work - dispatch FW notif event for a bus device 235 + * 236 + * @work: work 237 + */ 238 + static void mei_cl_bus_notif_work(struct work_struct *work) 239 + { 240 + struct mei_cl_device *cldev; 241 + 242 + cldev = container_of(work, struct mei_cl_device, notif_work); 243 + 244 + if (cldev->notif_cb) 245 + cldev->notif_cb(cldev); 237 246 } 238 247 239 248 /** ··· 257 248 { 258 249 struct mei_cl_device *cldev = cl->cldev; 259 250 260 - if (!cldev || !cldev->event_cb) 261 - return false; 262 - 263 - if (!(cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF))) 251 + if (!cldev || !cldev->notif_cb) 264 252 return false; 265 253 266 254 if (!cl->notify_ev) 267 255 return false; 268 256 269 - set_bit(MEI_CL_EVENT_NOTIF, &cldev->events); 270 - 271 - 
schedule_work(&cldev->event_work); 257 + schedule_work(&cldev->notif_work); 272 258 273 259 cl->notify_ev = false; 274 260 ··· 271 267 } 272 268 273 269 /** 274 - * mei_cl_bus_rx_event - schedule rx event 270 + * mei_cl_bus_rx_event - schedule rx event 275 271 * 276 272 * @cl: host client 277 273 * ··· 282 278 { 283 279 struct mei_cl_device *cldev = cl->cldev; 284 280 285 - if (!cldev || !cldev->event_cb) 281 + if (!cldev || !cldev->rx_cb) 286 282 return false; 287 283 288 - if (!(cldev->events_mask & BIT(MEI_CL_EVENT_RX))) 289 - return false; 290 - 291 - set_bit(MEI_CL_EVENT_RX, &cldev->events); 292 - 293 - schedule_work(&cldev->event_work); 284 + schedule_work(&cldev->rx_work); 294 285 295 286 return true; 296 287 } 297 288 298 289 /** 299 - * mei_cldev_register_event_cb - register event callback 290 + * mei_cldev_register_rx_cb - register Rx event callback 300 291 * 301 292 * @cldev: me client devices 302 - * @event_cb: callback function 303 - * @events_mask: requested events bitmask 293 + * @rx_cb: callback function 304 294 * 305 295 * Return: 0 on success 306 296 * -EALREADY if an callback is already registered 307 297 * <0 on other errors 308 298 */ 309 - int mei_cldev_register_event_cb(struct mei_cl_device *cldev, 310 - unsigned long events_mask, 311 - mei_cldev_event_cb_t event_cb) 299 + int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb) 312 300 { 313 301 struct mei_device *bus = cldev->bus; 314 302 int ret; 315 303 316 - if (cldev->event_cb) 304 + if (!rx_cb) 305 + return -EINVAL; 306 + if (cldev->rx_cb) 317 307 return -EALREADY; 318 308 319 - cldev->events = 0; 320 - cldev->events_mask = events_mask; 321 - cldev->event_cb = event_cb; 322 - INIT_WORK(&cldev->event_work, mei_cl_bus_event_work); 309 + cldev->rx_cb = rx_cb; 310 + INIT_WORK(&cldev->rx_work, mei_cl_bus_rx_work); 323 311 324 - if (cldev->events_mask & BIT(MEI_CL_EVENT_RX)) { 325 - mutex_lock(&bus->device_lock); 326 - ret = mei_cl_read_start(cldev->cl, 
mei_cl_mtu(cldev->cl), NULL); 327 - mutex_unlock(&bus->device_lock); 328 - if (ret && ret != -EBUSY) 329 - return ret; 330 - } 331 - 332 - if (cldev->events_mask & BIT(MEI_CL_EVENT_NOTIF)) { 333 - mutex_lock(&bus->device_lock); 334 - ret = mei_cl_notify_request(cldev->cl, NULL, event_cb ? 1 : 0); 335 - mutex_unlock(&bus->device_lock); 336 - if (ret) 337 - return ret; 338 - } 312 + mutex_lock(&bus->device_lock); 313 + ret = mei_cl_read_start(cldev->cl, mei_cl_mtu(cldev->cl), NULL); 314 + mutex_unlock(&bus->device_lock); 315 + if (ret && ret != -EBUSY) 316 + return ret; 339 317 340 318 return 0; 341 319 } 342 - EXPORT_SYMBOL_GPL(mei_cldev_register_event_cb); 320 + EXPORT_SYMBOL_GPL(mei_cldev_register_rx_cb); 321 + 322 + /** 323 + * mei_cldev_register_notif_cb - register FW notification event callback 324 + * 325 + * @cldev: me client devices 326 + * @notif_cb: callback function 327 + * 328 + * Return: 0 on success 329 + * -EALREADY if an callback is already registered 330 + * <0 on other errors 331 + */ 332 + int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, 333 + mei_cldev_cb_t notif_cb) 334 + { 335 + struct mei_device *bus = cldev->bus; 336 + int ret; 337 + 338 + if (!notif_cb) 339 + return -EINVAL; 340 + 341 + if (cldev->notif_cb) 342 + return -EALREADY; 343 + 344 + cldev->notif_cb = notif_cb; 345 + INIT_WORK(&cldev->notif_work, mei_cl_bus_notif_work); 346 + 347 + mutex_lock(&bus->device_lock); 348 + ret = mei_cl_notify_request(cldev->cl, NULL, 1); 349 + mutex_unlock(&bus->device_lock); 350 + if (ret) 351 + return ret; 352 + 353 + return 0; 354 + } 355 + EXPORT_SYMBOL_GPL(mei_cldev_register_notif_cb); 343 356 344 357 /** 345 358 * mei_cldev_get_drvdata - driver data getter ··· 491 470 cl = cldev->cl; 492 471 493 472 bus = cldev->bus; 494 - 495 - cldev->event_cb = NULL; 496 473 497 474 mutex_lock(&bus->device_lock); 498 475 ··· 638 619 if (!cldev || !dev->driver) 639 620 return 0; 640 621 641 - if (cldev->event_cb) { 642 - cldev->event_cb = NULL; 643 - 
cancel_work_sync(&cldev->event_work); 622 + if (cldev->rx_cb) { 623 + cancel_work_sync(&cldev->rx_work); 624 + cldev->rx_cb = NULL; 625 + } 626 + if (cldev->notif_cb) { 627 + cancel_work_sync(&cldev->notif_work); 628 + cldev->notif_cb = NULL; 644 629 } 645 630 646 631 cldrv = to_mei_cl_driver(dev->driver);
+5
drivers/misc/mei/client.c
··· 673 673 list_del_init(&cl->link); 674 674 675 675 cl->state = MEI_FILE_UNINITIALIZED; 676 + cl->writing_state = MEI_IDLE; 677 + 678 + WARN_ON(!list_empty(&cl->rd_completed) || 679 + !list_empty(&cl->rd_pending) || 680 + !list_empty(&cl->link)); 676 681 677 682 return 0; 678 683 }
+18 -22
drivers/nfc/mei_phy.c
··· 297 297 } 298 298 299 299 300 - static void nfc_mei_event_cb(struct mei_cl_device *cldev, u32 events) 300 + static void nfc_mei_rx_cb(struct mei_cl_device *cldev) 301 301 { 302 302 struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev); 303 + struct sk_buff *skb; 304 + int reply_size; 303 305 304 306 if (!phy) 305 307 return; ··· 309 307 if (phy->hard_fault != 0) 310 308 return; 311 309 312 - if (events & BIT(MEI_CL_EVENT_RX)) { 313 - struct sk_buff *skb; 314 - int reply_size; 310 + skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL); 311 + if (!skb) 312 + return; 315 313 316 - skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL); 317 - if (!skb) 318 - return; 319 - 320 - reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ); 321 - if (reply_size < MEI_NFC_HEADER_SIZE) { 322 - kfree_skb(skb); 323 - return; 324 - } 325 - 326 - skb_put(skb, reply_size); 327 - skb_pull(skb, MEI_NFC_HEADER_SIZE); 328 - 329 - MEI_DUMP_SKB_IN("mei frame read", skb); 330 - 331 - nfc_hci_recv_frame(phy->hdev, skb); 314 + reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ); 315 + if (reply_size < MEI_NFC_HEADER_SIZE) { 316 + kfree_skb(skb); 317 + return; 332 318 } 319 + 320 + skb_put(skb, reply_size); 321 + skb_pull(skb, MEI_NFC_HEADER_SIZE); 322 + 323 + MEI_DUMP_SKB_IN("mei frame read", skb); 324 + 325 + nfc_hci_recv_frame(phy->hdev, skb); 333 326 } 334 327 335 328 static int nfc_mei_phy_enable(void *phy_id) ··· 355 358 goto err; 356 359 } 357 360 358 - r = mei_cldev_register_event_cb(phy->cldev, BIT(MEI_CL_EVENT_RX), 359 - nfc_mei_event_cb); 361 + r = mei_cldev_register_rx_cb(phy->cldev, nfc_mei_rx_cb); 360 362 if (r) { 361 363 pr_err("Event cb registration failed %d\n", r); 362 364 goto err;
+11 -25
drivers/watchdog/mei_wdt.c
··· 410 410 } 411 411 412 412 /** 413 - * mei_wdt_event_rx - callback for data receive 413 + * mei_wdt_rx - callback for data receive 414 414 * 415 415 * @cldev: bus device 416 416 */ 417 - static void mei_wdt_event_rx(struct mei_cl_device *cldev) 417 + static void mei_wdt_rx(struct mei_cl_device *cldev) 418 418 { 419 419 struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev); 420 420 struct mei_wdt_start_response res; ··· 482 482 } 483 483 484 484 /* 485 - * mei_wdt_notify_event - callback for event notification 485 + * mei_wdt_notif - callback for event notification 486 486 * 487 487 * @cldev: bus device 488 488 */ 489 - static void mei_wdt_notify_event(struct mei_cl_device *cldev) 489 + static void mei_wdt_notif(struct mei_cl_device *cldev) 490 490 { 491 491 struct mei_wdt *wdt = mei_cldev_get_drvdata(cldev); 492 492 ··· 494 494 return; 495 495 496 496 mei_wdt_register(wdt); 497 - } 498 - 499 - /** 500 - * mei_wdt_event - callback for event receive 501 - * 502 - * @cldev: bus device 503 - * @events: event mask 504 - */ 505 - static void mei_wdt_event(struct mei_cl_device *cldev, u32 events) 506 - { 507 - if (events & BIT(MEI_CL_EVENT_RX)) 508 - mei_wdt_event_rx(cldev); 509 - 510 - if (events & BIT(MEI_CL_EVENT_NOTIF)) 511 - mei_wdt_notify_event(cldev); 512 497 } 513 498 514 499 #if IS_ENABLED(CONFIG_DEBUG_FS) ··· 606 621 goto err_out; 607 622 } 608 623 609 - ret = mei_cldev_register_event_cb(wdt->cldev, 610 - BIT(MEI_CL_EVENT_RX) | 611 - BIT(MEI_CL_EVENT_NOTIF), 612 - mei_wdt_event); 624 + ret = mei_cldev_register_rx_cb(wdt->cldev, mei_wdt_rx); 625 + if (ret) { 626 + dev_err(&cldev->dev, "Could not reg rx event ret=%d\n", ret); 627 + goto err_disable; 628 + } 613 629 630 + ret = mei_cldev_register_notif_cb(wdt->cldev, mei_wdt_notif); 614 631 /* on legacy devices notification is not supported 615 - * this doesn't fail the registration for RX event 616 632 */ 617 633 if (ret && ret != -EOPNOTSUPP) { 618 - dev_err(&cldev->dev, "Could not register event ret=%d\n", ret); 
634 + dev_err(&cldev->dev, "Could not reg notif event ret=%d\n", ret); 619 635 goto err_disable; 620 636 } 621 637
+14 -18
include/linux/mei_cl_bus.h
··· 8 8 struct mei_cl_device; 9 9 struct mei_device; 10 10 11 - typedef void (*mei_cldev_event_cb_t)(struct mei_cl_device *cldev, 12 - u32 events); 11 + typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev); 13 12 14 13 /** 15 14 * struct mei_cl_device - MEI device handle ··· 23 24 * @me_cl: me client 24 25 * @cl: mei client 25 26 * @name: device name 26 - * @event_work: async work to execute event callback 27 - * @event_cb: Drivers register this callback to get asynchronous ME 28 - * events (e.g. Rx buffer pending) notifications. 29 - * @events_mask: Events bit mask requested by driver. 30 - * @events: Events bitmask sent to the driver. 27 + * @rx_work: async work to execute Rx event callback 28 + * @rx_cb: Drivers register this callback to get asynchronous ME 29 + * Rx buffer pending notifications. 30 + * @notif_work: async work to execute FW notif event callback 31 + * @notif_cb: Drivers register this callback to get asynchronous ME 32 + * FW notification pending notifications. 
31 33 * 32 34 * @do_match: wheather device can be matched with a driver 33 35 * @is_added: device is already scanned ··· 43 43 struct mei_cl *cl; 44 44 char name[MEI_CL_NAME_SIZE]; 45 45 46 - struct work_struct event_work; 47 - mei_cldev_event_cb_t event_cb; 48 - unsigned long events_mask; 49 - unsigned long events; 46 + struct work_struct rx_work; 47 + mei_cldev_cb_t rx_cb; 48 + struct work_struct notif_work; 49 + mei_cldev_cb_t notif_cb; 50 50 51 51 unsigned int do_match:1; 52 52 unsigned int is_added:1; ··· 88 88 ssize_t mei_cldev_send(struct mei_cl_device *cldev, u8 *buf, size_t length); 89 89 ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length); 90 90 91 - int mei_cldev_register_event_cb(struct mei_cl_device *cldev, 92 - unsigned long event_mask, 93 - mei_cldev_event_cb_t read_cb); 94 - 95 - #define MEI_CL_EVENT_RX 0 96 - #define MEI_CL_EVENT_TX 1 97 - #define MEI_CL_EVENT_NOTIF 2 91 + int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb); 92 + int mei_cldev_register_notif_cb(struct mei_cl_device *cldev, 93 + mei_cldev_cb_t notif_cb); 98 94 99 95 const uuid_le *mei_cldev_uuid(const struct mei_cl_device *cldev); 100 96 u8 mei_cldev_ver(const struct mei_cl_device *cldev);