Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

usb: typec: tcpm: Migrate workqueue to RT priority for processing events

"tReceiverResponse 15 ms Section 6.6.2
The receiver of a Message requiring a response Shall respond
within tReceiverResponse in order to ensure that the
sender’s SenderResponseTimer does not expire."

When the CPU complex is busy running other lower-priority
work items, TCPM's work queue sometimes does not get scheduled
on time to meet the above requirement from the spec.
Moving to the kthread_work APIs to run with real-time priority.

Further, as observed in 1ff688209e2e, moving to hrtimers to
overcome scheduling latency while scheduling the delayed work.

TCPM has three work streams:
1. tcpm_state_machine
2. vdm_state_machine
3. event_work

tcpm_state_machine and vdm_state_machine both schedule work in
the future, i.e. delayed. Hence each of them has a corresponding
hrtimer: tcpm_state_machine_timer & vdm_state_machine_timer.

When work is queued right away, kthread_queue_work is used.
Otherwise, the relevant timer is programmed and made to queue
the kthread_work upon timer expiry.

kthread_create_worker only creates one kthread worker thread,
hence the single-threadedness of the workqueue is retained.

Signed-off-by: Badhri Jagan Sridharan <badhri@google.com>
Reviewed-by: Guenter Roeck <linux@roeck-us.net>
Reviewed-by: Heikki Krogerus <heikki.krogerus@linux.intel.com>
Link: https://lore.kernel.org/r/20200818192758.2562908-1-badhri@google.com
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

Authored by Badhri Jagan Sridharan and committed by Greg Kroah-Hartman
3ed8e1c2 aefc66af

+87 -44
+87 -44
drivers/usb/typec/tcpm/tcpm.c
··· 8 8 #include <linux/completion.h> 9 9 #include <linux/debugfs.h> 10 10 #include <linux/device.h> 11 + #include <linux/hrtimer.h> 11 12 #include <linux/jiffies.h> 12 13 #include <linux/kernel.h> 14 + #include <linux/kthread.h> 13 15 #include <linux/module.h> 14 16 #include <linux/mutex.h> 15 17 #include <linux/power_supply.h> ··· 30 28 #include <linux/usb/role.h> 31 29 #include <linux/usb/tcpm.h> 32 30 #include <linux/usb/typec_altmode.h> 33 - #include <linux/workqueue.h> 31 + 32 + #include <uapi/linux/sched/types.h> 34 33 35 34 #define FOREACH_STATE(S) \ 36 35 S(INVALID_STATE), \ ··· 206 203 struct device *dev; 207 204 208 205 struct mutex lock; /* tcpm state machine lock */ 209 - struct workqueue_struct *wq; 206 + struct kthread_worker *wq; 210 207 211 208 struct typec_capability typec_caps; 212 209 struct typec_port *typec_port; ··· 250 247 enum tcpm_state prev_state; 251 248 enum tcpm_state state; 252 249 enum tcpm_state delayed_state; 253 - unsigned long delayed_runtime; 250 + ktime_t delayed_runtime; 254 251 unsigned long delay_ms; 255 252 256 253 spinlock_t pd_event_lock; 257 254 u32 pd_events; 258 255 259 - struct work_struct event_work; 260 - struct delayed_work state_machine; 261 - struct delayed_work vdm_state_machine; 256 + struct kthread_work event_work; 257 + struct hrtimer state_machine_timer; 258 + struct kthread_work state_machine; 259 + struct hrtimer vdm_state_machine_timer; 260 + struct kthread_work vdm_state_machine; 262 261 bool state_machine_running; 263 262 264 263 struct completion tx_complete; ··· 345 340 }; 346 341 347 342 struct pd_rx_event { 348 - struct work_struct work; 343 + struct kthread_work work; 349 344 struct tcpm_port *port; 350 345 struct pd_message msg; 351 346 }; ··· 919 914 return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg); 920 915 } 921 916 917 + static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms) 918 + { 919 + if (delay_ms) { 920 + hrtimer_start(&port->state_machine_timer, 
ms_to_ktime(delay_ms), HRTIMER_MODE_REL); 921 + } else { 922 + hrtimer_cancel(&port->state_machine_timer); 923 + kthread_queue_work(port->wq, &port->state_machine); 924 + } 925 + } 926 + 927 + static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms) 928 + { 929 + if (delay_ms) { 930 + hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms), 931 + HRTIMER_MODE_REL); 932 + } else { 933 + hrtimer_cancel(&port->vdm_state_machine_timer); 934 + kthread_queue_work(port->wq, &port->vdm_state_machine); 935 + } 936 + } 937 + 922 938 static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state, 923 939 unsigned int delay_ms) 924 940 { ··· 948 922 tcpm_states[port->state], tcpm_states[state], 949 923 delay_ms); 950 924 port->delayed_state = state; 951 - mod_delayed_work(port->wq, &port->state_machine, 952 - msecs_to_jiffies(delay_ms)); 953 - port->delayed_runtime = jiffies + msecs_to_jiffies(delay_ms); 925 + mod_tcpm_delayed_work(port, delay_ms); 926 + port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms)); 954 927 port->delay_ms = delay_ms; 955 928 } else { 956 929 tcpm_log(port, "state change %s -> %s", ··· 964 939 * machine. 
965 940 */ 966 941 if (!port->state_machine_running) 967 - mod_delayed_work(port->wq, &port->state_machine, 0); 942 + mod_tcpm_delayed_work(port, 0); 968 943 } 969 944 } 970 945 ··· 985 960 enum pd_msg_request message) 986 961 { 987 962 port->queued_message = message; 988 - mod_delayed_work(port->wq, &port->state_machine, 0); 963 + mod_tcpm_delayed_work(port, 0); 989 964 } 990 965 991 966 /* ··· 1006 981 port->vdm_retries = 0; 1007 982 port->vdm_state = VDM_STATE_READY; 1008 983 1009 - mod_delayed_work(port->wq, &port->vdm_state_machine, 0); 984 + mod_vdm_delayed_work(port, 0); 1010 985 } 1011 986 1012 987 static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header, ··· 1269 1244 port->vdm_state = VDM_STATE_WAIT_RSP_BUSY; 1270 1245 port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) | 1271 1246 CMDT_INIT; 1272 - mod_delayed_work(port->wq, &port->vdm_state_machine, 1273 - msecs_to_jiffies(PD_T_VDM_BUSY)); 1247 + mod_vdm_delayed_work(port, PD_T_VDM_BUSY); 1274 1248 return; 1275 1249 } 1276 1250 port->vdm_state = VDM_STATE_DONE; ··· 1414 1390 port->vdm_retries = 0; 1415 1391 port->vdm_state = VDM_STATE_BUSY; 1416 1392 timeout = vdm_ready_timeout(port->vdo_data[0]); 1417 - mod_delayed_work(port->wq, &port->vdm_state_machine, 1418 - timeout); 1393 + mod_vdm_delayed_work(port, timeout); 1419 1394 } 1420 1395 break; 1421 1396 case VDM_STATE_WAIT_RSP_BUSY: ··· 1443 1420 } 1444 1421 } 1445 1422 1446 - static void vdm_state_machine_work(struct work_struct *work) 1423 + static void vdm_state_machine_work(struct kthread_work *work) 1447 1424 { 1448 - struct tcpm_port *port = container_of(work, struct tcpm_port, 1449 - vdm_state_machine.work); 1425 + struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine); 1450 1426 enum vdm_states prev_state; 1451 1427 1452 1428 mutex_lock(&port->lock); ··· 1613 1591 struct tcpm_port *port = typec_altmode_get_drvdata(altmode); 1614 1592 1615 1593 tcpm_queue_vdm_unlocked(port, header, data, count - 1); 1594 + 
1616 1595 return 0; 1617 1596 } 1618 1597 ··· 2028 2005 } 2029 2006 } 2030 2007 2031 - static void tcpm_pd_rx_handler(struct work_struct *work) 2008 + static void tcpm_pd_rx_handler(struct kthread_work *work) 2032 2009 { 2033 2010 struct pd_rx_event *event = container_of(work, 2034 2011 struct pd_rx_event, work); ··· 2090 2067 if (!event) 2091 2068 return; 2092 2069 2093 - INIT_WORK(&event->work, tcpm_pd_rx_handler); 2070 + kthread_init_work(&event->work, tcpm_pd_rx_handler); 2094 2071 event->port = port; 2095 2072 memcpy(&event->msg, msg, sizeof(*msg)); 2096 - queue_work(port->wq, &event->work); 2073 + kthread_queue_work(port->wq, &event->work); 2097 2074 } 2098 2075 EXPORT_SYMBOL_GPL(tcpm_pd_receive); 2099 2076 ··· 2146 2123 } while (port->queued_message != PD_MSG_NONE); 2147 2124 2148 2125 if (port->delayed_state != INVALID_STATE) { 2149 - if (time_is_after_jiffies(port->delayed_runtime)) { 2150 - mod_delayed_work(port->wq, &port->state_machine, 2151 - port->delayed_runtime - jiffies); 2126 + if (ktime_after(port->delayed_runtime, ktime_get())) { 2127 + mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime, 2128 + ktime_get()))); 2152 2129 return true; 2153 2130 } 2154 2131 port->delayed_state = INVALID_STATE; ··· 3281 3258 case SNK_DISCOVERY_DEBOUNCE_DONE: 3282 3259 if (!tcpm_port_is_disconnected(port) && 3283 3260 tcpm_port_is_sink(port) && 3284 - time_is_after_jiffies(port->delayed_runtime)) { 3261 + ktime_after(port->delayed_runtime, ktime_get())) { 3285 3262 tcpm_set_state(port, SNK_DISCOVERY, 3286 - jiffies_to_msecs(port->delayed_runtime - 3287 - jiffies)); 3263 + ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get()))); 3288 3264 break; 3289 3265 } 3290 3266 tcpm_set_state(port, unattached_state(port), 0); ··· 3678 3656 } 3679 3657 } 3680 3658 3681 - static void tcpm_state_machine_work(struct work_struct *work) 3659 + static void tcpm_state_machine_work(struct kthread_work *work) 3682 3660 { 3683 - struct tcpm_port *port = 
container_of(work, struct tcpm_port, 3684 - state_machine.work); 3661 + struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine); 3685 3662 enum tcpm_state prev_state; 3686 3663 3687 3664 mutex_lock(&port->lock); ··· 4040 4019 0); 4041 4020 } 4042 4021 4043 - static void tcpm_pd_event_handler(struct work_struct *work) 4022 + static void tcpm_pd_event_handler(struct kthread_work *work) 4044 4023 { 4045 4024 struct tcpm_port *port = container_of(work, struct tcpm_port, 4046 4025 event_work); ··· 4081 4060 spin_lock(&port->pd_event_lock); 4082 4061 port->pd_events |= TCPM_CC_EVENT; 4083 4062 spin_unlock(&port->pd_event_lock); 4084 - queue_work(port->wq, &port->event_work); 4063 + kthread_queue_work(port->wq, &port->event_work); 4085 4064 } 4086 4065 EXPORT_SYMBOL_GPL(tcpm_cc_change); 4087 4066 ··· 4090 4069 spin_lock(&port->pd_event_lock); 4091 4070 port->pd_events |= TCPM_VBUS_EVENT; 4092 4071 spin_unlock(&port->pd_event_lock); 4093 - queue_work(port->wq, &port->event_work); 4072 + kthread_queue_work(port->wq, &port->event_work); 4094 4073 } 4095 4074 EXPORT_SYMBOL_GPL(tcpm_vbus_change); 4096 4075 ··· 4099 4078 spin_lock(&port->pd_event_lock); 4100 4079 port->pd_events = TCPM_RESET_EVENT; 4101 4080 spin_unlock(&port->pd_event_lock); 4102 - queue_work(port->wq, &port->event_work); 4081 + kthread_queue_work(port->wq, &port->event_work); 4103 4082 } 4104 4083 EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset); 4105 4084 ··· 4807 4786 return PTR_ERR_OR_ZERO(port->psy); 4808 4787 } 4809 4788 4789 + static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer) 4790 + { 4791 + struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer); 4792 + 4793 + kthread_queue_work(port->wq, &port->state_machine); 4794 + return HRTIMER_NORESTART; 4795 + } 4796 + 4797 + static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer) 4798 + { 4799 + struct tcpm_port *port = container_of(timer, struct tcpm_port, 
vdm_state_machine_timer); 4800 + 4801 + kthread_queue_work(port->wq, &port->vdm_state_machine); 4802 + return HRTIMER_NORESTART; 4803 + } 4804 + 4810 4805 struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) 4811 4806 { 4812 4807 struct tcpm_port *port; ··· 4844 4807 mutex_init(&port->lock); 4845 4808 mutex_init(&port->swap_lock); 4846 4809 4847 - port->wq = create_singlethread_workqueue(dev_name(dev)); 4848 - if (!port->wq) 4849 - return ERR_PTR(-ENOMEM); 4850 - INIT_DELAYED_WORK(&port->state_machine, tcpm_state_machine_work); 4851 - INIT_DELAYED_WORK(&port->vdm_state_machine, vdm_state_machine_work); 4852 - INIT_WORK(&port->event_work, tcpm_pd_event_handler); 4810 + port->wq = kthread_create_worker(0, dev_name(dev)); 4811 + if (IS_ERR(port->wq)) 4812 + return ERR_CAST(port->wq); 4813 + sched_set_fifo(port->wq->task); 4814 + 4815 + kthread_init_work(&port->state_machine, tcpm_state_machine_work); 4816 + kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work); 4817 + kthread_init_work(&port->event_work, tcpm_pd_event_handler); 4818 + hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 4819 + port->state_machine_timer.function = state_machine_timer_handler; 4820 + hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 4821 + port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler; 4853 4822 4854 4823 spin_lock_init(&port->pd_event_lock); 4855 4824 ··· 4907 4864 usb_role_switch_put(port->role_sw); 4908 4865 out_destroy_wq: 4909 4866 tcpm_debugfs_exit(port); 4910 - destroy_workqueue(port->wq); 4867 + kthread_destroy_worker(port->wq); 4911 4868 return ERR_PTR(err); 4912 4869 } 4913 4870 EXPORT_SYMBOL_GPL(tcpm_register_port); ··· 4922 4879 typec_unregister_port(port->typec_port); 4923 4880 usb_role_switch_put(port->role_sw); 4924 4881 tcpm_debugfs_exit(port); 4925 - destroy_workqueue(port->wq); 4882 + kthread_destroy_worker(port->wq); 4926 4883 } 4927 4884 
EXPORT_SYMBOL_GPL(tcpm_unregister_port); 4928 4885