Linux kernel mirror (for testing) git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
kernel os linux

gpu: host1x: Add syncpoint wait and interrupts

Add support for sync point interrupts and sync point wait. Sync
point wait uses interrupts for unblocking waits.

Signed-off-by: Arto Merilainen <amerilainen@nvidia.com>
Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com>
Reviewed-by: Thierry Reding <thierry.reding@avionic-design.de>
Tested-by: Thierry Reding <thierry.reding@avionic-design.de>
Tested-by: Erik Faye-Lund <kusmabite@gmail.com>
Signed-off-by: Thierry Reding <thierry.reding@avionic-design.de>

Authored by Terje Bergstrom and committed by Thierry Reding
7ede0b0b 75471687

+846
+1
drivers/gpu/host1x/Makefile
··· 3 3 host1x-y = \ 4 4 syncpt.o \ 5 5 dev.o \ 6 + intr.o \ 6 7 hw/host1x01.o 7 8 8 9 obj-$(CONFIG_TEGRA_HOST1X) += host1x.o
+12
drivers/gpu/host1x/dev.c
··· 28 28 #include <trace/events/host1x.h> 29 29 30 30 #include "dev.h" 31 + #include "intr.h" 31 32 #include "hw/host1x01.h" 32 33 33 34 void host1x_sync_writel(struct host1x *host1x, u32 v, u32 r) ··· 124 123 return err; 125 124 } 126 125 126 + err = host1x_intr_init(host, syncpt_irq); 127 + if (err) { 128 + dev_err(&pdev->dev, "failed to initialize interrupts\n"); 129 + goto fail_deinit_syncpt; 130 + } 131 + 127 132 return 0; 133 + 134 + fail_deinit_syncpt: 135 + host1x_syncpt_deinit(host); 136 + return err; 128 137 } 129 138 130 139 static int __exit host1x_remove(struct platform_device *pdev) 131 140 { 132 141 struct host1x *host = platform_get_drvdata(pdev); 133 142 143 + host1x_intr_deinit(host); 134 144 host1x_syncpt_deinit(host); 135 145 clk_disable_unprepare(host->clk); 136 146
+51
drivers/gpu/host1x/dev.h
··· 21 21 #include <linux/device.h> 22 22 23 23 #include "syncpt.h" 24 + #include "intr.h" 24 25 25 26 struct host1x_syncpt; 26 27 ··· 32 31 u32 (*load)(struct host1x_syncpt *syncpt); 33 32 void (*cpu_incr)(struct host1x_syncpt *syncpt); 34 33 int (*patch_wait)(struct host1x_syncpt *syncpt, void *patch_addr); 34 + }; 35 + 36 + struct host1x_intr_ops { 37 + int (*init_host_sync)(struct host1x *host, u32 cpm, 38 + void (*syncpt_thresh_work)(struct work_struct *work)); 39 + void (*set_syncpt_threshold)( 40 + struct host1x *host, u32 id, u32 thresh); 41 + void (*enable_syncpt_intr)(struct host1x *host, u32 id); 42 + void (*disable_syncpt_intr)(struct host1x *host, u32 id); 43 + void (*disable_all_syncpt_intrs)(struct host1x *host); 44 + int (*free_syncpt_irq)(struct host1x *host); 35 45 }; 36 46 37 47 struct host1x_info { ··· 62 50 struct device *dev; 63 51 struct clk *clk; 64 52 53 + struct mutex intr_mutex; 54 + struct workqueue_struct *intr_wq; 55 + int intr_syncpt_irq; 56 + 65 57 const struct host1x_syncpt_ops *syncpt_op; 58 + const struct host1x_intr_ops *intr_op; 59 + 66 60 }; 67 61 68 62 void host1x_sync_writel(struct host1x *host1x, u32 r, u32 v); ··· 111 93 return host->syncpt_op->patch_wait(sp, patch_addr); 112 94 } 113 95 96 + static inline int host1x_hw_intr_init_host_sync(struct host1x *host, u32 cpm, 97 + void (*syncpt_thresh_work)(struct work_struct *)) 98 + { 99 + return host->intr_op->init_host_sync(host, cpm, syncpt_thresh_work); 100 + } 101 + 102 + static inline void host1x_hw_intr_set_syncpt_threshold(struct host1x *host, 103 + u32 id, u32 thresh) 104 + { 105 + host->intr_op->set_syncpt_threshold(host, id, thresh); 106 + } 107 + 108 + static inline void host1x_hw_intr_enable_syncpt_intr(struct host1x *host, 109 + u32 id) 110 + { 111 + host->intr_op->enable_syncpt_intr(host, id); 112 + } 113 + 114 + static inline void host1x_hw_intr_disable_syncpt_intr(struct host1x *host, 115 + u32 id) 116 + { 117 + host->intr_op->disable_syncpt_intr(host, id); 118 
+ } 119 + 120 + static inline void host1x_hw_intr_disable_all_syncpt_intrs(struct host1x *host) 121 + { 122 + host->intr_op->disable_all_syncpt_intrs(host); 123 + } 124 + 125 + static inline int host1x_hw_intr_free_syncpt_irq(struct host1x *host) 126 + { 127 + return host->intr_op->free_syncpt_irq(host); 128 + } 114 129 #endif
+2
drivers/gpu/host1x/hw/host1x01.c
··· 21 21 #include "hw/host1x01_hardware.h" 22 22 23 23 /* include code */ 24 + #include "hw/intr_hw.c" 24 25 #include "hw/syncpt_hw.c" 25 26 26 27 #include "dev.h" ··· 29 28 int host1x01_init(struct host1x *host) 30 29 { 31 30 host->syncpt_op = &host1x_syncpt_ops; 31 + host->intr_op = &host1x_intr_ops; 32 32 33 33 return 0; 34 34 }
+42
drivers/gpu/host1x/hw/hw_host1x01_sync.h
··· 59 59 } 60 60 #define HOST1X_SYNC_SYNCPT(id) \ 61 61 host1x_sync_syncpt_r(id) 62 + static inline u32 host1x_sync_syncpt_thresh_cpu0_int_status_r(unsigned int id) 63 + { 64 + return 0x40 + id * REGISTER_STRIDE; 65 + } 66 + #define HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(id) \ 67 + host1x_sync_syncpt_thresh_cpu0_int_status_r(id) 68 + static inline u32 host1x_sync_syncpt_thresh_int_disable_r(unsigned int id) 69 + { 70 + return 0x60 + id * REGISTER_STRIDE; 71 + } 72 + #define HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(id) \ 73 + host1x_sync_syncpt_thresh_int_disable_r(id) 74 + static inline u32 host1x_sync_syncpt_thresh_int_enable_cpu0_r(unsigned int id) 75 + { 76 + return 0x68 + id * REGISTER_STRIDE; 77 + } 78 + #define HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(id) \ 79 + host1x_sync_syncpt_thresh_int_enable_cpu0_r(id) 80 + static inline u32 host1x_sync_usec_clk_r(void) 81 + { 82 + return 0x1a4; 83 + } 84 + #define HOST1X_SYNC_USEC_CLK \ 85 + host1x_sync_usec_clk_r() 86 + static inline u32 host1x_sync_ctxsw_timeout_cfg_r(void) 87 + { 88 + return 0x1a8; 89 + } 90 + #define HOST1X_SYNC_CTXSW_TIMEOUT_CFG \ 91 + host1x_sync_ctxsw_timeout_cfg_r() 92 + static inline u32 host1x_sync_ip_busy_timeout_r(void) 93 + { 94 + return 0x1bc; 95 + } 96 + #define HOST1X_SYNC_IP_BUSY_TIMEOUT \ 97 + host1x_sync_ip_busy_timeout_r() 98 + static inline u32 host1x_sync_syncpt_int_thresh_r(unsigned int id) 99 + { 100 + return 0x500 + id * REGISTER_STRIDE; 101 + } 102 + #define HOST1X_SYNC_SYNCPT_INT_THRESH(id) \ 103 + host1x_sync_syncpt_int_thresh_r(id) 62 104 static inline u32 host1x_sync_syncpt_base_r(unsigned int id) 63 105 { 64 106 return 0x600 + id * REGISTER_STRIDE;
+143
drivers/gpu/host1x/hw/intr_hw.c
··· 1 + /* 2 + * Tegra host1x Interrupt Management 3 + * 4 + * Copyright (C) 2010 Google, Inc. 5 + * Copyright (c) 2010-2013, NVIDIA Corporation. 6 + * 7 + * This program is free software; you can redistribute it and/or modify it 8 + * under the terms and conditions of the GNU General Public License, 9 + * version 2, as published by the Free Software Foundation. 10 + * 11 + * This program is distributed in the hope it will be useful, but WITHOUT 12 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 13 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 14 + * more details. 15 + * 16 + * You should have received a copy of the GNU General Public License 17 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 18 + */ 19 + 20 + #include <linux/interrupt.h> 21 + #include <linux/irq.h> 22 + #include <linux/io.h> 23 + #include <asm/mach/irq.h> 24 + 25 + #include "intr.h" 26 + #include "dev.h" 27 + 28 + /* 29 + * Sync point threshold interrupt service function 30 + * Handles sync point threshold triggers, in interrupt context 31 + */ 32 + static void host1x_intr_syncpt_handle(struct host1x_syncpt *syncpt) 33 + { 34 + unsigned int id = syncpt->id; 35 + struct host1x *host = syncpt->host; 36 + 37 + host1x_sync_writel(host, BIT_MASK(id), 38 + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id))); 39 + host1x_sync_writel(host, BIT_MASK(id), 40 + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id))); 41 + 42 + queue_work(host->intr_wq, &syncpt->intr.work); 43 + } 44 + 45 + static irqreturn_t syncpt_thresh_isr(int irq, void *dev_id) 46 + { 47 + struct host1x *host = dev_id; 48 + unsigned long reg; 49 + int i, id; 50 + 51 + for (i = 0; i <= BIT_WORD(host->info->nb_pts); i++) { 52 + reg = host1x_sync_readl(host, 53 + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i)); 54 + for_each_set_bit(id, &reg, BITS_PER_LONG) { 55 + struct host1x_syncpt *syncpt = 56 + host->syncpt + (i * BITS_PER_LONG + id); 57 + 
host1x_intr_syncpt_handle(syncpt); 58 + } 59 + } 60 + 61 + return IRQ_HANDLED; 62 + } 63 + 64 + static void _host1x_intr_disable_all_syncpt_intrs(struct host1x *host) 65 + { 66 + u32 i; 67 + 68 + for (i = 0; i <= BIT_WORD(host->info->nb_pts); ++i) { 69 + host1x_sync_writel(host, 0xffffffffu, 70 + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(i)); 71 + host1x_sync_writel(host, 0xffffffffu, 72 + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(i)); 73 + } 74 + } 75 + 76 + static int _host1x_intr_init_host_sync(struct host1x *host, u32 cpm, 77 + void (*syncpt_thresh_work)(struct work_struct *)) 78 + { 79 + int i, err; 80 + 81 + host1x_hw_intr_disable_all_syncpt_intrs(host); 82 + 83 + for (i = 0; i < host->info->nb_pts; i++) 84 + INIT_WORK(&host->syncpt[i].intr.work, syncpt_thresh_work); 85 + 86 + err = devm_request_irq(host->dev, host->intr_syncpt_irq, 87 + syncpt_thresh_isr, IRQF_SHARED, 88 + "host1x_syncpt", host); 89 + if (IS_ERR_VALUE(err)) { 90 + WARN_ON(1); 91 + return err; 92 + } 93 + 94 + /* disable the ip_busy_timeout. this prevents write drops */ 95 + host1x_sync_writel(host, 0, HOST1X_SYNC_IP_BUSY_TIMEOUT); 96 + 97 + /* 98 + * increase the auto-ack timout to the maximum value. 2d will hang 99 + * otherwise on Tegra2. 
100 + */ 101 + host1x_sync_writel(host, 0xff, HOST1X_SYNC_CTXSW_TIMEOUT_CFG); 102 + 103 + /* update host clocks per usec */ 104 + host1x_sync_writel(host, cpm, HOST1X_SYNC_USEC_CLK); 105 + 106 + return 0; 107 + } 108 + 109 + static void _host1x_intr_set_syncpt_threshold(struct host1x *host, 110 + u32 id, u32 thresh) 111 + { 112 + host1x_sync_writel(host, thresh, HOST1X_SYNC_SYNCPT_INT_THRESH(id)); 113 + } 114 + 115 + static void _host1x_intr_enable_syncpt_intr(struct host1x *host, u32 id) 116 + { 117 + host1x_sync_writel(host, BIT_MASK(id), 118 + HOST1X_SYNC_SYNCPT_THRESH_INT_ENABLE_CPU0(BIT_WORD(id))); 119 + } 120 + 121 + static void _host1x_intr_disable_syncpt_intr(struct host1x *host, u32 id) 122 + { 123 + host1x_sync_writel(host, BIT_MASK(id), 124 + HOST1X_SYNC_SYNCPT_THRESH_INT_DISABLE(BIT_WORD(id))); 125 + host1x_sync_writel(host, BIT_MASK(id), 126 + HOST1X_SYNC_SYNCPT_THRESH_CPU0_INT_STATUS(BIT_WORD(id))); 127 + } 128 + 129 + static int _host1x_free_syncpt_irq(struct host1x *host) 130 + { 131 + devm_free_irq(host->dev, host->intr_syncpt_irq, host); 132 + flush_workqueue(host->intr_wq); 133 + return 0; 134 + } 135 + 136 + static const struct host1x_intr_ops host1x_intr_ops = { 137 + .init_host_sync = _host1x_intr_init_host_sync, 138 + .set_syncpt_threshold = _host1x_intr_set_syncpt_threshold, 139 + .enable_syncpt_intr = _host1x_intr_enable_syncpt_intr, 140 + .disable_syncpt_intr = _host1x_intr_disable_syncpt_intr, 141 + .disable_all_syncpt_intrs = _host1x_intr_disable_all_syncpt_intrs, 142 + .free_syncpt_irq = _host1x_free_syncpt_irq, 143 + };
+328
drivers/gpu/host1x/intr.c
··· 1 + /* 2 + * Tegra host1x Interrupt Management 3 + * 4 + * Copyright (c) 2010-2013, NVIDIA Corporation. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #include <linux/clk.h> 20 + #include <linux/interrupt.h> 21 + #include <linux/slab.h> 22 + #include <linux/irq.h> 23 + 24 + #include "dev.h" 25 + #include "intr.h" 26 + 27 + /* Wait list management */ 28 + 29 + enum waitlist_state { 30 + WLS_PENDING, 31 + WLS_REMOVED, 32 + WLS_CANCELLED, 33 + WLS_HANDLED 34 + }; 35 + 36 + static void waiter_release(struct kref *kref) 37 + { 38 + kfree(container_of(kref, struct host1x_waitlist, refcount)); 39 + } 40 + 41 + /* 42 + * add a waiter to a waiter queue, sorted by threshold 43 + * returns true if it was added at the head of the queue 44 + */ 45 + static bool add_waiter_to_queue(struct host1x_waitlist *waiter, 46 + struct list_head *queue) 47 + { 48 + struct host1x_waitlist *pos; 49 + u32 thresh = waiter->thresh; 50 + 51 + list_for_each_entry_reverse(pos, queue, list) 52 + if ((s32)(pos->thresh - thresh) <= 0) { 53 + list_add(&waiter->list, &pos->list); 54 + return false; 55 + } 56 + 57 + list_add(&waiter->list, queue); 58 + return true; 59 + } 60 + 61 + /* 62 + * run through a waiter queue for a single sync point ID 63 + * and gather all completed waiters into lists by actions 64 + */ 65 + static void remove_completed_waiters(struct list_head *head, u32 sync, 66 + struct 
list_head completed[HOST1X_INTR_ACTION_COUNT]) 67 + { 68 + struct list_head *dest; 69 + struct host1x_waitlist *waiter, *next; 70 + 71 + list_for_each_entry_safe(waiter, next, head, list) { 72 + if ((s32)(waiter->thresh - sync) > 0) 73 + break; 74 + 75 + dest = completed + waiter->action; 76 + 77 + /* PENDING->REMOVED or CANCELLED->HANDLED */ 78 + if (atomic_inc_return(&waiter->state) == WLS_HANDLED || !dest) { 79 + list_del(&waiter->list); 80 + kref_put(&waiter->refcount, waiter_release); 81 + } else 82 + list_move_tail(&waiter->list, dest); 83 + } 84 + } 85 + 86 + static void reset_threshold_interrupt(struct host1x *host, 87 + struct list_head *head, 88 + unsigned int id) 89 + { 90 + u32 thresh = 91 + list_first_entry(head, struct host1x_waitlist, list)->thresh; 92 + 93 + host1x_hw_intr_set_syncpt_threshold(host, id, thresh); 94 + host1x_hw_intr_enable_syncpt_intr(host, id); 95 + } 96 + 97 + static void action_wakeup(struct host1x_waitlist *waiter) 98 + { 99 + wait_queue_head_t *wq = waiter->data; 100 + wake_up(wq); 101 + } 102 + 103 + static void action_wakeup_interruptible(struct host1x_waitlist *waiter) 104 + { 105 + wait_queue_head_t *wq = waiter->data; 106 + wake_up_interruptible(wq); 107 + } 108 + 109 + typedef void (*action_handler)(struct host1x_waitlist *waiter); 110 + 111 + static action_handler action_handlers[HOST1X_INTR_ACTION_COUNT] = { 112 + action_wakeup, 113 + action_wakeup_interruptible, 114 + }; 115 + 116 + static void run_handlers(struct list_head completed[HOST1X_INTR_ACTION_COUNT]) 117 + { 118 + struct list_head *head = completed; 119 + int i; 120 + 121 + for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i, ++head) { 122 + action_handler handler = action_handlers[i]; 123 + struct host1x_waitlist *waiter, *next; 124 + 125 + list_for_each_entry_safe(waiter, next, head, list) { 126 + list_del(&waiter->list); 127 + handler(waiter); 128 + WARN_ON(atomic_xchg(&waiter->state, WLS_HANDLED) != 129 + WLS_REMOVED); 130 + kref_put(&waiter->refcount, 
waiter_release); 131 + } 132 + } 133 + } 134 + 135 + /* 136 + * Remove & handle all waiters that have completed for the given syncpt 137 + */ 138 + static int process_wait_list(struct host1x *host, 139 + struct host1x_syncpt *syncpt, 140 + u32 threshold) 141 + { 142 + struct list_head completed[HOST1X_INTR_ACTION_COUNT]; 143 + unsigned int i; 144 + int empty; 145 + 146 + for (i = 0; i < HOST1X_INTR_ACTION_COUNT; ++i) 147 + INIT_LIST_HEAD(completed + i); 148 + 149 + spin_lock(&syncpt->intr.lock); 150 + 151 + remove_completed_waiters(&syncpt->intr.wait_head, threshold, 152 + completed); 153 + 154 + empty = list_empty(&syncpt->intr.wait_head); 155 + if (empty) 156 + host1x_hw_intr_disable_syncpt_intr(host, syncpt->id); 157 + else 158 + reset_threshold_interrupt(host, &syncpt->intr.wait_head, 159 + syncpt->id); 160 + 161 + spin_unlock(&syncpt->intr.lock); 162 + 163 + run_handlers(completed); 164 + 165 + return empty; 166 + } 167 + 168 + /* 169 + * Sync point threshold interrupt service thread function 170 + * Handles sync point threshold triggers, in thread context 171 + */ 172 + 173 + static void syncpt_thresh_work(struct work_struct *work) 174 + { 175 + struct host1x_syncpt_intr *syncpt_intr = 176 + container_of(work, struct host1x_syncpt_intr, work); 177 + struct host1x_syncpt *syncpt = 178 + container_of(syncpt_intr, struct host1x_syncpt, intr); 179 + unsigned int id = syncpt->id; 180 + struct host1x *host = syncpt->host; 181 + 182 + (void)process_wait_list(host, syncpt, 183 + host1x_syncpt_load(host->syncpt + id)); 184 + } 185 + 186 + int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh, 187 + enum host1x_intr_action action, void *data, 188 + struct host1x_waitlist *waiter, void **ref) 189 + { 190 + struct host1x_syncpt *syncpt; 191 + int queue_was_empty; 192 + 193 + if (waiter == NULL) { 194 + pr_warn("%s: NULL waiter\n", __func__); 195 + return -EINVAL; 196 + } 197 + 198 + /* initialize a new waiter */ 199 + INIT_LIST_HEAD(&waiter->list); 200 + 
kref_init(&waiter->refcount); 201 + if (ref) 202 + kref_get(&waiter->refcount); 203 + waiter->thresh = thresh; 204 + waiter->action = action; 205 + atomic_set(&waiter->state, WLS_PENDING); 206 + waiter->data = data; 207 + waiter->count = 1; 208 + 209 + syncpt = host->syncpt + id; 210 + 211 + spin_lock(&syncpt->intr.lock); 212 + 213 + queue_was_empty = list_empty(&syncpt->intr.wait_head); 214 + 215 + if (add_waiter_to_queue(waiter, &syncpt->intr.wait_head)) { 216 + /* added at head of list - new threshold value */ 217 + host1x_hw_intr_set_syncpt_threshold(host, id, thresh); 218 + 219 + /* added as first waiter - enable interrupt */ 220 + if (queue_was_empty) 221 + host1x_hw_intr_enable_syncpt_intr(host, id); 222 + } 223 + 224 + spin_unlock(&syncpt->intr.lock); 225 + 226 + if (ref) 227 + *ref = waiter; 228 + return 0; 229 + } 230 + 231 + void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref) 232 + { 233 + struct host1x_waitlist *waiter = ref; 234 + struct host1x_syncpt *syncpt; 235 + 236 + while (atomic_cmpxchg(&waiter->state, WLS_PENDING, WLS_CANCELLED) == 237 + WLS_REMOVED) 238 + schedule(); 239 + 240 + syncpt = host->syncpt + id; 241 + (void)process_wait_list(host, syncpt, 242 + host1x_syncpt_load(host->syncpt + id)); 243 + 244 + kref_put(&waiter->refcount, waiter_release); 245 + } 246 + 247 + int host1x_intr_init(struct host1x *host, unsigned int irq_sync) 248 + { 249 + unsigned int id; 250 + u32 nb_pts = host1x_syncpt_nb_pts(host); 251 + 252 + mutex_init(&host->intr_mutex); 253 + host->intr_syncpt_irq = irq_sync; 254 + host->intr_wq = create_workqueue("host_syncpt"); 255 + if (!host->intr_wq) 256 + return -ENOMEM; 257 + 258 + for (id = 0; id < nb_pts; ++id) { 259 + struct host1x_syncpt *syncpt = host->syncpt + id; 260 + 261 + spin_lock_init(&syncpt->intr.lock); 262 + INIT_LIST_HEAD(&syncpt->intr.wait_head); 263 + snprintf(syncpt->intr.thresh_irq_name, 264 + sizeof(syncpt->intr.thresh_irq_name), 265 + "host1x_sp_%02d", id); 266 + } 267 + 268 + 
host1x_intr_start(host); 269 + 270 + return 0; 271 + } 272 + 273 + void host1x_intr_deinit(struct host1x *host) 274 + { 275 + host1x_intr_stop(host); 276 + destroy_workqueue(host->intr_wq); 277 + } 278 + 279 + void host1x_intr_start(struct host1x *host) 280 + { 281 + u32 hz = clk_get_rate(host->clk); 282 + int err; 283 + 284 + mutex_lock(&host->intr_mutex); 285 + err = host1x_hw_intr_init_host_sync(host, DIV_ROUND_UP(hz, 1000000), 286 + syncpt_thresh_work); 287 + if (err) { 288 + mutex_unlock(&host->intr_mutex); 289 + return; 290 + } 291 + mutex_unlock(&host->intr_mutex); 292 + } 293 + 294 + void host1x_intr_stop(struct host1x *host) 295 + { 296 + unsigned int id; 297 + struct host1x_syncpt *syncpt = host->syncpt; 298 + u32 nb_pts = host1x_syncpt_nb_pts(host); 299 + 300 + mutex_lock(&host->intr_mutex); 301 + 302 + host1x_hw_intr_disable_all_syncpt_intrs(host); 303 + 304 + for (id = 0; id < nb_pts; ++id) { 305 + struct host1x_waitlist *waiter, *next; 306 + 307 + list_for_each_entry_safe(waiter, next, 308 + &syncpt[id].intr.wait_head, list) { 309 + if (atomic_cmpxchg(&waiter->state, 310 + WLS_CANCELLED, WLS_HANDLED) == WLS_CANCELLED) { 311 + list_del(&waiter->list); 312 + kref_put(&waiter->refcount, waiter_release); 313 + } 314 + } 315 + 316 + if (!list_empty(&syncpt[id].intr.wait_head)) { 317 + /* output diagnostics */ 318 + mutex_unlock(&host->intr_mutex); 319 + pr_warn("%s cannot stop syncpt intr id=%d\n", 320 + __func__, id); 321 + return; 322 + } 323 + } 324 + 325 + host1x_hw_intr_free_syncpt_irq(host); 326 + 327 + mutex_unlock(&host->intr_mutex); 328 + }
+96
drivers/gpu/host1x/intr.h
··· 1 + /* 2 + * Tegra host1x Interrupt Management 3 + * 4 + * Copyright (c) 2010-2013, NVIDIA Corporation. 5 + * 6 + * This program is free software; you can redistribute it and/or modify it 7 + * under the terms and conditions of the GNU General Public License, 8 + * version 2, as published by the Free Software Foundation. 9 + * 10 + * This program is distributed in the hope it will be useful, but WITHOUT 11 + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for 13 + * more details. 14 + * 15 + * You should have received a copy of the GNU General Public License 16 + * along with this program. If not, see <http://www.gnu.org/licenses/>. 17 + */ 18 + 19 + #ifndef __HOST1X_INTR_H 20 + #define __HOST1X_INTR_H 21 + 22 + #include <linux/interrupt.h> 23 + #include <linux/workqueue.h> 24 + 25 + struct host1x; 26 + 27 + enum host1x_intr_action { 28 + /* 29 + * Wake up a task. 30 + * 'data' points to a wait_queue_head_t 31 + */ 32 + HOST1X_INTR_ACTION_WAKEUP, 33 + 34 + /* 35 + * Wake up a interruptible task. 36 + * 'data' points to a wait_queue_head_t 37 + */ 38 + HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE, 39 + 40 + HOST1X_INTR_ACTION_COUNT 41 + }; 42 + 43 + struct host1x_syncpt_intr { 44 + spinlock_t lock; 45 + struct list_head wait_head; 46 + char thresh_irq_name[12]; 47 + struct work_struct work; 48 + }; 49 + 50 + struct host1x_waitlist { 51 + struct list_head list; 52 + struct kref refcount; 53 + u32 thresh; 54 + enum host1x_intr_action action; 55 + atomic_t state; 56 + void *data; 57 + int count; 58 + }; 59 + 60 + /* 61 + * Schedule an action to be taken when a sync point reaches the given threshold. 
62 + * 63 + * @id the sync point 64 + * @thresh the threshold 65 + * @action the action to take 66 + * @data a pointer to extra data depending on action, see above 67 + * @waiter waiter structure - assumes ownership 68 + * @ref must be passed if cancellation is possible, else NULL 69 + * 70 + * This is a non-blocking api. 71 + */ 72 + int host1x_intr_add_action(struct host1x *host, u32 id, u32 thresh, 73 + enum host1x_intr_action action, void *data, 74 + struct host1x_waitlist *waiter, void **ref); 75 + 76 + /* 77 + * Unreference an action submitted to host1x_intr_add_action(). 78 + * You must call this if you passed non-NULL as ref. 79 + * @ref the ref returned from host1x_intr_add_action() 80 + */ 81 + void host1x_intr_put_ref(struct host1x *host, u32 id, void *ref); 82 + 83 + /* Initialize host1x sync point interrupt */ 84 + int host1x_intr_init(struct host1x *host, unsigned int irq_sync); 85 + 86 + /* Deinitialize host1x sync point interrupt */ 87 + void host1x_intr_deinit(struct host1x *host); 88 + 89 + /* Enable host1x sync point interrupt */ 90 + void host1x_intr_start(struct host1x *host); 91 + 92 + /* Disable host1x sync point interrupt */ 93 + void host1x_intr_stop(struct host1x *host); 94 + 95 + irqreturn_t host1x_syncpt_thresh_fn(void *dev_id); 96 + #endif
+159
drivers/gpu/host1x/syncpt.c
··· 24 24 25 25 #include "syncpt.h" 26 26 #include "dev.h" 27 + #include "intr.h" 28 + 29 + #define SYNCPT_CHECK_PERIOD (2 * HZ) 30 + #define MAX_STUCK_CHECK_COUNT 15 27 31 28 32 static struct host1x_syncpt *_host1x_syncpt_alloc(struct host1x *host, 29 33 struct device *dev, ··· 143 139 if (host1x_syncpt_client_managed(sp)) 144 140 host1x_syncpt_incr_max(sp, 1); 145 141 host1x_syncpt_cpu_incr(sp); 142 + } 143 + 144 + /* 145 + * Updated sync point form hardware, and returns true if syncpoint is expired, 146 + * false if we may need to wait 147 + */ 148 + static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh) 149 + { 150 + host1x_hw_syncpt_load(sp->host, sp); 151 + return host1x_syncpt_is_expired(sp, thresh); 152 + } 153 + 154 + /* 155 + * Main entrypoint for syncpoint value waits. 156 + */ 157 + int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout, 158 + u32 *value) 159 + { 160 + DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); 161 + void *ref; 162 + struct host1x_waitlist *waiter; 163 + int err = 0, check_count = 0; 164 + u32 val; 165 + 166 + if (value) 167 + *value = 0; 168 + 169 + /* first check cache */ 170 + if (host1x_syncpt_is_expired(sp, thresh)) { 171 + if (value) 172 + *value = host1x_syncpt_load(sp); 173 + return 0; 174 + } 175 + 176 + /* try to read from register */ 177 + val = host1x_hw_syncpt_load(sp->host, sp); 178 + if (host1x_syncpt_is_expired(sp, thresh)) { 179 + if (value) 180 + *value = val; 181 + goto done; 182 + } 183 + 184 + if (!timeout) { 185 + err = -EAGAIN; 186 + goto done; 187 + } 188 + 189 + /* allocate a waiter */ 190 + waiter = kzalloc(sizeof(*waiter), GFP_KERNEL); 191 + if (!waiter) { 192 + err = -ENOMEM; 193 + goto done; 194 + } 195 + 196 + /* schedule a wakeup when the syncpoint value is reached */ 197 + err = host1x_intr_add_action(sp->host, sp->id, thresh, 198 + HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE, 199 + &wq, waiter, &ref); 200 + if (err) 201 + goto done; 202 + 203 + err = -EAGAIN; 204 + /* 
Caller-specified timeout may be impractically low */ 205 + if (timeout < 0) 206 + timeout = LONG_MAX; 207 + 208 + /* wait for the syncpoint, or timeout, or signal */ 209 + while (timeout) { 210 + long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout); 211 + int remain = wait_event_interruptible_timeout(wq, 212 + syncpt_load_min_is_expired(sp, thresh), 213 + check); 214 + if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) { 215 + if (value) 216 + *value = host1x_syncpt_load(sp); 217 + err = 0; 218 + break; 219 + } 220 + if (remain < 0) { 221 + err = remain; 222 + break; 223 + } 224 + timeout -= check; 225 + if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) { 226 + dev_warn(sp->host->dev, 227 + "%s: syncpoint id %d (%s) stuck waiting %d, timeout=%ld\n", 228 + current->comm, sp->id, sp->name, 229 + thresh, timeout); 230 + check_count++; 231 + } 232 + } 233 + host1x_intr_put_ref(sp->host, sp->id, ref); 234 + 235 + done: 236 + return err; 237 + } 238 + EXPORT_SYMBOL(host1x_syncpt_wait); 239 + 240 + /* 241 + * Returns true if syncpoint is expired, false if we may need to wait 242 + */ 243 + bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh) 244 + { 245 + u32 current_val; 246 + u32 future_val; 247 + smp_rmb(); 248 + current_val = (u32)atomic_read(&sp->min_val); 249 + future_val = (u32)atomic_read(&sp->max_val); 250 + 251 + /* Note the use of unsigned arithmetic here (mod 1<<32). 252 + * 253 + * c = current_val = min_val = the current value of the syncpoint. 254 + * t = thresh = the value we are checking 255 + * f = future_val = max_val = the value c will reach when all 256 + * outstanding increments have completed. 257 + * 258 + * Note that c always chases f until it reaches f. 259 + * 260 + * Dtf = (f - t) 261 + * Dtc = (c - t) 262 + * 263 + * Consider all cases: 264 + * 265 + * A) .....c..t..f..... Dtf < Dtc need to wait 266 + * B) .....c.....f..t.. Dtf > Dtc expired 267 + * C) ..t..c.....f..... 
Dtf > Dtc expired (Dct very large) 268 + * 269 + * Any case where f==c: always expired (for any t). Dtf == Dcf 270 + * Any case where t==c: always expired (for any f). Dtf >= Dtc (because Dtc==0) 271 + * Any case where t==f!=c: always wait. Dtf < Dtc (because Dtf==0, 272 + * Dtc!=0) 273 + * 274 + * Other cases: 275 + * 276 + * A) .....t..f..c..... Dtf < Dtc need to wait 277 + * A) .....f..c..t..... Dtf < Dtc need to wait 278 + * A) .....f..t..c..... Dtf > Dtc expired 279 + * 280 + * So: 281 + * Dtf >= Dtc implies EXPIRED (return true) 282 + * Dtf < Dtc implies WAIT (return false) 283 + * 284 + * Note: If t is expired then we *cannot* wait on it. We would wait 285 + * forever (hang the system). 286 + * 287 + * Note: do NOT get clever and remove the -thresh from both sides. It 288 + * is NOT the same. 289 + * 290 + * If future valueis zero, we have a client managed sync point. In that 291 + * case we do a direct comparison. 292 + */ 293 + if (!host1x_syncpt_client_managed(sp)) 294 + return future_val - thresh >= current_val - thresh; 295 + else 296 + return (s32)(current_val - thresh) >= 0; 146 297 } 147 298 148 299 int host1x_syncpt_init(struct host1x *host)
+12
drivers/gpu/host1x/syncpt.h
··· 23 23 #include <linux/kernel.h> 24 24 #include <linux/sched.h> 25 25 26 + #include "intr.h" 27 + 26 28 struct host1x; 27 29 28 30 struct host1x_syncpt { ··· 36 34 int client_managed; 37 35 struct host1x *host; 38 36 struct device *dev; 37 + 38 + /* interrupt data */ 39 + struct host1x_syncpt_intr intr; 39 40 }; 40 41 41 42 /* Initialize sync point array */ ··· 118 113 /* Load current value from hardware to the shadow register. */ 119 114 u32 host1x_syncpt_load(struct host1x_syncpt *sp); 120 115 116 + /* Check if the given syncpoint value has already passed */ 117 + bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh); 118 + 121 119 /* Save host1x sync point state into shadow registers. */ 122 120 void host1x_syncpt_save(struct host1x *host); 123 121 ··· 135 127 136 128 /* Indicate future operations by incrementing the sync point max. */ 137 129 u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs); 130 + 131 + /* Wait until sync point reaches a threshold value, or a timeout. */ 132 + int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, 133 + long timeout, u32 *value); 138 134 139 135 /* Check if sync point id is valid. */ 140 136 static inline int host1x_syncpt_is_valid(struct host1x_syncpt *sp)