Linux kernel mirror (for testing): git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
at v4.12-rc4 (289 lines, 7.6 kB)
/*
 * Copyright (c) 2014 David Jander, Protonic Holland
 * Copyright (C) 2014-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the version 2 of the GNU General Public License
 * as published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>

struct can_rx_offload_cb {
        u32 timestamp;
};

static inline struct can_rx_offload_cb *can_rx_offload_get_cb(struct sk_buff *skb)
{
        BUILD_BUG_ON(sizeof(struct can_rx_offload_cb) > sizeof(skb->cb));

        return (struct can_rx_offload_cb *)skb->cb;
}

static inline bool can_rx_offload_le(struct can_rx_offload *offload, unsigned int a, unsigned int b)
{
        if (offload->inc)
                return a <= b;
        else
                return a >= b;
}

static inline unsigned int can_rx_offload_inc(struct can_rx_offload *offload, unsigned int *val)
{
        if (offload->inc)
                return (*val)++;
        else
                return (*val)--;
}

static int can_rx_offload_napi_poll(struct napi_struct *napi, int quota)
{
        struct can_rx_offload *offload = container_of(napi, struct can_rx_offload, napi);
        struct net_device *dev = offload->dev;
        struct net_device_stats *stats = &dev->stats;
        struct sk_buff *skb;
        int work_done = 0;

        while ((work_done < quota) &&
               (skb = skb_dequeue(&offload->skb_queue))) {
                struct can_frame *cf = (struct can_frame *)skb->data;

                work_done++;
                stats->rx_packets++;
                stats->rx_bytes += cf->can_dlc;
                netif_receive_skb(skb);
        }

        if (work_done < quota) {
                napi_complete_done(napi, work_done);

                /* Check if there was another interrupt */
                if (!skb_queue_empty(&offload->skb_queue))
                        napi_reschedule(&offload->napi);
        }

        can_led_event(offload->dev, CAN_LED_EVENT_RX);

        return work_done;
}

static inline void __skb_queue_add_sort(struct sk_buff_head *head, struct sk_buff *new,
                                        int (*compare)(struct sk_buff *a, struct sk_buff *b))
{
        struct sk_buff *pos, *insert = (struct sk_buff *)head;

        skb_queue_reverse_walk(head, pos) {
                const struct can_rx_offload_cb *cb_pos, *cb_new;

                cb_pos = can_rx_offload_get_cb(pos);
                cb_new = can_rx_offload_get_cb(new);

                netdev_dbg(new->dev,
                           "%s: pos=0x%08x, new=0x%08x, diff=%10d, queue_len=%d\n",
                           __func__,
                           cb_pos->timestamp, cb_new->timestamp,
                           cb_new->timestamp - cb_pos->timestamp,
                           skb_queue_len(head));

                if (compare(pos, new) < 0)
                        continue;
                insert = pos;
                break;
        }

        __skb_queue_after(head, insert, new);
}

static int can_rx_offload_compare(struct sk_buff *a, struct sk_buff *b)
{
        const struct can_rx_offload_cb *cb_a, *cb_b;

        cb_a = can_rx_offload_get_cb(a);
        cb_b = can_rx_offload_get_cb(b);

        /* Subtract two u32 and return result as int, to keep
         * difference steady around the u32 overflow.
         */
        return cb_b->timestamp - cb_a->timestamp;
}

static struct sk_buff *can_rx_offload_offload_one(struct can_rx_offload *offload, unsigned int n)
{
        struct sk_buff *skb = NULL;
        struct can_rx_offload_cb *cb;
        struct can_frame *cf;
        int ret;

        /* If queue is full or skb not available, read to discard mailbox */
        if (likely(skb_queue_len(&offload->skb_queue) <=
                   offload->skb_queue_len_max))
                skb = alloc_can_skb(offload->dev, &cf);

        if (!skb) {
                struct can_frame cf_overflow;
                u32 timestamp;

                ret = offload->mailbox_read(offload, &cf_overflow,
                                            &timestamp, n);
                if (ret)
                        offload->dev->stats.rx_dropped++;

                return NULL;
        }

        cb = can_rx_offload_get_cb(skb);
        ret = offload->mailbox_read(offload, cf, &cb->timestamp, n);
        if (!ret) {
                kfree_skb(skb);
                return NULL;
        }

        return skb;
}

int can_rx_offload_irq_offload_timestamp(struct can_rx_offload *offload, u64 pending)
{
        struct sk_buff_head skb_queue;
        unsigned int i;

        __skb_queue_head_init(&skb_queue);

        for (i = offload->mb_first;
             can_rx_offload_le(offload, i, offload->mb_last);
             can_rx_offload_inc(offload, &i)) {
                struct sk_buff *skb;

                if (!(pending & BIT_ULL(i)))
                        continue;

                skb = can_rx_offload_offload_one(offload, i);
                if (!skb)
                        break;

                __skb_queue_add_sort(&skb_queue, skb, can_rx_offload_compare);
        }

        if (!skb_queue_empty(&skb_queue)) {
                unsigned long flags;
                u32 queue_len;

                spin_lock_irqsave(&offload->skb_queue.lock, flags);
                skb_queue_splice_tail(&skb_queue, &offload->skb_queue);
                spin_unlock_irqrestore(&offload->skb_queue.lock, flags);

                if ((queue_len = skb_queue_len(&offload->skb_queue)) >
                    (offload->skb_queue_len_max / 8))
                        netdev_dbg(offload->dev, "%s: queue_len=%d\n",
                                   __func__, queue_len);

                can_rx_offload_schedule(offload);
        }

        return skb_queue_len(&skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_timestamp);

int can_rx_offload_irq_offload_fifo(struct can_rx_offload *offload)
{
        struct sk_buff *skb;
        int received = 0;

        while ((skb = can_rx_offload_offload_one(offload, 0))) {
                skb_queue_tail(&offload->skb_queue, skb);
                received++;
        }

        if (received)
                can_rx_offload_schedule(offload);

        return received;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_offload_fifo);

int can_rx_offload_irq_queue_err_skb(struct can_rx_offload *offload, struct sk_buff *skb)
{
        if (skb_queue_len(&offload->skb_queue) >
            offload->skb_queue_len_max)
                return -ENOMEM;

        skb_queue_tail(&offload->skb_queue, skb);
        can_rx_offload_schedule(offload);

        return 0;
}
EXPORT_SYMBOL_GPL(can_rx_offload_irq_queue_err_skb);

static int can_rx_offload_init_queue(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
        offload->dev = dev;

        /* Limit queue len to 4x the weight (rounded to next power of two) */
        offload->skb_queue_len_max = 2 << fls(weight);
        offload->skb_queue_len_max *= 4;
        skb_queue_head_init(&offload->skb_queue);

        can_rx_offload_reset(offload);
        netif_napi_add(dev, &offload->napi, can_rx_offload_napi_poll, weight);

        dev_dbg(dev->dev.parent, "%s: skb_queue_len_max=%d\n",
                __func__, offload->skb_queue_len_max);

        return 0;
}

int can_rx_offload_add_timestamp(struct net_device *dev, struct can_rx_offload *offload)
{
        unsigned int weight;

        if (offload->mb_first > BITS_PER_LONG_LONG ||
            offload->mb_last > BITS_PER_LONG_LONG || !offload->mailbox_read)
                return -EINVAL;

        if (offload->mb_first < offload->mb_last) {
                offload->inc = true;
                weight = offload->mb_last - offload->mb_first;
        } else {
                offload->inc = false;
                weight = offload->mb_first - offload->mb_last;
        }

        return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_timestamp);

int can_rx_offload_add_fifo(struct net_device *dev, struct can_rx_offload *offload, unsigned int weight)
{
        if (!offload->mailbox_read)
                return -EINVAL;

        return can_rx_offload_init_queue(dev, offload, weight);
}
EXPORT_SYMBOL_GPL(can_rx_offload_add_fifo);

void can_rx_offload_enable(struct can_rx_offload *offload)
{
        can_rx_offload_reset(offload);
        napi_enable(&offload->napi);
}
EXPORT_SYMBOL_GPL(can_rx_offload_enable);

void can_rx_offload_del(struct can_rx_offload *offload)
{
        netif_napi_del(&offload->napi);
        skb_queue_purge(&offload->skb_queue);
}
EXPORT_SYMBOL_GPL(can_rx_offload_del);

void can_rx_offload_reset(struct can_rx_offload *offload)
{
}
EXPORT_SYMBOL_GPL(can_rx_offload_reset);
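For context, a minimal sketch of how a CAN controller driver might hook into this rx-offload API. Everything prefixed my_ or MY_ (the private struct, the mailbox_read body, the mailbox range, the setup and IRQ functions) is hypothetical and for illustration only; the struct can_rx_offload fields and the exact mailbox_read prototype come from <linux/can/rx-offload.h>, and real users of this API (e.g. flexcan) spread this glue across probe, ndo_open and their interrupt handler.

/* Hypothetical driver glue, sketching typical use of the API above.
 * All names prefixed my_ or MY_ are made up for illustration.
 */
#include <linux/can/dev.h>
#include <linux/can/rx-offload.h>
#include <linux/interrupt.h>

#define MY_MB_FIRST     0       /* first RX mailbox scanned (hypothetical) */
#define MY_MB_LAST      31      /* last RX mailbox scanned (hypothetical) */

struct my_priv {
        struct can_rx_offload offload;
        /* register base, clocks, ... */
};

/* Callback used by can_rx_offload_offload_one(): read mailbox 'mb' from the
 * hardware, fill *cf and *timestamp, and return non-zero if a frame was
 * available (the authoritative prototype is in <linux/can/rx-offload.h>).
 */
static unsigned int my_mailbox_read(struct can_rx_offload *offload,
                                    struct can_frame *cf, u32 *timestamp,
                                    unsigned int mb)
{
        /* read the controller's mailbox registers here */
        return 0;
}

static int my_setup(struct net_device *dev, struct my_priv *priv)
{
        int err;

        /* Timestamp mode: mailboxes MY_MB_FIRST..MY_MB_LAST are drained and
         * the resulting skbs sorted by hardware timestamp before delivery.
         */
        priv->offload.mailbox_read = my_mailbox_read;
        priv->offload.mb_first = MY_MB_FIRST;
        priv->offload.mb_last = MY_MB_LAST;

        err = can_rx_offload_add_timestamp(dev, &priv->offload);
        if (err)
                return err;

        /* In a real driver this usually happens in ndo_open */
        can_rx_offload_enable(&priv->offload);

        return 0;
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
        struct my_priv *priv = dev_id;
        u64 pending = 0;        /* bitmask of mailboxes with a pending frame,
                                 * normally read from an interrupt flag register */

        can_rx_offload_irq_offload_timestamp(&priv->offload, pending);

        return IRQ_HANDLED;
}

For a controller with a hardware RX FIFO instead of individually addressable mailboxes, the driver would set only mailbox_read, register with can_rx_offload_add_fifo() and an explicit NAPI weight, and call can_rx_offload_irq_offload_fifo() from its IRQ handler; teardown in either mode goes through can_rx_offload_del().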