Linux kernel mirror (for testing)
git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		PACKET - implements raw packet sockets.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *
 * Fixes:
 *		Alan Cox	:	verify_area() now used correctly
 *		Alan Cox	:	new skbuff lists, look ma no backlogs!
 *		Alan Cox	:	tidied skbuff lists.
 *		Alan Cox	:	Now uses generic datagram routines I
 *					added. Also fixed the peek/read crash
 *					from all old Linux datagram code.
 *		Alan Cox	:	Uses the improved datagram code.
 *		Alan Cox	:	Added NULL's for socket options.
 *		Alan Cox	:	Re-commented the code.
 *		Alan Cox	:	Use new kernel side addressing
 *		Rob Janssen	:	Correct MTU usage.
 *		Dave Platt	:	Counter leaks caused by incorrect
 *					interrupt locking and some slightly
 *					dubious gcc output. Can you read
 *					compiler: it said _VOLATILE_
 *		Richard Kooijman:	Timestamp fixes.
 *		Alan Cox	:	New buffers. Use sk->mac.raw.
 *		Alan Cox	:	sendmsg/recvmsg support.
 *		Alan Cox	:	Protocol setting support
 *		Alexey Kuznetsov:	Untied from IPv4 stack.
 *		Cyrus Durgin	:	Fixed kerneld for kmod.
 *		Michal Ostrowski:	Module initialization cleanup.
 *		Ulises Alonso	:	Frame number limit removal and
 *					packet_set_ring memory leak.
 *		Eric Biederman	:	Allow for > 8 byte hardware addresses.
 *					The convention is that longer addresses
 *					will simply extend the hardware address
 *					byte arrays at the end of sockaddr_ll
 *					and packet_mreq.
 *		Johann Baudy	:	Added TX RING.
 *		Chetan Loke	:	Implemented TPACKET_V3 block abstraction
 *					layer.
 *					Copyright (C) 2011, <lokec@ccs.neu.edu>
 */

#include <linux/ethtool.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/capability.h>
#include <linux/fcntl.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_packet.h>
#include <linux/wireless.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/if_vlan.h>
#include <linux/virtio_net.h>
#include <linux/errqueue.h>
#include <linux/net_tstamp.h>
#include <linux/percpu.h>
#ifdef CONFIG_INET
#include <net/inet_common.h>
#endif
#include <linux/bpf.h>
#include <net/compat.h>

#include "internal.h"
/*
   Assumptions:
   - If the device has no dev->header_ops->create, there is no LL header
     visible above the device. In this case, its hard_header_len should be 0.
     The device may prepend its own header internally. In this case, its
     needed_headroom should be set to the space needed for it to add its
     internal header.
     For example, a WiFi driver pretending to be an Ethernet driver should
     set its hard_header_len to be the Ethernet header length, and set its
     needed_headroom to be (the real WiFi header length - the fake Ethernet
     header length).
   - A packet socket receives packets with the LL header already pulled,
     so SOCK_RAW must push it back.

On receive:
-----------

Incoming, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> data

Outgoing, dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

Incoming, dev_has_header(dev) == false
   mac_header -> data
     However drivers often make it point to the ll header.
     This is incorrect because the ll header should be invisible to us.
   data       -> data

Outgoing, dev_has_header(dev) == false
   mac_header -> data. ll header is invisible to us.
   data       -> data

In summary:
   If dev_has_header(dev) == false we are unable to restore the ll header,
   because it is invisible to us.


On transmit:
------------

dev_has_header(dev) == true
   mac_header -> ll header
   data       -> ll header

dev_has_header(dev) == false (ll header is invisible to us)
   mac_header -> data
   data       -> data

   We should set network_header on output to the correct position;
   the packet classifier depends on it.
 */
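
/* A user-space sketch of the visibility rules above (illustrative only,
 * not part of the kernel build; the protocol choice is an assumption).
 * A SOCK_RAW packet socket hands the application frames starting at the
 * LL (e.g. Ethernet) header, while SOCK_DGRAM strips the LL header and
 * reports the link-level info via sockaddr_ll instead:
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <net/ethernet.h>	// ETH_P_ALL
 *	#include <arpa/inet.h>		// htons()
 *
 *	// frames delivered with the Ethernet header in place:
 *	int raw = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 *	// frames delivered starting at the network header:
 *	int dgram = socket(AF_PACKET, SOCK_DGRAM, htons(ETH_P_ALL));
 */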

/* Private packet socket structures. */

/* identical to struct packet_mreq except it has
 * a longer address field.
 */
struct packet_mreq_max {
	int		mr_ifindex;
	unsigned short	mr_type;
	unsigned short	mr_alen;
	unsigned char	mr_address[MAX_ADDR_LEN];
};

union tpacket_uhdr {
	struct tpacket_hdr  *h1;
	struct tpacket2_hdr *h2;
	struct tpacket3_hdr *h3;
	void *raw;
};

static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring);

#define V3_ALIGNMENT	(8)

#define BLK_HDR_LEN	(ALIGN(sizeof(struct tpacket_block_desc), V3_ALIGNMENT))

#define BLK_PLUS_PRIV(sz_of_priv) \
	(BLK_HDR_LEN + ALIGN((sz_of_priv), V3_ALIGNMENT))

#define BLOCK_STATUS(x)		((x)->hdr.bh1.block_status)
#define BLOCK_NUM_PKTS(x)	((x)->hdr.bh1.num_pkts)
#define BLOCK_O2FP(x)		((x)->hdr.bh1.offset_to_first_pkt)
#define BLOCK_LEN(x)		((x)->hdr.bh1.blk_len)
#define BLOCK_SNUM(x)		((x)->hdr.bh1.seq_num)
#define BLOCK_O2PRIV(x)		((x)->offset_to_priv)

struct packet_sock;
static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
		       struct packet_type *pt, struct net_device *orig_dev);

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status);
static void packet_increment_head(struct packet_ring_buffer *buff);
static int prb_curr_blk_in_use(struct tpacket_block_desc *);
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *,
			struct packet_sock *);
static void prb_retire_current_block(struct tpacket_kbdq_core *,
		struct packet_sock *, unsigned int status);
static int prb_queue_frozen(struct tpacket_kbdq_core *);
static void prb_open_block(struct tpacket_kbdq_core *,
		struct tpacket_block_desc *);
static void prb_retire_rx_blk_timer_expired(struct timer_list *);
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *);
static void prb_fill_rxhash(struct tpacket_kbdq_core *, struct tpacket3_hdr *);
static void prb_clear_rxhash(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void prb_fill_vlan_info(struct tpacket_kbdq_core *,
		struct tpacket3_hdr *);
static void packet_flush_mclist(struct sock *sk);
static u16 packet_pick_tx_queue(struct sk_buff *skb);

struct packet_skb_cb {
	union {
		struct sockaddr_pkt pkt;
		union {
			/* Trick: alias skb original length with
			 * ll.sll_family and ll.protocol in order
			 * to save room.
			 */
			unsigned int origlen;
			struct sockaddr_ll ll;
		};
	} sa;
};

#define vio_le() virtio_legacy_is_little_endian()

#define PACKET_SKB_CB(__skb)	((struct packet_skb_cb *)((__skb)->cb))

#define GET_PBDQC_FROM_RB(x)	((struct tpacket_kbdq_core *)(&(x)->prb_bdqc))
#define GET_PBLOCK_DESC(x, bid)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(bid)].buffer))
#define GET_CURR_PBLOCK_DESC_FROM_CORE(x)	\
	((struct tpacket_block_desc *)((x)->pkbdq[(x)->kactive_blk_num].buffer))
#define GET_NEXT_PRB_BLK_NUM(x) \
	(((x)->kactive_blk_num < ((x)->knum_blocks-1)) ? \
	((x)->kactive_blk_num+1) : 0)

static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);

static int packet_direct_xmit(struct sk_buff *skb)
{
	return dev_direct_xmit(skb, packet_pick_tx_queue(skb));
}

static struct net_device *packet_cached_dev_get(struct packet_sock *po)
{
	struct net_device *dev;

	rcu_read_lock();
	dev = rcu_dereference(po->cached_dev);
	if (likely(dev))
		dev_hold(dev);
	rcu_read_unlock();

	return dev;
}

static void packet_cached_dev_assign(struct packet_sock *po,
				     struct net_device *dev)
{
	rcu_assign_pointer(po->cached_dev, dev);
}

static void packet_cached_dev_reset(struct packet_sock *po)
{
	RCU_INIT_POINTER(po->cached_dev, NULL);
}

static bool packet_use_direct_xmit(const struct packet_sock *po)
{
	return po->xmit == packet_direct_xmit;
}

static u16 packet_pick_tx_queue(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	const struct net_device_ops *ops = dev->netdev_ops;
	int cpu = raw_smp_processor_id();
	u16 queue_index;

#ifdef CONFIG_XPS
	skb->sender_cpu = cpu + 1;
#endif
	skb_record_rx_queue(skb, cpu % dev->real_num_tx_queues);
	if (ops->ndo_select_queue) {
		queue_index = ops->ndo_select_queue(dev, skb, NULL);
		queue_index = netdev_cap_txqueue(dev, queue_index);
	} else {
		queue_index = netdev_pick_tx(dev, skb, NULL);
	}

	return queue_index;
}
/* __register_prot_hook must be invoked through register_prot_hook
 * or from a context in which asynchronous accesses to the packet
 * socket are not possible (packet_create()).
 */
static void __register_prot_hook(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);

	if (!po->running) {
		if (po->fanout)
			__fanout_link(sk, po);
		else
			dev_add_pack(&po->prot_hook);

		sock_hold(sk);
		po->running = 1;
	}
}

static void register_prot_hook(struct sock *sk)
{
	lockdep_assert_held_once(&pkt_sk(sk)->bind_lock);
	__register_prot_hook(sk);
}

/* If the sync parameter is true, we will temporarily drop
 * the po->bind_lock and do a synchronize_net to make sure no
 * asynchronous packet processing paths still refer to the elements
 * of po->prot_hook. If the sync parameter is false, it is the
 * caller's responsibility to take care of this.
 */
static void __unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	lockdep_assert_held_once(&po->bind_lock);

	po->running = 0;

	if (po->fanout)
		__fanout_unlink(sk, po);
	else
		__dev_remove_pack(&po->prot_hook);

	__sock_put(sk);

	if (sync) {
		spin_unlock(&po->bind_lock);
		synchronize_net();
		spin_lock(&po->bind_lock);
	}
}

static void unregister_prot_hook(struct sock *sk, bool sync)
{
	struct packet_sock *po = pkt_sk(sk);

	if (po->running)
		__unregister_prot_hook(sk, sync);
}

static inline struct page * __pure pgv_to_page(void *addr)
{
	if (is_vmalloc_addr(addr))
		return vmalloc_to_page(addr);
	return virt_to_page(addr);
}

static void __packet_set_status(struct packet_sock *po, void *frame, int status)
{
	union tpacket_uhdr h;

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		break;
	case TPACKET_V2:
		h.h2->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		break;
	case TPACKET_V3:
		h.h3->tp_status = status;
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	smp_wmb();
}

static int __packet_get_status(const struct packet_sock *po, void *frame)
{
	union tpacket_uhdr h;

	smp_rmb();

	h.raw = frame;
	switch (po->tp_version) {
	case TPACKET_V1:
		flush_dcache_page(pgv_to_page(&h.h1->tp_status));
		return h.h1->tp_status;
	case TPACKET_V2:
		flush_dcache_page(pgv_to_page(&h.h2->tp_status));
		return h.h2->tp_status;
	case TPACKET_V3:
		flush_dcache_page(pgv_to_page(&h.h3->tp_status));
		return h.h3->tp_status;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return 0;
	}
}

static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec64 *ts,
				   unsigned int flags)
{
	struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);

	if (shhwtstamps &&
	    (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
	    ktime_to_timespec64_cond(shhwtstamps->hwtstamp, ts))
		return TP_STATUS_TS_RAW_HARDWARE;

	if ((flags & SOF_TIMESTAMPING_SOFTWARE) &&
	    ktime_to_timespec64_cond(skb->tstamp, ts))
		return TP_STATUS_TS_SOFTWARE;

	return 0;
}
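
/* A hedged user-space sketch of requesting the timestamps consumed by
 * tpacket_get_timestamp() above (standard SO_TIMESTAMPING flags via the
 * PACKET_TIMESTAMP socket option; the flag combination is illustrative):
 *
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *	#include <linux/net_tstamp.h>
 *
 *	int req = SOF_TIMESTAMPING_RAW_HARDWARE | SOF_TIMESTAMPING_SOFTWARE;
 *	setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, &req, sizeof(req));
 *
 * Ring frames then carry TP_STATUS_TS_RAW_HARDWARE or
 * TP_STATUS_TS_SOFTWARE in tp_status, matching the return values above.
 */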

static __u32 __packet_set_timestamp(struct packet_sock *po, void *frame,
				    struct sk_buff *skb)
{
	union tpacket_uhdr h;
	struct timespec64 ts;
	__u32 ts_status;

	if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
		return 0;

	h.raw = frame;
	/*
	 * versions 1 through 3 overflow the timestamps in y2106, since they
	 * all store the seconds in a 32-bit unsigned integer.
	 * If we create a version 4, that should have a 64-bit timestamp,
	 * either 64-bit seconds + 32-bit nanoseconds, or just 64-bit
	 * nanoseconds.
	 */
	switch (po->tp_version) {
	case TPACKET_V1:
		h.h1->tp_sec = ts.tv_sec;
		h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
		break;
	case TPACKET_V2:
		h.h2->tp_sec = ts.tv_sec;
		h.h2->tp_nsec = ts.tv_nsec;
		break;
	case TPACKET_V3:
		h.h3->tp_sec = ts.tv_sec;
		h.h3->tp_nsec = ts.tv_nsec;
		break;
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
	}

	/* one flush is safe, as both fields always lie on the same cacheline */
	flush_dcache_page(pgv_to_page(&h.h1->tp_sec));
	smp_wmb();

	return ts_status;
}

static void *packet_lookup_frame(const struct packet_sock *po,
				 const struct packet_ring_buffer *rb,
				 unsigned int position,
				 int status)
{
	unsigned int pg_vec_pos, frame_offset;
	union tpacket_uhdr h;

	pg_vec_pos = position / rb->frames_per_block;
	frame_offset = position % rb->frames_per_block;

	h.raw = rb->pg_vec[pg_vec_pos].buffer +
		(frame_offset * rb->frame_size);

	if (status != __packet_get_status(po, h.raw))
		return NULL;

	return h.raw;
}

static void *packet_current_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	return packet_lookup_frame(po, rb, rb->head, status);
}
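
/* The addressing in packet_lookup_frame() in one worked example (the
 * numbers are illustrative): with tp_block_size = 4096 and tp_frame_size
 * = 2048, frames_per_block is 2, so frame position 5 resolves to
 * pg_vec_pos = 5 / 2 = 2 and frame_offset = 5 % 2 = 1, i.e. the frame
 * header lives at pg_vec[2].buffer + 1 * 2048. User space that mmap()s
 * the ring applies the same arithmetic to locate frame headers.
 */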

static void prb_del_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	del_timer_sync(&pkc->retire_blk_timer);
}

static void prb_shutdown_retire_blk_timer(struct packet_sock *po,
		struct sk_buff_head *rb_queue)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);

	spin_lock_bh(&rb_queue->lock);
	pkc->delete_blk_timer = 1;
	spin_unlock_bh(&rb_queue->lock);

	prb_del_retire_blk_timer(pkc);
}

static void prb_setup_retire_blk_timer(struct packet_sock *po)
{
	struct tpacket_kbdq_core *pkc;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	timer_setup(&pkc->retire_blk_timer, prb_retire_rx_blk_timer_expired,
		    0);
	pkc->retire_blk_timer.expires = jiffies;
}

static int prb_calc_retire_blk_tmo(struct packet_sock *po,
				int blk_size_in_bytes)
{
	struct net_device *dev;
	unsigned int mbits, div;
	struct ethtool_link_ksettings ecmd;
	int err;

	rtnl_lock();
	dev = __dev_get_by_index(sock_net(&po->sk), po->ifindex);
	if (unlikely(!dev)) {
		rtnl_unlock();
		return DEFAULT_PRB_RETIRE_TOV;
	}
	err = __ethtool_get_link_ksettings(dev, &ecmd);
	rtnl_unlock();
	if (err)
		return DEFAULT_PRB_RETIRE_TOV;

	/* If the link speed is this slow, you don't really
	 * need to worry about perf anyway
	 */
	if (ecmd.base.speed < SPEED_1000 ||
	    ecmd.base.speed == SPEED_UNKNOWN)
		return DEFAULT_PRB_RETIRE_TOV;

	div = ecmd.base.speed / 1000;
	mbits = (blk_size_in_bytes * 8) / (1024 * 1024);

	if (div)
		mbits /= div;

	if (div)
		return mbits + 1;
	return mbits;
}
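
/* Worked example of the computation above (numbers illustrative): a 1 MiB
 * block is 8 Mbit, so mbits = 8. On a 1 Gbps link div = 1 and the
 * timeout is mbits + 1 = 9 ms, roughly the time to fill one block at
 * line rate. On a 10 Gbps link div = 10, mbits becomes 0, and the
 * timeout floors at 1 ms.
 */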

static void prb_init_ft_ops(struct tpacket_kbdq_core *p1,
			union tpacket_req_u *req_u)
{
	p1->feature_req_word = req_u->req3.tp_feature_req_word;
}

static void init_prb_bdqc(struct packet_sock *po,
			struct packet_ring_buffer *rb,
			struct pgv *pg_vec,
			union tpacket_req_u *req_u)
{
	struct tpacket_kbdq_core *p1 = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd;

	memset(p1, 0x0, sizeof(*p1));

	p1->knxt_seq_num = 1;
	p1->pkbdq = pg_vec;
	pbd = (struct tpacket_block_desc *)pg_vec[0].buffer;
	p1->pkblk_start = pg_vec[0].buffer;
	p1->kblk_size = req_u->req3.tp_block_size;
	p1->knum_blocks = req_u->req3.tp_block_nr;
	p1->hdrlen = po->tp_hdrlen;
	p1->version = po->tp_version;
	p1->last_kactive_blk_num = 0;
	po->stats.stats3.tp_freeze_q_cnt = 0;
	if (req_u->req3.tp_retire_blk_tov)
		p1->retire_blk_tov = req_u->req3.tp_retire_blk_tov;
	else
		p1->retire_blk_tov = prb_calc_retire_blk_tmo(po,
						req_u->req3.tp_block_size);
	p1->tov_in_jiffies = msecs_to_jiffies(p1->retire_blk_tov);
	p1->blk_sizeof_priv = req_u->req3.tp_sizeof_priv;
	rwlock_init(&p1->blk_fill_in_prog_lock);

	p1->max_frame_len = p1->kblk_size - BLK_PLUS_PRIV(p1->blk_sizeof_priv);
	prb_init_ft_ops(p1, req_u);
	prb_setup_retire_blk_timer(po);
	prb_open_block(p1, pbd);
}

/* Do NOT update the last_blk_num first.
 * Assumes sk_buff_head lock is held.
 */
static void _prb_refresh_rx_retire_blk_timer(struct tpacket_kbdq_core *pkc)
{
	mod_timer(&pkc->retire_blk_timer,
			jiffies + pkc->tov_in_jiffies);
	pkc->last_kactive_blk_num = pkc->kactive_blk_num;
}
/*
 * Timer logic:
 * 1) We refresh the timer only when we open a block.
 *    By doing this we don't waste cycles refreshing the timer
 *    on a packet-by-packet basis.
 *
 * With a 1MB block-size, on a 1Gbps line, it will take
 * i) ~8 ms to fill a block + ii) memcpy etc.
 * In this cut we are not accounting for the memcpy time.
 *
 * So, if the user sets the 'tmo' to 10ms then the timer
 * will never fire while the block is still getting filled
 * (which is what we want). However, the user could choose
 * to close a block early and that's fine.
 *
 * But when the timer does fire, we check whether or not to refresh it.
 * Since the tmo granularity is in msecs, it is not too expensive
 * to refresh the timer, let's say every '8' msecs.
 * Either the user can set the 'tmo' or we can derive it based on
 * a) line-speed and b) block-size.
 * prb_calc_retire_blk_tmo() calculates the tmo.
 */
static void prb_retire_rx_blk_timer_expired(struct timer_list *t)
{
	struct packet_sock *po =
		from_timer(po, t, rx_ring.prb_bdqc.retire_blk_timer);
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	unsigned int frozen;
	struct tpacket_block_desc *pbd;

	spin_lock(&po->sk.sk_receive_queue.lock);

	frozen = prb_queue_frozen(pkc);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	if (unlikely(pkc->delete_blk_timer))
		goto out;

	/* We only need to plug the race when the block is partially filled.
	 * tpacket_rcv:
	 *		lock(); increment BLOCK_NUM_PKTS; unlock()
	 *		copy_bits() is in progress ...
	 *		timer fires on other cpu:
	 *		we can't retire the current block because copy_bits
	 *		is in progress.
	 */
	if (BLOCK_NUM_PKTS(pbd)) {
		/* Waiting for skb_copy_bits to finish... */
		write_lock(&pkc->blk_fill_in_prog_lock);
		write_unlock(&pkc->blk_fill_in_prog_lock);
	}

	if (pkc->last_kactive_blk_num == pkc->kactive_blk_num) {
		if (!frozen) {
			if (!BLOCK_NUM_PKTS(pbd)) {
				/* An empty block. Just refresh the timer. */
				goto refresh_timer;
			}
			prb_retire_current_block(pkc, po, TP_STATUS_BLK_TMO);
			if (!prb_dispatch_next_block(pkc, po))
				goto refresh_timer;
			else
				goto out;
		} else {
			/* Case 1. Queue was frozen because user-space was
			 * lagging behind.
			 */
			if (prb_curr_blk_in_use(pbd)) {
				/*
				 * Ok, user-space is still behind.
				 * So just refresh the timer.
				 */
				goto refresh_timer;
			} else {
				/* Case 2. Queue was frozen, user-space caught
				 * up, now the link went idle && the timer
				 * fired. We don't have a block to close, so we
				 * open this block and restart the timer.
				 * Opening a block thaws the queue and restarts
				 * the timer; thawing/timer-refresh is a side
				 * effect.
				 */
				prb_open_block(pkc, pbd);
				goto out;
			}
		}
	}

refresh_timer:
	_prb_refresh_rx_retire_blk_timer(pkc);

out:
	spin_unlock(&po->sk.sk_receive_queue.lock);
}

static void prb_flush_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1, __u32 status)
{
	/* Flush everything minus the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	u8 *start, *end;

	start = (u8 *)pbd1;

	/* Skip the block header (we know the header WILL fit in 4K) */
	start += PAGE_SIZE;

	end = (u8 *)PAGE_ALIGN((unsigned long)pkc1->pkblk_end);
	for (; start < end; start += PAGE_SIZE)
		flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif

	/* Now update the block status. */

	BLOCK_STATUS(pbd1) = status;

	/* Flush the block header */

#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
	start = (u8 *)pbd1;
	flush_dcache_page(pgv_to_page(start));

	smp_wmb();
#endif
}

/*
 * Side effects:
 *
 * 1) flush the block
 * 2) Increment active_blk_num
 *
 * Note: We DON'T refresh the timer on purpose,
 * because almost always the next block will be opened.
 */
static void prb_close_block(struct tpacket_kbdq_core *pkc1,
		struct tpacket_block_desc *pbd1,
		struct packet_sock *po, unsigned int stat)
{
	__u32 status = TP_STATUS_USER | stat;

	struct tpacket3_hdr *last_pkt;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
	struct sock *sk = &po->sk;

	if (atomic_read(&po->tp_drops))
		status |= TP_STATUS_LOSING;

	last_pkt = (struct tpacket3_hdr *)pkc1->prev;
	last_pkt->tp_next_offset = 0;

	/* Get the ts of the last pkt */
	if (BLOCK_NUM_PKTS(pbd1)) {
		h1->ts_last_pkt.ts_sec = last_pkt->tp_sec;
		h1->ts_last_pkt.ts_nsec = last_pkt->tp_nsec;
	} else {
		/* Ok, we tmo'd - so get the current time.
		 *
		 * It shouldn't really happen as we don't close empty
		 * blocks. See prb_retire_rx_blk_timer_expired().
		 */
		struct timespec64 ts;
		ktime_get_real_ts64(&ts);
		h1->ts_last_pkt.ts_sec = ts.tv_sec;
		h1->ts_last_pkt.ts_nsec = ts.tv_nsec;
	}

	smp_wmb();

	/* Flush the block */
	prb_flush_block(pkc1, pbd1, status);

	sk->sk_data_ready(sk);

	pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}

static void prb_thaw_queue(struct tpacket_kbdq_core *pkc)
{
	pkc->reset_pending_on_curr_blk = 0;
}

/*
 * Side effect of opening a block:
 *
 * 1) prb_queue is thawed.
 * 2) retire_blk_timer is refreshed.
 *
 */
static void prb_open_block(struct tpacket_kbdq_core *pkc1,
	struct tpacket_block_desc *pbd1)
{
	struct timespec64 ts;
	struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;

	smp_rmb();

	/* We could have just memset this but we will lose the
	 * flexibility of making the priv area sticky
	 */

	BLOCK_SNUM(pbd1) = pkc1->knxt_seq_num++;
	BLOCK_NUM_PKTS(pbd1) = 0;
	BLOCK_LEN(pbd1) = BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	ktime_get_real_ts64(&ts);

	h1->ts_first_pkt.ts_sec = ts.tv_sec;
	h1->ts_first_pkt.ts_nsec = ts.tv_nsec;

	pkc1->pkblk_start = (char *)pbd1;
	pkc1->nxt_offset = pkc1->pkblk_start + BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);

	BLOCK_O2FP(pbd1) = (__u32)BLK_PLUS_PRIV(pkc1->blk_sizeof_priv);
	BLOCK_O2PRIV(pbd1) = BLK_HDR_LEN;

	pbd1->version = pkc1->version;
	pkc1->prev = pkc1->nxt_offset;
	pkc1->pkblk_end = pkc1->pkblk_start + pkc1->kblk_size;

	prb_thaw_queue(pkc1);
	_prb_refresh_rx_retire_blk_timer(pkc1);

	smp_wmb();
}

/*
 * Queue freeze logic:
 * 1) Assume tp_block_nr = 8 blocks.
 * 2) At time 't0', user opens Rx ring.
 * 3) Some time past 't0', kernel starts filling blocks starting from 0 .. 7
 * 4) user-space is either sleeping or processing block '0'.
 * 5) tpacket_rcv is currently filling block '7'; since there is no space left,
 *    it will close block-7, loop around and try to fill block '0'.
 *    call-flow:
 *    __packet_lookup_frame_in_block
 *      prb_retire_current_block()
 *      prb_dispatch_next_block()
 *        |->(BLOCK_STATUS == USER) evaluates to true
 *    5.1) Since block-0 is currently in-use, we just freeze the queue.
 * 6) Now there are two cases:
 *    6.1) Link goes idle right after the queue is frozen.
 *         But remember, the last open_block() refreshed the timer.
 *         When this timer expires, it will refresh itself so that we can
 *         re-open block-0 in the near future.
 *    6.2) Link is busy and keeps on receiving packets. This is a simple
 *         case and __packet_lookup_frame_in_block will check if block-0
 *         is free and can now be re-used.
 */
static void prb_freeze_queue(struct tpacket_kbdq_core *pkc,
				  struct packet_sock *po)
{
	pkc->reset_pending_on_curr_blk = 1;
	po->stats.stats3.tp_freeze_q_cnt++;
}
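
/* The user-space half of the block life cycle above, as a hedged sketch
 * (illustrative only: assumes a ring configured with PACKET_RX_RING and
 * TPACKET_V3, mmap()ed at "ring"; "walk_packets" is a hypothetical
 * consumer and error handling is elided):
 *
 *	struct tpacket_block_desc *pbd = (struct tpacket_block_desc *)
 *			(ring + i * tp_block_size);
 *
 *	// wait until the kernel retires the block to user space
 *	while (!(pbd->hdr.bh1.block_status & TP_STATUS_USER))
 *		poll(&pfd, 1, -1);
 *
 *	walk_packets(pbd);
 *
 *	// handing the block back is what lets a frozen queue thaw
 *	pbd->hdr.bh1.block_status = TP_STATUS_KERNEL;
 *	i = (i + 1) % tp_block_nr;
 */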

#define TOTAL_PKT_LEN_INCL_ALIGN(length) (ALIGN((length), V3_ALIGNMENT))

/*
 * If the next block is free then we will dispatch it
 * and return a good offset.
 * Else, we will freeze the queue.
 * So, caller must check the return value.
 */
static void *prb_dispatch_next_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po)
{
	struct tpacket_block_desc *pbd;

	smp_rmb();

	/* 1. Get current block num */
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* 2. If this block is currently in_use then freeze the queue */
	if (TP_STATUS_USER & BLOCK_STATUS(pbd)) {
		prb_freeze_queue(pkc, po);
		return NULL;
	}

	/*
	 * 3.
	 * open this block and return the offset where the first packet
	 * needs to get stored.
	 */
	prb_open_block(pkc, pbd);
	return (void *)pkc->nxt_offset;
}

static void prb_retire_current_block(struct tpacket_kbdq_core *pkc,
		struct packet_sock *po, unsigned int status)
{
	struct tpacket_block_desc *pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* retire/close the current block */
	if (likely(TP_STATUS_KERNEL == BLOCK_STATUS(pbd))) {
		/*
		 * Plug the case where copy_bits() is in progress on
		 * cpu-0 and tpacket_rcv() got invoked on cpu-1, didn't
		 * have space to copy the pkt in the current block and
		 * called prb_retire_current_block()
		 *
		 * We don't need to worry about the TMO case because
		 * the timer-handler already handled this case.
		 */
		if (!(status & TP_STATUS_BLK_TMO)) {
			/* Waiting for skb_copy_bits to finish... */
			write_lock(&pkc->blk_fill_in_prog_lock);
			write_unlock(&pkc->blk_fill_in_prog_lock);
		}
		prb_close_block(pkc, pbd, po, status);
		return;
	}
}

static int prb_curr_blk_in_use(struct tpacket_block_desc *pbd)
{
	return TP_STATUS_USER & BLOCK_STATUS(pbd);
}

static int prb_queue_frozen(struct tpacket_kbdq_core *pkc)
{
	return pkc->reset_pending_on_curr_blk;
}

static void prb_clear_blk_fill_status(struct packet_ring_buffer *rb)
	__releases(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);

	read_unlock(&pkc->blk_fill_in_prog_lock);
}

static void prb_fill_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = skb_get_hash(pkc->skb);
}

static void prb_clear_rxhash(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_rxhash = 0;
}

static void prb_fill_vlan_info(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	if (skb_vlan_tag_present(pkc->skb)) {
		ppd->hv1.tp_vlan_tci = skb_vlan_tag_get(pkc->skb);
		ppd->hv1.tp_vlan_tpid = ntohs(pkc->skb->vlan_proto);
		ppd->tp_status = TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
	} else {
		ppd->hv1.tp_vlan_tci = 0;
		ppd->hv1.tp_vlan_tpid = 0;
		ppd->tp_status = TP_STATUS_AVAILABLE;
	}
}

static void prb_run_all_ft_ops(struct tpacket_kbdq_core *pkc,
			struct tpacket3_hdr *ppd)
{
	ppd->hv1.tp_padding = 0;
	prb_fill_vlan_info(pkc, ppd);

	if (pkc->feature_req_word & TP_FT_REQ_FILL_RXHASH)
		prb_fill_rxhash(pkc, ppd);
	else
		prb_clear_rxhash(pkc, ppd);
}

static void prb_fill_curr_block(char *curr,
				struct tpacket_kbdq_core *pkc,
				struct tpacket_block_desc *pbd,
				unsigned int len)
	__acquires(&pkc->blk_fill_in_prog_lock)
{
	struct tpacket3_hdr *ppd;

	ppd = (struct tpacket3_hdr *)curr;
	ppd->tp_next_offset = TOTAL_PKT_LEN_INCL_ALIGN(len);
	pkc->prev = curr;
	pkc->nxt_offset += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_LEN(pbd) += TOTAL_PKT_LEN_INCL_ALIGN(len);
	BLOCK_NUM_PKTS(pbd) += 1;
	read_lock(&pkc->blk_fill_in_prog_lock);
	prb_run_all_ft_ops(pkc, ppd);
}

/* Assumes caller has the sk->rx_queue.lock */
static void *__packet_lookup_frame_in_block(struct packet_sock *po,
					    struct sk_buff *skb,
					    unsigned int len)
{
	struct tpacket_kbdq_core *pkc;
	struct tpacket_block_desc *pbd;
	char *curr, *end;

	pkc = GET_PBDQC_FROM_RB(&po->rx_ring);
	pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);

	/* Queue is frozen when user space is lagging behind */
	if (prb_queue_frozen(pkc)) {
		/*
		 * Check if the last block, which caused the queue to freeze,
		 * is still in_use by user-space.
		 */
		if (prb_curr_blk_in_use(pbd)) {
			/* Can't record this packet */
			return NULL;
		} else {
			/*
			 * Ok, the block was released by user-space.
			 * Now let's open that block.
			 * Opening a block also thaws the queue.
			 * Thawing is a side effect.
			 */
			prb_open_block(pkc, pbd);
		}
	}

	smp_mb();
	curr = pkc->nxt_offset;
	pkc->skb = skb;
	end = (char *)pbd + pkc->kblk_size;

	/* first try the current block */
	if (curr + TOTAL_PKT_LEN_INCL_ALIGN(len) < end) {
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/* Ok, close the current block */
	prb_retire_current_block(pkc, po, 0);

	/* Now, try to dispatch the next block */
	curr = (char *)prb_dispatch_next_block(pkc, po);
	if (curr) {
		pbd = GET_CURR_PBLOCK_DESC_FROM_CORE(pkc);
		prb_fill_curr_block(curr, pkc, pbd, len);
		return (void *)curr;
	}

	/*
	 * No free blocks are available. User-space hasn't caught up yet.
	 * The queue was just frozen and now this packet will get dropped.
	 */
	return NULL;
}

static void *packet_current_rx_frame(struct packet_sock *po,
					    struct sk_buff *skb,
					    int status, unsigned int len)
{
	char *curr = NULL;
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		curr = packet_lookup_frame(po, &po->rx_ring,
					po->rx_ring.head, status);
		return curr;
	case TPACKET_V3:
		return __packet_lookup_frame_in_block(po, skb, len);
	default:
		WARN(1, "TPACKET version not supported\n");
		BUG();
		return NULL;
	}
}

static void *prb_lookup_block(const struct packet_sock *po,
			      const struct packet_ring_buffer *rb,
			      unsigned int idx,
			      int status)
{
	struct tpacket_kbdq_core *pkc = GET_PBDQC_FROM_RB(rb);
	struct tpacket_block_desc *pbd = GET_PBLOCK_DESC(pkc, idx);

	if (status != BLOCK_STATUS(pbd))
		return NULL;
	return pbd;
}

static int prb_previous_blk_num(struct packet_ring_buffer *rb)
{
	unsigned int prev;
	if (rb->prb_bdqc.kactive_blk_num)
		prev = rb->prb_bdqc.kactive_blk_num-1;
	else
		prev = rb->prb_bdqc.knum_blocks-1;
	return prev;
}

/* Assumes caller has held the rx_queue.lock */
static void *__prb_previous_block(struct packet_sock *po,
					 struct packet_ring_buffer *rb,
					 int status)
{
	unsigned int previous = prb_previous_blk_num(rb);
	return prb_lookup_block(po, rb, previous, status);
}

static void *packet_previous_rx_frame(struct packet_sock *po,
					     struct packet_ring_buffer *rb,
					     int status)
{
	if (po->tp_version <= TPACKET_V2)
		return packet_previous_frame(po, rb, status);

	return __prb_previous_block(po, rb, status);
}

static void packet_increment_rx_head(struct packet_sock *po,
					    struct packet_ring_buffer *rb)
{
	switch (po->tp_version) {
	case TPACKET_V1:
	case TPACKET_V2:
		return packet_increment_head(rb);
	case TPACKET_V3:
	default:
		WARN(1, "TPACKET version not supported.\n");
		BUG();
		return;
	}
}

static void *packet_previous_frame(struct packet_sock *po,
		struct packet_ring_buffer *rb,
		int status)
{
	unsigned int previous = rb->head ? rb->head - 1 : rb->frame_max;
	return packet_lookup_frame(po, rb, previous, status);
}

static void packet_increment_head(struct packet_ring_buffer *buff)
{
	buff->head = buff->head != buff->frame_max ? buff->head+1 : 0;
}

static void packet_inc_pending(struct packet_ring_buffer *rb)
{
	this_cpu_inc(*rb->pending_refcnt);
}

static void packet_dec_pending(struct packet_ring_buffer *rb)
{
	this_cpu_dec(*rb->pending_refcnt);
}

static unsigned int packet_read_pending(const struct packet_ring_buffer *rb)
{
	unsigned int refcnt = 0;
	int cpu;

	/* We don't use pending refcount in rx_ring. */
	if (rb->pending_refcnt == NULL)
		return 0;

	for_each_possible_cpu(cpu)
		refcnt += *per_cpu_ptr(rb->pending_refcnt, cpu);

	return refcnt;
}

static int packet_alloc_pending(struct packet_sock *po)
{
	po->rx_ring.pending_refcnt = NULL;

	po->tx_ring.pending_refcnt = alloc_percpu(unsigned int);
	if (unlikely(po->tx_ring.pending_refcnt == NULL))
		return -ENOBUFS;

	return 0;
}

static void packet_free_pending(struct packet_sock *po)
{
	free_percpu(po->tx_ring.pending_refcnt);
}

#define ROOM_POW_OFF	2
#define ROOM_NONE	0x0
#define ROOM_LOW	0x1
#define ROOM_NORMAL	0x2

static bool __tpacket_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.frame_max) + 1;
	idx = READ_ONCE(po->rx_ring.head);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return packet_lookup_frame(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static bool __tpacket_v3_has_room(const struct packet_sock *po, int pow_off)
{
	int idx, len;

	len = READ_ONCE(po->rx_ring.prb_bdqc.knum_blocks);
	idx = READ_ONCE(po->rx_ring.prb_bdqc.kactive_blk_num);
	if (pow_off)
		idx += len >> pow_off;
	if (idx >= len)
		idx -= len;
	return prb_lookup_block(po, &po->rx_ring, idx, TP_STATUS_KERNEL);
}

static int __packet_rcv_has_room(const struct packet_sock *po,
				 const struct sk_buff *skb)
{
	const struct sock *sk = &po->sk;
	int ret = ROOM_NONE;

	if (po->prot_hook.func != tpacket_rcv) {
		int rcvbuf = READ_ONCE(sk->sk_rcvbuf);
		int avail = rcvbuf - atomic_read(&sk->sk_rmem_alloc)
				   - (skb ? skb->truesize : 0);

		if (avail > (rcvbuf >> ROOM_POW_OFF))
			return ROOM_NORMAL;
		else if (avail > 0)
			return ROOM_LOW;
		else
			return ROOM_NONE;
	}

	if (po->tp_version == TPACKET_V3) {
		if (__tpacket_v3_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_v3_has_room(po, 0))
			ret = ROOM_LOW;
	} else {
		if (__tpacket_has_room(po, ROOM_POW_OFF))
			ret = ROOM_NORMAL;
		else if (__tpacket_has_room(po, 0))
			ret = ROOM_LOW;
	}

	return ret;
}

static int packet_rcv_has_room(struct packet_sock *po, struct sk_buff *skb)
{
	int pressure, ret;

	ret = __packet_rcv_has_room(po, skb);
	pressure = ret != ROOM_NORMAL;

	if (READ_ONCE(po->pressure) != pressure)
		WRITE_ONCE(po->pressure, pressure);

	return ret;
}

static void packet_rcv_try_clear_pressure(struct packet_sock *po)
{
	if (READ_ONCE(po->pressure) &&
	    __packet_rcv_has_room(po, NULL) == ROOM_NORMAL)
		WRITE_ONCE(po->pressure, 0);
}

static void packet_sock_destruct(struct sock *sk)
{
	skb_queue_purge(&sk->sk_error_queue);

	WARN_ON(atomic_read(&sk->sk_rmem_alloc));
	WARN_ON(refcount_read(&sk->sk_wmem_alloc));

	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_err("Attempt to release alive packet socket: %p\n", sk);
		return;
	}

	sk_refcnt_debug_dec(sk);
}

static bool fanout_flow_is_huge(struct packet_sock *po, struct sk_buff *skb)
{
	u32 *history = po->rollover->history;
	u32 victim, rxhash;
	int i, count = 0;

	rxhash = skb_get_hash(skb);
	for (i = 0; i < ROLLOVER_HLEN; i++)
		if (READ_ONCE(history[i]) == rxhash)
			count++;

	victim = prandom_u32() % ROLLOVER_HLEN;

	/* Avoid dirtying the cache line if possible */
	if (READ_ONCE(history[victim]) != rxhash)
		WRITE_ONCE(history[victim], rxhash);

	return count > (ROLLOVER_HLEN >> 1);
}

static unsigned int fanout_demux_hash(struct packet_fanout *f,
				      struct sk_buff *skb,
				      unsigned int num)
{
	return reciprocal_scale(__skb_get_hash_symmetric(skb), num);
}

static unsigned int fanout_demux_lb(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	unsigned int val = atomic_inc_return(&f->rr_cur);

	return val % num;
}

static unsigned int fanout_demux_cpu(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return smp_processor_id() % num;
}

static unsigned int fanout_demux_rnd(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	return prandom_u32_max(num);
}

static unsigned int fanout_demux_rollover(struct packet_fanout *f,
					  struct sk_buff *skb,
					  unsigned int idx, bool try_self,
					  unsigned int num)
{
	struct packet_sock *po, *po_next, *po_skip = NULL;
	unsigned int i, j, room = ROOM_NONE;

	po = pkt_sk(rcu_dereference(f->arr[idx]));

	if (try_self) {
		room = packet_rcv_has_room(po, skb);
		if (room == ROOM_NORMAL ||
		    (room == ROOM_LOW && !fanout_flow_is_huge(po, skb)))
			return idx;
		po_skip = po;
	}

	i = j = min_t(int, po->rollover->sock, num - 1);
	do {
		po_next = pkt_sk(rcu_dereference(f->arr[i]));
		if (po_next != po_skip && !READ_ONCE(po_next->pressure) &&
		    packet_rcv_has_room(po_next, skb) == ROOM_NORMAL) {
			if (i != j)
				po->rollover->sock = i;
			atomic_long_inc(&po->rollover->num);
			if (room == ROOM_LOW)
				atomic_long_inc(&po->rollover->num_huge);
			return i;
		}

		if (++i == num)
			i = 0;
	} while (i != j);

	atomic_long_inc(&po->rollover->num_failed);
	return idx;
}

static unsigned int fanout_demux_qm(struct packet_fanout *f,
				    struct sk_buff *skb,
				    unsigned int num)
{
	return skb_get_queue_mapping(skb) % num;
}

static unsigned int fanout_demux_bpf(struct packet_fanout *f,
				     struct sk_buff *skb,
				     unsigned int num)
{
	struct bpf_prog *prog;
	unsigned int ret = 0;

	rcu_read_lock();
	prog = rcu_dereference(f->bpf_prog);
	if (prog)
		ret = bpf_prog_run_clear_cb(prog, skb) % num;
	rcu_read_unlock();

	return ret;
}

static bool fanout_has_flag(struct packet_fanout *f, u16 flag)
{
	return f->flags & (flag >> 8);
}

static int packet_rcv_fanout(struct sk_buff *skb, struct net_device *dev,
			     struct packet_type *pt, struct net_device *orig_dev)
{
	struct packet_fanout *f = pt->af_packet_priv;
	unsigned int num = READ_ONCE(f->num_members);
	struct net *net = read_pnet(&f->net);
	struct packet_sock *po;
	unsigned int idx;

	if (!net_eq(dev_net(dev), net) || !num) {
		kfree_skb(skb);
		return 0;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_DEFRAG)) {
		skb = ip_check_defrag(net, skb, IP_DEFRAG_AF_PACKET);
		if (!skb)
			return 0;
	}
	switch (f->type) {
	case PACKET_FANOUT_HASH:
	default:
		idx = fanout_demux_hash(f, skb, num);
		break;
	case PACKET_FANOUT_LB:
		idx = fanout_demux_lb(f, skb, num);
		break;
	case PACKET_FANOUT_CPU:
		idx = fanout_demux_cpu(f, skb, num);
		break;
	case PACKET_FANOUT_RND:
		idx = fanout_demux_rnd(f, skb, num);
		break;
	case PACKET_FANOUT_QM:
		idx = fanout_demux_qm(f, skb, num);
		break;
	case PACKET_FANOUT_ROLLOVER:
		idx = fanout_demux_rollover(f, skb, 0, false, num);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		idx = fanout_demux_bpf(f, skb, num);
		break;
	}

	if (fanout_has_flag(f, PACKET_FANOUT_FLAG_ROLLOVER))
		idx = fanout_demux_rollover(f, skb, idx, true, num);

	po = pkt_sk(rcu_dereference(f->arr[idx]));
	return po->prot_hook.func(skb, dev, &po->prot_hook, orig_dev);
}
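
/* Joining a fanout group from user space, as a hedged sketch of the
 * PACKET_FANOUT option implemented here (the group id 42 and the mode
 * are illustrative):
 *
 *	#include <linux/if_packet.h>
 *
 *	int val = 42 | (PACKET_FANOUT_HASH << 16);	// id | (type << 16)
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &val, sizeof(val));
 *
 * Every socket that binds the same id in the same netns joins one
 * packet_fanout, and packet_rcv_fanout() then steers each skb to one
 * member according to the demux mode chosen above.
 */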

DEFINE_MUTEX(fanout_mutex);
EXPORT_SYMBOL_GPL(fanout_mutex);
static LIST_HEAD(fanout_list);
static u16 fanout_next_id;

static void __fanout_link(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;

	spin_lock(&f->lock);
	rcu_assign_pointer(f->arr[f->num_members], sk);
	smp_wmb();
	f->num_members++;
	if (f->num_members == 1)
		dev_add_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static void __fanout_unlink(struct sock *sk, struct packet_sock *po)
{
	struct packet_fanout *f = po->fanout;
	int i;

	spin_lock(&f->lock);
	for (i = 0; i < f->num_members; i++) {
		if (rcu_dereference_protected(f->arr[i],
					      lockdep_is_held(&f->lock)) == sk)
			break;
	}
	BUG_ON(i >= f->num_members);
	rcu_assign_pointer(f->arr[i],
			   rcu_dereference_protected(f->arr[f->num_members - 1],
						     lockdep_is_held(&f->lock)));
	f->num_members--;
	if (f->num_members == 0)
		__dev_remove_pack(&f->prot_hook);
	spin_unlock(&f->lock);
}

static bool match_fanout_group(struct packet_type *ptype, struct sock *sk)
{
	if (sk->sk_family != PF_PACKET)
		return false;

	return ptype->af_packet_priv == pkt_sk(sk)->fanout;
}

static void fanout_init_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_LB:
		atomic_set(&f->rr_cur, 0);
		break;
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		RCU_INIT_POINTER(f->bpf_prog, NULL);
		break;
	}
}

static void __fanout_set_data_bpf(struct packet_fanout *f, struct bpf_prog *new)
{
	struct bpf_prog *old;

	spin_lock(&f->lock);
	old = rcu_dereference_protected(f->bpf_prog, lockdep_is_held(&f->lock));
	rcu_assign_pointer(f->bpf_prog, new);
	spin_unlock(&f->lock);

	if (old) {
		synchronize_net();
		bpf_prog_destroy(old);
	}
}

static int fanout_set_data_cbpf(struct packet_sock *po, sockptr_t data,
				unsigned int len)
{
	struct bpf_prog *new;
	struct sock_fprog fprog;
	int ret;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;

	ret = copy_bpf_fprog_from_user(&fprog, data, len);
	if (ret)
		return ret;

	ret = bpf_prog_create_from_user(&new, &fprog, NULL, false);
	if (ret)
		return ret;

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data_ebpf(struct packet_sock *po, sockptr_t data,
				unsigned int len)
{
	struct bpf_prog *new;
	u32 fd;

	if (sock_flag(&po->sk, SOCK_FILTER_LOCKED))
		return -EPERM;
	if (len != sizeof(fd))
		return -EINVAL;
	if (copy_from_sockptr(&fd, data, len))
		return -EFAULT;

	new = bpf_prog_get_type(fd, BPF_PROG_TYPE_SOCKET_FILTER);
	if (IS_ERR(new))
		return PTR_ERR(new);

	__fanout_set_data_bpf(po->fanout, new);
	return 0;
}

static int fanout_set_data(struct packet_sock *po, sockptr_t data,
			   unsigned int len)
{
	switch (po->fanout->type) {
	case PACKET_FANOUT_CBPF:
		return fanout_set_data_cbpf(po, data, len);
	case PACKET_FANOUT_EBPF:
		return fanout_set_data_ebpf(po, data, len);
	default:
		return -EINVAL;
	}
}
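
/* For PACKET_FANOUT_EBPF, fanout_set_data_ebpf() expects the fd of an
 * already-loaded BPF_PROG_TYPE_SOCKET_FILTER program. A hedged
 * user-space sketch ("prog_fd" is assumed to come from a prior
 * bpf(BPF_PROG_LOAD) call):
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT_DATA,
 *		   &prog_fd, sizeof(prog_fd));
 *
 * The program's return value, reduced modulo the group size in
 * fanout_demux_bpf(), selects the receiving member socket.
 */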

static void fanout_release_data(struct packet_fanout *f)
{
	switch (f->type) {
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		__fanout_set_data_bpf(f, NULL);
	}
}

static bool __fanout_id_is_free(struct sock *sk, u16 candidate_id)
{
	struct packet_fanout *f;

	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == candidate_id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			return false;
		}
	}
	return true;
}

static bool fanout_find_new_id(struct sock *sk, u16 *new_id)
{
	u16 id = fanout_next_id;

	do {
		if (__fanout_id_is_free(sk, id)) {
			*new_id = id;
			fanout_next_id = id + 1;
			return true;
		}

		id++;
	} while (id != fanout_next_id);

	return false;
}

static int fanout_add(struct sock *sk, struct fanout_args *args)
{
	struct packet_rollover *rollover = NULL;
	struct packet_sock *po = pkt_sk(sk);
	u16 type_flags = args->type_flags;
	struct packet_fanout *f, *match;
	u8 type = type_flags & 0xff;
	u8 flags = type_flags >> 8;
	u16 id = args->id;
	int err;

	switch (type) {
	case PACKET_FANOUT_ROLLOVER:
		if (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)
			return -EINVAL;
	case PACKET_FANOUT_HASH:
	case PACKET_FANOUT_LB:
	case PACKET_FANOUT_CPU:
	case PACKET_FANOUT_RND:
	case PACKET_FANOUT_QM:
	case PACKET_FANOUT_CBPF:
	case PACKET_FANOUT_EBPF:
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&fanout_mutex);

	err = -EALREADY;
	if (po->fanout)
		goto out;

	if (type == PACKET_FANOUT_ROLLOVER ||
	    (type_flags & PACKET_FANOUT_FLAG_ROLLOVER)) {
		err = -ENOMEM;
		rollover = kzalloc(sizeof(*rollover), GFP_KERNEL);
		if (!rollover)
			goto out;
		atomic_long_set(&rollover->num, 0);
		atomic_long_set(&rollover->num_huge, 0);
		atomic_long_set(&rollover->num_failed, 0);
	}

	if (type_flags & PACKET_FANOUT_FLAG_UNIQUEID) {
		if (id != 0) {
			err = -EINVAL;
			goto out;
		}
		if (!fanout_find_new_id(sk, &id)) {
			err = -ENOMEM;
			goto out;
		}
		/* ephemeral flag for the first socket in the group: drop it */
		flags &= ~(PACKET_FANOUT_FLAG_UNIQUEID >> 8);
	}

	match = NULL;
	list_for_each_entry(f, &fanout_list, list) {
		if (f->id == id &&
		    read_pnet(&f->net) == sock_net(sk)) {
			match = f;
			break;
		}
	}
	err = -EINVAL;
	if (match) {
		if (match->flags != flags)
			goto out;
		if (args->max_num_members &&
		    args->max_num_members != match->max_num_members)
			goto out;
	} else {
		if (args->max_num_members > PACKET_FANOUT_MAX)
			goto out;
		if (!args->max_num_members)
			/* legacy PACKET_FANOUT_MAX */
			args->max_num_members = 256;
		err = -ENOMEM;
		match = kvzalloc(struct_size(match, arr, args->max_num_members),
				 GFP_KERNEL);
		if (!match)
			goto out;
		write_pnet(&match->net, sock_net(sk));
		match->id = id;
		match->type = type;
		match->flags = flags;
		INIT_LIST_HEAD(&match->list);
		spin_lock_init(&match->lock);
		refcount_set(&match->sk_ref, 0);
		fanout_init_data(match);
		match->prot_hook.type = po->prot_hook.type;
		match->prot_hook.dev = po->prot_hook.dev;
		match->prot_hook.func = packet_rcv_fanout;
		match->prot_hook.af_packet_priv = match;
		match->prot_hook.id_match = match_fanout_group;
		match->max_num_members = args->max_num_members;
		list_add(&match->list, &fanout_list);
	}
	err = -EINVAL;

	spin_lock(&po->bind_lock);
	if (po->running &&
	    match->type == type &&
	    match->prot_hook.type == po->prot_hook.type &&
	    match->prot_hook.dev == po->prot_hook.dev) {
		err = -ENOSPC;
		if (refcount_read(&match->sk_ref) < match->max_num_members) {
			__dev_remove_pack(&po->prot_hook);
			po->fanout = match;
			po->rollover = rollover;
			rollover = NULL;
			refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1);
			__fanout_link(sk, po);
			err = 0;
		}
	}
	spin_unlock(&po->bind_lock);

	if (err && !refcount_read(&match->sk_ref)) {
		list_del(&match->list);
		kvfree(match);
	}

out:
	kfree(rollover);
	mutex_unlock(&fanout_mutex);
	return err;
}

/* If pkt_sk(sk)->fanout->sk_ref is zero, this function removes
 * pkt_sk(sk)->fanout from fanout_list and returns pkt_sk(sk)->fanout.
 * It is the responsibility of the caller to call fanout_release_data() and
 * free the returned packet_fanout (after synchronize_net())
 */
static struct packet_fanout *fanout_release(struct sock *sk)
{
	struct packet_sock *po = pkt_sk(sk);
	struct packet_fanout *f;

	mutex_lock(&fanout_mutex);
	f = po->fanout;
	if (f) {
		po->fanout = NULL;

		if (refcount_dec_and_test(&f->sk_ref))
			list_del(&f->list);
		else
			f = NULL;
	}
	mutex_unlock(&fanout_mutex);

	return f;
}

static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
					  struct sk_buff *skb)
{
	/* Earlier code assumed this would be a VLAN pkt, double-check
	 * this now that we have the actual packet in hand. We can only
	 * do this check on Ethernet devices.
	 */
	if (unlikely(dev->type != ARPHRD_ETHER))
		return false;

	skb_reset_mac_header(skb);
	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
}

static const struct proto_ops packet_ops;

static const struct proto_ops packet_ops_spkt;

static int packet_rcv_spkt(struct sk_buff *skb, struct net_device *dev,
			   struct packet_type *pt, struct net_device *orig_dev)
{
	struct sock *sk;
	struct sockaddr_pkt *spkt;

	/*
	 * When we registered the protocol we saved the socket in the data
	 * field for just this event.
	 */

	sk = pt->af_packet_priv;

	/*
	 * Yank back the headers [hope the device set this
	 * right or kerboom...]
	 *
	 * Incoming packets have the ll header pulled,
	 * push it back.
	 *
	 * For outgoing ones skb->data == skb_mac_header(skb)
	 * so that this procedure is a no-op.
	 */

	if (skb->pkt_type == PACKET_LOOPBACK)
		goto out;

	if (!net_eq(dev_net(dev), sock_net(sk)))
		goto out;

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (skb == NULL)
		goto oom;

	/* drop any routing info */
	skb_dst_drop(skb);

	/* drop conntrack reference */
	nf_reset_ct(skb);

	spkt = &PACKET_SKB_CB(skb)->sa.pkt;

	skb_push(skb, skb->data - skb_mac_header(skb));

	/*
	 * The SOCK_PACKET socket receives _all_ frames.
	 */

	spkt->spkt_family = dev->type;
	strlcpy(spkt->spkt_device, dev->name, sizeof(spkt->spkt_device));
	spkt->spkt_protocol = skb->protocol;

	/*
	 * Charge the memory to the socket. This is done specifically
	 * to prevent sockets using all the memory up.
	 */

	if (sock_queue_rcv_skb(sk, skb) == 0)
		return 0;

out:
	kfree_skb(skb);
oom:
	return 0;
}

static void packet_parse_headers(struct sk_buff *skb, struct socket *sock)
{
	if ((!skb->protocol || skb->protocol == htons(ETH_P_ALL)) &&
	    sock->type == SOCK_RAW) {
		skb_reset_mac_header(skb);
		skb->protocol = dev_parse_header_protocol(skb);
	}

	skb_probe_transport_header(skb);
}

/*
 * Output a raw packet to a device layer. This bypasses all the other
 * protocol layers and you must therefore supply it with a complete frame
 */

static int packet_sendmsg_spkt(struct socket *sock, struct msghdr *msg,
			       size_t len)
{
	struct sock *sk = sock->sk;
	DECLARE_SOCKADDR(struct sockaddr_pkt *, saddr, msg->msg_name);
	struct sk_buff *skb = NULL;
	struct net_device *dev;
	struct sockcm_cookie sockc;
	__be16 proto = 0;
	int err;
	int extra_len = 0;

	/*
	 *	Get and verify the address.
	 */

	if (saddr) {
		if (msg->msg_namelen < sizeof(struct sockaddr))
			return -EINVAL;
		if (msg->msg_namelen == sizeof(struct sockaddr_pkt))
			proto = saddr->spkt_protocol;
	} else
		return -ENOTCONN;	/* SOCK_PACKET must be sent giving an address */

	/*
	 *	Find the device first to size check it
	 */

	saddr->spkt_device[sizeof(saddr->spkt_device) - 1] = 0;
retry:
	rcu_read_lock();
	dev = dev_get_by_name_rcu(sock_net(sk), saddr->spkt_device);
	err = -ENODEV;
	if (dev == NULL)
		goto out_unlock;

	err = -ENETDOWN;
	if (!(dev->flags & IFF_UP))
		goto out_unlock;

	/*
	 * You may not queue a frame bigger than the mtu. This is the lowest level
	 * raw protocol and you must do your own fragmentation at this level.
	 */

	if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
		if (!netif_supports_nofcs(dev)) {
			err = -EPROTONOSUPPORT;
			goto out_unlock;
		}
		extra_len = 4; /* We're doing our own CRC */
	}

	err = -EMSGSIZE;
	if (len > dev->mtu + dev->hard_header_len + VLAN_HLEN + extra_len)
		goto out_unlock;

	if (!skb) {
		size_t reserved = LL_RESERVED_SPACE(dev);
		int tlen = dev->needed_tailroom;
		unsigned int hhlen = dev->header_ops ? dev->hard_header_len : 0;

		rcu_read_unlock();
		skb = sock_wmalloc(sk, len + reserved + tlen, 0, GFP_KERNEL);
		if (skb == NULL)
			return -ENOBUFS;
		/* FIXME: Save some space for broken drivers that write a hard
		 * header at transmission time by themselves. PPP is the notable
		 * one here. This should really be fixed at the driver level.
		 */
		skb_reserve(skb, reserved);
		skb_reset_network_header(skb);

		/* Try to align data part correctly */
		if (hhlen) {
			skb->data -= hhlen;
			skb->tail -= hhlen;
			if (len < hhlen)
				skb_reset_network_header(skb);
		}
		err = memcpy_from_msg(skb_put(skb, len), msg, len);
		if (err)
			goto out_free;
		goto retry;
	}

	if (!dev_validate_header(dev, skb->data, len)) {
		err = -EINVAL;
		goto out_unlock;
	}
	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
	    !packet_extra_vlan_len_allowed(dev, skb)) {
		err = -EMSGSIZE;
		goto out_unlock;
	}

	sockcm_init(&sockc, sk);
	if (msg->msg_controllen) {
		err = sock_cmsg_send(sk, msg, &sockc);
		if (unlikely(err))
			goto out_unlock;
	}

	skb->protocol = proto;
	skb->dev = dev;
	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb->tstamp = sockc.transmit_time;

	skb_setup_tx_timestamp(skb, sockc.tsflags);

	if (unlikely(extra_len == 4))
		skb->no_fcs = 1;

	packet_parse_headers(skb, sock);

	dev_queue_xmit(skb);
	rcu_read_unlock();
	return len;

out_unlock:
	rcu_read_unlock();
out_free:
	kfree_skb(skb);
	return err;
}
2025
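/* run_filter() below consults the classic BPF program, if any, that
 * userspace attached with SO_ATTACH_FILTER; the program's return value
 * caps the snapshot length. A hedged sketch, fd being the packet socket
 * (accept everything, but truncate to 96 bytes; needs <linux/filter.h>):
 *
 *	struct sock_filter code[] = {
 *		{ BPF_RET | BPF_K, 0, 0, 96 },
 *	};
 *	struct sock_fprog prog = { .len = 1, .filter = code };
 *
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &prog, sizeof(prog));
 */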
2026static unsigned int run_filter(struct sk_buff *skb,
2027 const struct sock *sk,
2028 unsigned int res)
2029{
2030 struct sk_filter *filter;
2031
2032 rcu_read_lock();
2033 filter = rcu_dereference(sk->sk_filter);
2034 if (filter != NULL)
2035 res = bpf_prog_run_clear_cb(filter->prog, skb);
2036 rcu_read_unlock();
2037
2038 return res;
2039}
2040
2041static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
2042 size_t *len)
2043{
2044 struct virtio_net_hdr vnet_hdr;
2045
2046 if (*len < sizeof(vnet_hdr))
2047 return -EINVAL;
2048 *len -= sizeof(vnet_hdr);
2049
2050 if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true, 0))
2051 return -EINVAL;
2052
2053 return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
2054}
2055
2056/*
2057 * This function does lazy skb cloning in the hope that most of the
2058 * packets are discarded by BPF.
2059 *
2060 * Note the tricky part: we DO mangle the shared skb! skb->data, skb->len
2061 * and skb->cb are mangled. It works because (and until) packets
2062 * falling here are owned by the current CPU. Output packets are cloned
2063 * by dev_queue_xmit_nit(), input packets are processed by net_bh
2064 * sequentially, so if we return the skb to its original state on exit,
2065 * we will not harm anyone.
2066 */
2067
2068static int packet_rcv(struct sk_buff *skb, struct net_device *dev,
2069 struct packet_type *pt, struct net_device *orig_dev)
2070{
2071 struct sock *sk;
2072 struct sockaddr_ll *sll;
2073 struct packet_sock *po;
2074 u8 *skb_head = skb->data;
2075 int skb_len = skb->len;
2076 unsigned int snaplen, res;
2077 bool is_drop_n_account = false;
2078
2079 if (skb->pkt_type == PACKET_LOOPBACK)
2080 goto drop;
2081
2082 sk = pt->af_packet_priv;
2083 po = pkt_sk(sk);
2084
2085 if (!net_eq(dev_net(dev), sock_net(sk)))
2086 goto drop;
2087
2088 skb->dev = dev;
2089
2090 if (dev_has_header(dev)) {
2091 /* The device has an explicit notion of an LL header,
2092 * exported to higher levels.
2093 *
2094 * Otherwise, the device hides the details of its frame
2095 * structure, so that the corresponding packet head is
2096 * never delivered to the user.
2097 */
2098 if (sk->sk_type != SOCK_DGRAM)
2099 skb_push(skb, skb->data - skb_mac_header(skb));
2100 else if (skb->pkt_type == PACKET_OUTGOING) {
2101 /* Special case: outgoing packets have ll header at head */
2102 skb_pull(skb, skb_network_offset(skb));
2103 }
2104 }
2105
2106 snaplen = skb->len;
2107
2108 res = run_filter(skb, sk, snaplen);
2109 if (!res)
2110 goto drop_n_restore;
2111 if (snaplen > res)
2112 snaplen = res;
2113
2114 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
2115 goto drop_n_acct;
2116
2117 if (skb_shared(skb)) {
2118 struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
2119 if (nskb == NULL)
2120 goto drop_n_acct;
2121
2122 if (skb_head != skb->data) {
2123 skb->data = skb_head;
2124 skb->len = skb_len;
2125 }
2126 consume_skb(skb);
2127 skb = nskb;
2128 }
2129
2130 sock_skb_cb_check_size(sizeof(*PACKET_SKB_CB(skb)) + MAX_ADDR_LEN - 8);
2131
2132 sll = &PACKET_SKB_CB(skb)->sa.ll;
2133 sll->sll_hatype = dev->type;
2134 sll->sll_pkttype = skb->pkt_type;
2135 if (unlikely(po->origdev))
2136 sll->sll_ifindex = orig_dev->ifindex;
2137 else
2138 sll->sll_ifindex = dev->ifindex;
2139
2140 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2141
2142 /* sll->sll_family and sll->sll_protocol are set in packet_recvmsg().
2143 * Use their space for storing the original skb length.
2144 */
2145 PACKET_SKB_CB(skb)->sa.origlen = skb->len;
2146
2147 if (pskb_trim(skb, snaplen))
2148 goto drop_n_acct;
2149
2150 skb_set_owner_r(skb, sk);
2151 skb->dev = NULL;
2152 skb_dst_drop(skb);
2153
2154 /* drop conntrack reference */
2155 nf_reset_ct(skb);
2156
2157 spin_lock(&sk->sk_receive_queue.lock);
2158 po->stats.stats1.tp_packets++;
2159 sock_skb_set_dropcount(sk, skb);
2160 __skb_queue_tail(&sk->sk_receive_queue, skb);
2161 spin_unlock(&sk->sk_receive_queue.lock);
2162 sk->sk_data_ready(sk);
2163 return 0;
2164
2165drop_n_acct:
2166 is_drop_n_account = true;
2167 atomic_inc(&po->tp_drops);
2168 atomic_inc(&sk->sk_drops);
2169
2170drop_n_restore:
2171 if (skb_head != skb->data && skb_shared(skb)) {
2172 skb->data = skb_head;
2173 skb->len = skb_len;
2174 }
2175drop:
2176 if (!is_drop_n_account)
2177 consume_skb(skb);
2178 else
2179 kfree_skb(skb);
2180 return 0;
2181}
2182
2183static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
2184 struct packet_type *pt, struct net_device *orig_dev)
2185{
2186 struct sock *sk;
2187 struct packet_sock *po;
2188 struct sockaddr_ll *sll;
2189 union tpacket_uhdr h;
2190 u8 *skb_head = skb->data;
2191 int skb_len = skb->len;
2192 unsigned int snaplen, res;
2193 unsigned long status = TP_STATUS_USER;
2194 unsigned short macoff, hdrlen;
2195 unsigned int netoff;
2196 struct sk_buff *copy_skb = NULL;
2197 struct timespec64 ts;
2198 __u32 ts_status;
2199 bool is_drop_n_account = false;
2200 unsigned int slot_id = 0;
2201 bool do_vnet = false;
2202
2203 /* struct tpacket{2,3}_hdr is aligned to a multiple of TPACKET_ALIGNMENT.
2204 * We may add members to them up to the current aligned size without
2205 * forcing userspace to call getsockopt(..., PACKET_HDRLEN, ...).
2206 */
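	/* For reference, a hedged userspace sketch of that query, asking
	 * for the V2 header size (fd is the packet socket):
	 *
	 *	int val = TPACKET_V2;
	 *	socklen_t len = sizeof(val);
	 *
	 *	getsockopt(fd, SOL_PACKET, PACKET_HDRLEN, &val, &len);
	 *	// val now holds sizeof(struct tpacket2_hdr)
	 */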
2207 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h2)) != 32);
2208 BUILD_BUG_ON(TPACKET_ALIGN(sizeof(*h.h3)) != 48);
2209
2210 if (skb->pkt_type == PACKET_LOOPBACK)
2211 goto drop;
2212
2213 sk = pt->af_packet_priv;
2214 po = pkt_sk(sk);
2215
2216 if (!net_eq(dev_net(dev), sock_net(sk)))
2217 goto drop;
2218
2219 if (dev_has_header(dev)) {
2220 if (sk->sk_type != SOCK_DGRAM)
2221 skb_push(skb, skb->data - skb_mac_header(skb));
2222 else if (skb->pkt_type == PACKET_OUTGOING) {
2223 /* Special case: outgoing packets have ll header at head */
2224 skb_pull(skb, skb_network_offset(skb));
2225 }
2226 }
2227
2228 snaplen = skb->len;
2229
2230 res = run_filter(skb, sk, snaplen);
2231 if (!res)
2232 goto drop_n_restore;
2233
2234 /* If we are flooded, just give up */
2235 if (__packet_rcv_has_room(po, skb) == ROOM_NONE) {
2236 atomic_inc(&po->tp_drops);
2237 goto drop_n_restore;
2238 }
2239
2240 if (skb->ip_summed == CHECKSUM_PARTIAL)
2241 status |= TP_STATUS_CSUMNOTREADY;
2242 else if (skb->pkt_type != PACKET_OUTGOING &&
2243 (skb->ip_summed == CHECKSUM_COMPLETE ||
2244 skb_csum_unnecessary(skb)))
2245 status |= TP_STATUS_CSUM_VALID;
2246
2247 if (snaplen > res)
2248 snaplen = res;
2249
2250 if (sk->sk_type == SOCK_DGRAM) {
2251 macoff = netoff = TPACKET_ALIGN(po->tp_hdrlen) + 16 +
2252 po->tp_reserve;
2253 } else {
2254 unsigned int maclen = skb_network_offset(skb);
2255 netoff = TPACKET_ALIGN(po->tp_hdrlen +
2256 (maclen < 16 ? 16 : maclen)) +
2257 po->tp_reserve;
2258 if (po->has_vnet_hdr) {
2259 netoff += sizeof(struct virtio_net_hdr);
2260 do_vnet = true;
2261 }
2262 macoff = netoff - maclen;
2263 }
2264 if (netoff > USHRT_MAX) {
2265 atomic_inc(&po->tp_drops);
2266 goto drop_n_restore;
2267 }
2268 if (po->tp_version <= TPACKET_V2) {
2269 if (macoff + snaplen > po->rx_ring.frame_size) {
2270 if (po->copy_thresh &&
2271 atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf) {
2272 if (skb_shared(skb)) {
2273 copy_skb = skb_clone(skb, GFP_ATOMIC);
2274 } else {
2275 copy_skb = skb_get(skb);
2276 skb_head = skb->data;
2277 }
2278 if (copy_skb)
2279 skb_set_owner_r(copy_skb, sk);
2280 }
2281 snaplen = po->rx_ring.frame_size - macoff;
2282 if ((int)snaplen < 0) {
2283 snaplen = 0;
2284 do_vnet = false;
2285 }
2286 }
2287 } else if (unlikely(macoff + snaplen >
2288 GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len)) {
2289 u32 nval;
2290
2291 nval = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len - macoff;
2292 pr_err_once("tpacket_rcv: packet too big, clamped from %u to %u. macoff=%u\n",
2293 snaplen, nval, macoff);
2294 snaplen = nval;
2295 if (unlikely((int)snaplen < 0)) {
2296 snaplen = 0;
2297 macoff = GET_PBDQC_FROM_RB(&po->rx_ring)->max_frame_len;
2298 do_vnet = false;
2299 }
2300 }
2301 spin_lock(&sk->sk_receive_queue.lock);
2302 h.raw = packet_current_rx_frame(po, skb,
2303 TP_STATUS_KERNEL, (macoff+snaplen));
2304 if (!h.raw)
2305 goto drop_n_account;
2306
2307 if (po->tp_version <= TPACKET_V2) {
2308 slot_id = po->rx_ring.head;
2309 if (test_bit(slot_id, po->rx_ring.rx_owner_map))
2310 goto drop_n_account;
2311 __set_bit(slot_id, po->rx_ring.rx_owner_map);
2312 }
2313
2314 if (do_vnet &&
2315 virtio_net_hdr_from_skb(skb, h.raw + macoff -
2316 sizeof(struct virtio_net_hdr),
2317 vio_le(), true, 0)) {
2318 if (po->tp_version == TPACKET_V3)
2319 prb_clear_blk_fill_status(&po->rx_ring);
2320 goto drop_n_account;
2321 }
2322
2323 if (po->tp_version <= TPACKET_V2) {
2324 packet_increment_rx_head(po, &po->rx_ring);
2325 /*
2326 * LOSING will be reported until you read the stats,
2327 * because the drop counter is COR - Clear On Read.
2328 * This is done for V1/V2 only, as V3 doesn't need it
2329 * at the packet level.
2330 */
2331 if (atomic_read(&po->tp_drops))
2332 status |= TP_STATUS_LOSING;
2333 }
2334
2335 po->stats.stats1.tp_packets++;
2336 if (copy_skb) {
2337 status |= TP_STATUS_COPY;
2338 __skb_queue_tail(&sk->sk_receive_queue, copy_skb);
2339 }
2340 spin_unlock(&sk->sk_receive_queue.lock);
2341
2342 skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
2343
2344 /* Always timestamp; prefer an existing software timestamp taken
2345 * closer to the time of capture.
2346 */
2347 ts_status = tpacket_get_timestamp(skb, &ts,
2348 po->tp_tstamp | SOF_TIMESTAMPING_SOFTWARE);
2349 if (!ts_status)
2350 ktime_get_real_ts64(&ts);
2351
2352 status |= ts_status;
2353
2354 switch (po->tp_version) {
2355 case TPACKET_V1:
2356 h.h1->tp_len = skb->len;
2357 h.h1->tp_snaplen = snaplen;
2358 h.h1->tp_mac = macoff;
2359 h.h1->tp_net = netoff;
2360 h.h1->tp_sec = ts.tv_sec;
2361 h.h1->tp_usec = ts.tv_nsec / NSEC_PER_USEC;
2362 hdrlen = sizeof(*h.h1);
2363 break;
2364 case TPACKET_V2:
2365 h.h2->tp_len = skb->len;
2366 h.h2->tp_snaplen = snaplen;
2367 h.h2->tp_mac = macoff;
2368 h.h2->tp_net = netoff;
2369 h.h2->tp_sec = ts.tv_sec;
2370 h.h2->tp_nsec = ts.tv_nsec;
2371 if (skb_vlan_tag_present(skb)) {
2372 h.h2->tp_vlan_tci = skb_vlan_tag_get(skb);
2373 h.h2->tp_vlan_tpid = ntohs(skb->vlan_proto);
2374 status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
2375 } else {
2376 h.h2->tp_vlan_tci = 0;
2377 h.h2->tp_vlan_tpid = 0;
2378 }
2379 memset(h.h2->tp_padding, 0, sizeof(h.h2->tp_padding));
2380 hdrlen = sizeof(*h.h2);
2381 break;
2382 case TPACKET_V3:
2383 /* tp_next_offset and the vlan fields are already populated
2384 * above, so DON'T clear them here.
2385 */
2386 h.h3->tp_status |= status;
2387 h.h3->tp_len = skb->len;
2388 h.h3->tp_snaplen = snaplen;
2389 h.h3->tp_mac = macoff;
2390 h.h3->tp_net = netoff;
2391 h.h3->tp_sec = ts.tv_sec;
2392 h.h3->tp_nsec = ts.tv_nsec;
2393 memset(h.h3->tp_padding, 0, sizeof(h.h3->tp_padding));
2394 hdrlen = sizeof(*h.h3);
2395 break;
2396 default:
2397 BUG();
2398 }
2399
2400 sll = h.raw + TPACKET_ALIGN(hdrlen);
2401 sll->sll_halen = dev_parse_header(skb, sll->sll_addr);
2402 sll->sll_family = AF_PACKET;
2403 sll->sll_hatype = dev->type;
2404 sll->sll_protocol = skb->protocol;
2405 sll->sll_pkttype = skb->pkt_type;
2406 if (unlikely(po->origdev))
2407 sll->sll_ifindex = orig_dev->ifindex;
2408 else
2409 sll->sll_ifindex = dev->ifindex;
2410
2411 smp_mb();
2412
2413#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE == 1
2414 if (po->tp_version <= TPACKET_V2) {
2415 u8 *start, *end;
2416
2417 end = (u8 *) PAGE_ALIGN((unsigned long) h.raw +
2418 macoff + snaplen);
2419
2420 for (start = h.raw; start < end; start += PAGE_SIZE)
2421 flush_dcache_page(pgv_to_page(start));
2422 }
2423 smp_wmb();
2424#endif
2425
2426 if (po->tp_version <= TPACKET_V2) {
2427 spin_lock(&sk->sk_receive_queue.lock);
2428 __packet_set_status(po, h.raw, status);
2429 __clear_bit(slot_id, po->rx_ring.rx_owner_map);
2430 spin_unlock(&sk->sk_receive_queue.lock);
2431 sk->sk_data_ready(sk);
2432 } else if (po->tp_version == TPACKET_V3) {
2433 prb_clear_blk_fill_status(&po->rx_ring);
2434 }
2435
2436drop_n_restore:
2437 if (skb_head != skb->data && skb_shared(skb)) {
2438 skb->data = skb_head;
2439 skb->len = skb_len;
2440 }
2441drop:
2442 if (!is_drop_n_account)
2443 consume_skb(skb);
2444 else
2445 kfree_skb(skb);
2446 return 0;
2447
2448drop_n_account:
2449 spin_unlock(&sk->sk_receive_queue.lock);
2450 atomic_inc(&po->tp_drops);
2451 is_drop_n_account = true;
2452
2453 sk->sk_data_ready(sk);
2454 kfree_skb(copy_skb);
2455 goto drop_n_restore;
2456}
2457
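/* tpacket_destruct_skb() is what eventually hands a TX ring slot back to
 * userspace: once the skb is consumed, the slot status returns to
 * TP_STATUS_AVAILABLE (possibly OR'ed with timestamp status bits). A
 * hedged sketch of the matching userspace wait, where slot (an
 * assumption) points into the mmap()ed TX ring:
 *
 *	struct tpacket_hdr *hdr = slot;
 *
 *	while (hdr->tp_status & (TP_STATUS_SEND_REQUEST | TP_STATUS_SENDING))
 *		usleep(10);
 *	// slot is free for the next frame
 */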
2458static void tpacket_destruct_skb(struct sk_buff *skb)
2459{
2460 struct packet_sock *po = pkt_sk(skb->sk);
2461
2462 if (likely(po->tx_ring.pg_vec)) {
2463 void *ph;
2464 __u32 ts;
2465
2466 ph = skb_zcopy_get_nouarg(skb);
2467 packet_dec_pending(&po->tx_ring);
2468
2469 ts = __packet_set_timestamp(po, ph, skb);
2470 __packet_set_status(po, ph, TP_STATUS_AVAILABLE | ts);
2471
2472 if (!packet_read_pending(&po->tx_ring))
2473 complete(&po->skb_completion);
2474 }
2475
2476 sock_wfree(skb);
2477}
2478
2479static int __packet_snd_vnet_parse(struct virtio_net_hdr *vnet_hdr, size_t len)
2480{
2481 if ((vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
2482 (__virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2483 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2 >
2484 __virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len)))
2485 vnet_hdr->hdr_len = __cpu_to_virtio16(vio_le(),
2486 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_start) +
2487 __virtio16_to_cpu(vio_le(), vnet_hdr->csum_offset) + 2);
2488
2489 if (__virtio16_to_cpu(vio_le(), vnet_hdr->hdr_len) > len)
2490 return -EINVAL;
2491
2492 return 0;
2493}
2494
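/* With PACKET_VNET_HDR enabled, every packet handed to sendmsg() must be
 * prefixed with a struct virtio_net_hdr, which the two parsers below
 * validate and strip. A hedged sketch of the userspace framing, assuming
 * a plain frame (frame/frame_len and fd are assumptions) with no checksum
 * offload or GSO; needs <linux/virtio_net.h>:
 *
 *	struct virtio_net_hdr vh = { .gso_type = VIRTIO_NET_HDR_GSO_NONE };
 *	struct iovec iov[2] = {
 *		{ .iov_base = &vh,   .iov_len = sizeof(vh) },
 *		{ .iov_base = frame, .iov_len = frame_len },
 *	};
 *	struct msghdr mh = { .msg_iov = iov, .msg_iovlen = 2 };
 *
 *	sendmsg(fd, &mh, 0);
 */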
2495static int packet_snd_vnet_parse(struct msghdr *msg, size_t *len,
2496 struct virtio_net_hdr *vnet_hdr)
2497{
2498 if (*len < sizeof(*vnet_hdr))
2499 return -EINVAL;
2500 *len -= sizeof(*vnet_hdr);
2501
2502 if (!copy_from_iter_full(vnet_hdr, sizeof(*vnet_hdr), &msg->msg_iter))
2503 return -EFAULT;
2504
2505 return __packet_snd_vnet_parse(vnet_hdr, *len);
2506}
2507
2508static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
2509 void *frame, struct net_device *dev, void *data, int tp_len,
2510 __be16 proto, unsigned char *addr, int hlen, int copylen,
2511 const struct sockcm_cookie *sockc)
2512{
2513 union tpacket_uhdr ph;
2514 int to_write, offset, len, nr_frags, len_max;
2515 struct socket *sock = po->sk.sk_socket;
2516 struct page *page;
2517 int err;
2518
2519 ph.raw = frame;
2520
2521 skb->protocol = proto;
2522 skb->dev = dev;
2523 skb->priority = po->sk.sk_priority;
2524 skb->mark = po->sk.sk_mark;
2525 skb->tstamp = sockc->transmit_time;
2526 skb_setup_tx_timestamp(skb, sockc->tsflags);
2527 skb_zcopy_set_nouarg(skb, ph.raw);
2528
2529 skb_reserve(skb, hlen);
2530 skb_reset_network_header(skb);
2531
2532 to_write = tp_len;
2533
2534 if (sock->type == SOCK_DGRAM) {
2535 err = dev_hard_header(skb, dev, ntohs(proto), addr,
2536 NULL, tp_len);
2537 if (unlikely(err < 0))
2538 return -EINVAL;
2539 } else if (copylen) {
2540 int hdrlen = min_t(int, copylen, tp_len);
2541
2542 skb_push(skb, dev->hard_header_len);
2543 skb_put(skb, copylen - dev->hard_header_len);
2544 err = skb_store_bits(skb, 0, data, hdrlen);
2545 if (unlikely(err))
2546 return err;
2547 if (!dev_validate_header(dev, skb->data, hdrlen))
2548 return -EINVAL;
2549
2550 data += hdrlen;
2551 to_write -= hdrlen;
2552 }
2553
2554 offset = offset_in_page(data);
2555 len_max = PAGE_SIZE - offset;
2556 len = ((to_write > len_max) ? len_max : to_write);
2557
2558 skb->data_len = to_write;
2559 skb->len += to_write;
2560 skb->truesize += to_write;
2561 refcount_add(to_write, &po->sk.sk_wmem_alloc);
2562
2563 while (likely(to_write)) {
2564 nr_frags = skb_shinfo(skb)->nr_frags;
2565
2566 if (unlikely(nr_frags >= MAX_SKB_FRAGS)) {
2567 pr_err("Packet exceeds the number of skb frags (%lu)\n",
2568 MAX_SKB_FRAGS);
2569 return -EFAULT;
2570 }
2571
2572 page = pgv_to_page(data);
2573 data += len;
2574 flush_dcache_page(page);
2575 get_page(page);
2576 skb_fill_page_desc(skb, nr_frags, page, offset, len);
2577 to_write -= len;
2578 offset = 0;
2579 len_max = PAGE_SIZE;
2580 len = ((to_write > len_max) ? len_max : to_write);
2581 }
2582
2583 packet_parse_headers(skb, sock);
2584
2585 return tp_len;
2586}
2587
2588static int tpacket_parse_header(struct packet_sock *po, void *frame,
2589 int size_max, void **data)
2590{
2591 union tpacket_uhdr ph;
2592 int tp_len, off;
2593
2594 ph.raw = frame;
2595
2596 switch (po->tp_version) {
2597 case TPACKET_V3:
2598 if (ph.h3->tp_next_offset != 0) {
2599 pr_warn_once("variable sized slot not supported");
2600 return -EINVAL;
2601 }
2602 tp_len = ph.h3->tp_len;
2603 break;
2604 case TPACKET_V2:
2605 tp_len = ph.h2->tp_len;
2606 break;
2607 default:
2608 tp_len = ph.h1->tp_len;
2609 break;
2610 }
2611 if (unlikely(tp_len > size_max)) {
2612 pr_err("packet size too large (%d > %d)\n", tp_len, size_max);
2613 return -EMSGSIZE;
2614 }
2615
2616 if (unlikely(po->tp_tx_has_off)) {
2617 int off_min, off_max;
2618
2619 off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2620 off_max = po->tx_ring.frame_size - tp_len;
2621 if (po->sk.sk_type == SOCK_DGRAM) {
2622 switch (po->tp_version) {
2623 case TPACKET_V3:
2624 off = ph.h3->tp_net;
2625 break;
2626 case TPACKET_V2:
2627 off = ph.h2->tp_net;
2628 break;
2629 default:
2630 off = ph.h1->tp_net;
2631 break;
2632 }
2633 } else {
2634 switch (po->tp_version) {
2635 case TPACKET_V3:
2636 off = ph.h3->tp_mac;
2637 break;
2638 case TPACKET_V2:
2639 off = ph.h2->tp_mac;
2640 break;
2641 default:
2642 off = ph.h1->tp_mac;
2643 break;
2644 }
2645 }
2646 if (unlikely((off < off_min) || (off_max < off)))
2647 return -EINVAL;
2648 } else {
2649 off = po->tp_hdrlen - sizeof(struct sockaddr_ll);
2650 }
2651
2652 *data = frame + off;
2653 return tp_len;
2654}
2655
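/* tpacket_snd() is the consumer side of the PACKET_TX_RING protocol: it
 * walks the ring looking for TP_STATUS_SEND_REQUEST slots. A hedged
 * sketch of the userspace producer for TPACKET_V1, where slot points at
 * the next frame of the mmap()ed TX ring and pkt/pkt_len/fd are
 * assumptions:
 *
 *	struct tpacket_hdr *hdr = slot;
 *
 *	memcpy((char *)slot + TPACKET_HDRLEN - sizeof(struct sockaddr_ll),
 *	       pkt, pkt_len);
 *	hdr->tp_len = pkt_len;
 *	hdr->tp_status = TP_STATUS_SEND_REQUEST;
 *	send(fd, NULL, 0, 0);	// kick the kernel to drain the ring
 */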
2656static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2657{
2658 struct sk_buff *skb = NULL;
2659 struct net_device *dev;
2660 struct virtio_net_hdr *vnet_hdr = NULL;
2661 struct sockcm_cookie sockc;
2662 __be16 proto;
2663 int err, reserve = 0;
2664 void *ph;
2665 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2666 bool need_wait = !(msg->msg_flags & MSG_DONTWAIT);
2667 unsigned char *addr = NULL;
2668 int tp_len, size_max;
2669 void *data;
2670 int len_sum = 0;
2671 int status = TP_STATUS_AVAILABLE;
2672 int hlen, tlen, copylen = 0;
2673 long timeo = 0;
2674
2675 mutex_lock(&po->pg_vec_lock);
2676
2677 /* The packet_sendmsg() check on tx_ring.pg_vec was lockless;
2678 * we need to re-check it under the protection of pg_vec_lock.
2679 */
2680 if (unlikely(!po->tx_ring.pg_vec)) {
2681 err = -EBUSY;
2682 goto out;
2683 }
2684 if (likely(saddr == NULL)) {
2685 dev = packet_cached_dev_get(po);
2686 proto = po->num;
2687 } else {
2688 err = -EINVAL;
2689 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2690 goto out;
2691 if (msg->msg_namelen < (saddr->sll_halen
2692 + offsetof(struct sockaddr_ll,
2693 sll_addr)))
2694 goto out;
2695 proto = saddr->sll_protocol;
2696 dev = dev_get_by_index(sock_net(&po->sk), saddr->sll_ifindex);
2697 if (po->sk.sk_socket->type == SOCK_DGRAM) {
2698 if (dev && msg->msg_namelen < dev->addr_len +
2699 offsetof(struct sockaddr_ll, sll_addr))
2700 goto out_put;
2701 addr = saddr->sll_addr;
2702 }
2703 }
2704
2705 err = -ENXIO;
2706 if (unlikely(dev == NULL))
2707 goto out;
2708 err = -ENETDOWN;
2709 if (unlikely(!(dev->flags & IFF_UP)))
2710 goto out_put;
2711
2712 sockcm_init(&sockc, &po->sk);
2713 if (msg->msg_controllen) {
2714 err = sock_cmsg_send(&po->sk, msg, &sockc);
2715 if (unlikely(err))
2716 goto out_put;
2717 }
2718
2719 if (po->sk.sk_socket->type == SOCK_RAW)
2720 reserve = dev->hard_header_len;
2721 size_max = po->tx_ring.frame_size
2722 - (po->tp_hdrlen - sizeof(struct sockaddr_ll));
2723
2724 if ((size_max > dev->mtu + reserve + VLAN_HLEN) && !po->has_vnet_hdr)
2725 size_max = dev->mtu + reserve + VLAN_HLEN;
2726
2727 reinit_completion(&po->skb_completion);
2728
2729 do {
2730 ph = packet_current_frame(po, &po->tx_ring,
2731 TP_STATUS_SEND_REQUEST);
2732 if (unlikely(ph == NULL)) {
2733 if (need_wait && skb) {
2734 timeo = sock_sndtimeo(&po->sk, msg->msg_flags & MSG_DONTWAIT);
2735 timeo = wait_for_completion_interruptible_timeout(&po->skb_completion, timeo);
2736 if (timeo <= 0) {
2737 err = !timeo ? -ETIMEDOUT : -ERESTARTSYS;
2738 goto out_put;
2739 }
2740 }
2741 /* check for additional frames */
2742 continue;
2743 }
2744
2745 skb = NULL;
2746 tp_len = tpacket_parse_header(po, ph, size_max, &data);
2747 if (tp_len < 0)
2748 goto tpacket_error;
2749
2750 status = TP_STATUS_SEND_REQUEST;
2751 hlen = LL_RESERVED_SPACE(dev);
2752 tlen = dev->needed_tailroom;
2753 if (po->has_vnet_hdr) {
2754 vnet_hdr = data;
2755 data += sizeof(*vnet_hdr);
2756 tp_len -= sizeof(*vnet_hdr);
2757 if (tp_len < 0 ||
2758 __packet_snd_vnet_parse(vnet_hdr, tp_len)) {
2759 tp_len = -EINVAL;
2760 goto tpacket_error;
2761 }
2762 copylen = __virtio16_to_cpu(vio_le(),
2763 vnet_hdr->hdr_len);
2764 }
2765 copylen = max_t(int, copylen, dev->hard_header_len);
2766 skb = sock_alloc_send_skb(&po->sk,
2767 hlen + tlen + sizeof(struct sockaddr_ll) +
2768 (copylen - dev->hard_header_len),
2769 !need_wait, &err);
2770
2771 if (unlikely(skb == NULL)) {
2772 /* we assume the socket was initially writeable ... */
2773 if (likely(len_sum > 0))
2774 err = len_sum;
2775 goto out_status;
2776 }
2777 tp_len = tpacket_fill_skb(po, skb, ph, dev, data, tp_len, proto,
2778 addr, hlen, copylen, &sockc);
2779 if (likely(tp_len >= 0) &&
2780 tp_len > dev->mtu + reserve &&
2781 !po->has_vnet_hdr &&
2782 !packet_extra_vlan_len_allowed(dev, skb))
2783 tp_len = -EMSGSIZE;
2784
2785 if (unlikely(tp_len < 0)) {
2786tpacket_error:
2787 if (po->tp_loss) {
2788 __packet_set_status(po, ph,
2789 TP_STATUS_AVAILABLE);
2790 packet_increment_head(&po->tx_ring);
2791 kfree_skb(skb);
2792 continue;
2793 } else {
2794 status = TP_STATUS_WRONG_FORMAT;
2795 err = tp_len;
2796 goto out_status;
2797 }
2798 }
2799
2800 if (po->has_vnet_hdr) {
2801 if (virtio_net_hdr_to_skb(skb, vnet_hdr, vio_le())) {
2802 tp_len = -EINVAL;
2803 goto tpacket_error;
2804 }
2805 virtio_net_hdr_set_proto(skb, vnet_hdr);
2806 }
2807
2808 skb->destructor = tpacket_destruct_skb;
2809 __packet_set_status(po, ph, TP_STATUS_SENDING);
2810 packet_inc_pending(&po->tx_ring);
2811
2812 status = TP_STATUS_SEND_REQUEST;
2813 err = po->xmit(skb);
2814 if (unlikely(err > 0)) {
2815 err = net_xmit_errno(err);
2816 if (err && __packet_get_status(po, ph) ==
2817 TP_STATUS_AVAILABLE) {
2818 /* skb was destructed already */
2819 skb = NULL;
2820 goto out_status;
2821 }
2822 /*
2823 * skb was dropped but not destructed yet;
2824 * let's treat it like congestion or err < 0
2825 */
2826 err = 0;
2827 }
2828 packet_increment_head(&po->tx_ring);
2829 len_sum += tp_len;
2830 } while (likely((ph != NULL) ||
2831 /* Note: packet_read_pending() might be slow if we have
2832 * to call it, as it's a per-cpu variable, but in the fast
2833 * path we already short-circuit the loop with the first
2834 * condition and luckily don't have to take that path
2835 * anyway.
2836 */
2837 (need_wait && packet_read_pending(&po->tx_ring))));
2838
2839 err = len_sum;
2840 goto out_put;
2841
2842out_status:
2843 __packet_set_status(po, ph, status);
2844 kfree_skb(skb);
2845out_put:
2846 dev_put(dev);
2847out:
2848 mutex_unlock(&po->pg_vec_lock);
2849 return err;
2850}
2851
2852static struct sk_buff *packet_alloc_skb(struct sock *sk, size_t prepad,
2853 size_t reserve, size_t len,
2854 size_t linear, int noblock,
2855 int *err)
2856{
2857 struct sk_buff *skb;
2858
2859 /* Under a page? Don't bother with paged skb. */
2860 if (prepad + len < PAGE_SIZE || !linear)
2861 linear = len;
2862
2863 skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
2864 err, 0);
2865 if (!skb)
2866 return NULL;
2867
2868 skb_reserve(skb, reserve);
2869 skb_put(skb, linear);
2870 skb->data_len = len - linear;
2871 skb->len += len - linear;
2872
2873 return skb;
2874}
2875
2876static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
2877{
2878 struct sock *sk = sock->sk;
2879 DECLARE_SOCKADDR(struct sockaddr_ll *, saddr, msg->msg_name);
2880 struct sk_buff *skb;
2881 struct net_device *dev;
2882 __be16 proto;
2883 unsigned char *addr = NULL;
2884 int err, reserve = 0;
2885 struct sockcm_cookie sockc;
2886 struct virtio_net_hdr vnet_hdr = { 0 };
2887 int offset = 0;
2888 struct packet_sock *po = pkt_sk(sk);
2889 bool has_vnet_hdr = false;
2890 int hlen, tlen, linear;
2891 int extra_len = 0;
2892
2893 /*
2894 * Get and verify the address.
2895 */
2896
2897 if (likely(saddr == NULL)) {
2898 dev = packet_cached_dev_get(po);
2899 proto = po->num;
2900 } else {
2901 err = -EINVAL;
2902 if (msg->msg_namelen < sizeof(struct sockaddr_ll))
2903 goto out;
2904 if (msg->msg_namelen < (saddr->sll_halen + offsetof(struct sockaddr_ll, sll_addr)))
2905 goto out;
2906 proto = saddr->sll_protocol;
2907 dev = dev_get_by_index(sock_net(sk), saddr->sll_ifindex);
2908 if (sock->type == SOCK_DGRAM) {
2909 if (dev && msg->msg_namelen < dev->addr_len +
2910 offsetof(struct sockaddr_ll, sll_addr))
2911 goto out_unlock;
2912 addr = saddr->sll_addr;
2913 }
2914 }
2915
2916 err = -ENXIO;
2917 if (unlikely(dev == NULL))
2918 goto out_unlock;
2919 err = -ENETDOWN;
2920 if (unlikely(!(dev->flags & IFF_UP)))
2921 goto out_unlock;
2922
2923 sockcm_init(&sockc, sk);
2924 sockc.mark = sk->sk_mark;
2925 if (msg->msg_controllen) {
2926 err = sock_cmsg_send(sk, msg, &sockc);
2927 if (unlikely(err))
2928 goto out_unlock;
2929 }
2930
2931 if (sock->type == SOCK_RAW)
2932 reserve = dev->hard_header_len;
2933 if (po->has_vnet_hdr) {
2934 err = packet_snd_vnet_parse(msg, &len, &vnet_hdr);
2935 if (err)
2936 goto out_unlock;
2937 has_vnet_hdr = true;
2938 }
2939
2940 if (unlikely(sock_flag(sk, SOCK_NOFCS))) {
2941 if (!netif_supports_nofcs(dev)) {
2942 err = -EPROTONOSUPPORT;
2943 goto out_unlock;
2944 }
2945 extra_len = 4; /* We're doing our own CRC */
2946 }
2947
2948 err = -EMSGSIZE;
2949 if (!vnet_hdr.gso_type &&
2950 (len > dev->mtu + reserve + VLAN_HLEN + extra_len))
2951 goto out_unlock;
2952
2953 err = -ENOBUFS;
2954 hlen = LL_RESERVED_SPACE(dev);
2955 tlen = dev->needed_tailroom;
2956 linear = __virtio16_to_cpu(vio_le(), vnet_hdr.hdr_len);
2957 linear = max(linear, min_t(int, len, dev->hard_header_len));
2958 skb = packet_alloc_skb(sk, hlen + tlen, hlen, len, linear,
2959 msg->msg_flags & MSG_DONTWAIT, &err);
2960 if (skb == NULL)
2961 goto out_unlock;
2962
2963 skb_reset_network_header(skb);
2964
2965 err = -EINVAL;
2966 if (sock->type == SOCK_DGRAM) {
2967 offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
2968 if (unlikely(offset < 0))
2969 goto out_free;
2970 } else if (reserve) {
2971 skb_reserve(skb, -reserve);
2972 if (len < reserve + sizeof(struct ipv6hdr) &&
2973 dev->min_header_len != dev->hard_header_len)
2974 skb_reset_network_header(skb);
2975 }
2976
2977 /* Returns -EFAULT on error */
2978 err = skb_copy_datagram_from_iter(skb, offset, &msg->msg_iter, len);
2979 if (err)
2980 goto out_free;
2981
2982 if (sock->type == SOCK_RAW &&
2983 !dev_validate_header(dev, skb->data, len)) {
2984 err = -EINVAL;
2985 goto out_free;
2986 }
2987
2988 skb_setup_tx_timestamp(skb, sockc.tsflags);
2989
2990 if (!vnet_hdr.gso_type && (len > dev->mtu + reserve + extra_len) &&
2991 !packet_extra_vlan_len_allowed(dev, skb)) {
2992 err = -EMSGSIZE;
2993 goto out_free;
2994 }
2995
2996 skb->protocol = proto;
2997 skb->dev = dev;
2998 skb->priority = sk->sk_priority;
2999 skb->mark = sockc.mark;
3000 skb->tstamp = sockc.transmit_time;
3001
3002 if (has_vnet_hdr) {
3003 err = virtio_net_hdr_to_skb(skb, &vnet_hdr, vio_le());
3004 if (err)
3005 goto out_free;
3006 len += sizeof(vnet_hdr);
3007 virtio_net_hdr_set_proto(skb, &vnet_hdr);
3008 }
3009
3010 packet_parse_headers(skb, sock);
3011
3012 if (unlikely(extra_len == 4))
3013 skb->no_fcs = 1;
3014
3015 err = po->xmit(skb);
3016 if (err > 0 && (err = net_xmit_errno(err)) != 0)
3017 goto out_unlock;
3018
3019 dev_put(dev);
3020
3021 return len;
3022
3023out_free:
3024 kfree_skb(skb);
3025out_unlock:
3026 if (dev)
3027 dev_put(dev);
3028out:
3029 return err;
3030}
3031
3032static int packet_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
3033{
3034 struct sock *sk = sock->sk;
3035 struct packet_sock *po = pkt_sk(sk);
3036
3037 if (po->tx_ring.pg_vec)
3038 return tpacket_snd(po, msg);
3039 else
3040 return packet_snd(sock, msg, len);
3041}
3042
3043/*
3044 * Close a PACKET socket. This is fairly simple. We immediately go
3045 * to 'closed' state and remove our protocol entry from the device list.
3046 */
3047
3048static int packet_release(struct socket *sock)
3049{
3050 struct sock *sk = sock->sk;
3051 struct packet_sock *po;
3052 struct packet_fanout *f;
3053 struct net *net;
3054 union tpacket_req_u req_u;
3055
3056 if (!sk)
3057 return 0;
3058
3059 net = sock_net(sk);
3060 po = pkt_sk(sk);
3061
3062 mutex_lock(&net->packet.sklist_lock);
3063 sk_del_node_init_rcu(sk);
3064 mutex_unlock(&net->packet.sklist_lock);
3065
3066 preempt_disable();
3067 sock_prot_inuse_add(net, sk->sk_prot, -1);
3068 preempt_enable();
3069
3070 spin_lock(&po->bind_lock);
3071 unregister_prot_hook(sk, false);
3072 packet_cached_dev_reset(po);
3073
3074 if (po->prot_hook.dev) {
3075 dev_put(po->prot_hook.dev);
3076 po->prot_hook.dev = NULL;
3077 }
3078 spin_unlock(&po->bind_lock);
3079
3080 packet_flush_mclist(sk);
3081
3082 lock_sock(sk);
3083 if (po->rx_ring.pg_vec) {
3084 memset(&req_u, 0, sizeof(req_u));
3085 packet_set_ring(sk, &req_u, 1, 0);
3086 }
3087
3088 if (po->tx_ring.pg_vec) {
3089 memset(&req_u, 0, sizeof(req_u));
3090 packet_set_ring(sk, &req_u, 1, 1);
3091 }
3092 release_sock(sk);
3093
3094 f = fanout_release(sk);
3095
3096 synchronize_net();
3097
3098 kfree(po->rollover);
3099 if (f) {
3100 fanout_release_data(f);
3101 kvfree(f);
3102 }
3103 /*
3104 * Now the socket is dead. No more input will appear.
3105 */
3106 sock_orphan(sk);
3107 sock->sk = NULL;
3108
3109 /* Purge queues */
3110
3111 skb_queue_purge(&sk->sk_receive_queue);
3112 packet_free_pending(po);
3113 sk_refcnt_debug_release(sk);
3114
3115 sock_put(sk);
3116 return 0;
3117}
3118
3119/*
3120 * Attach a packet hook.
3121 */
3122
3123static int packet_do_bind(struct sock *sk, const char *name, int ifindex,
3124 __be16 proto)
3125{
3126 struct packet_sock *po = pkt_sk(sk);
3127 struct net_device *dev_curr;
3128 __be16 proto_curr;
3129 bool need_rehook;
3130 struct net_device *dev = NULL;
3131 int ret = 0;
3132 bool unlisted = false;
3133
3134 lock_sock(sk);
3135 spin_lock(&po->bind_lock);
3136 rcu_read_lock();
3137
3138 if (po->fanout) {
3139 ret = -EINVAL;
3140 goto out_unlock;
3141 }
3142
3143 if (name) {
3144 dev = dev_get_by_name_rcu(sock_net(sk), name);
3145 if (!dev) {
3146 ret = -ENODEV;
3147 goto out_unlock;
3148 }
3149 } else if (ifindex) {
3150 dev = dev_get_by_index_rcu(sock_net(sk), ifindex);
3151 if (!dev) {
3152 ret = -ENODEV;
3153 goto out_unlock;
3154 }
3155 }
3156
3157 if (dev)
3158 dev_hold(dev);
3159
3160 proto_curr = po->prot_hook.type;
3161 dev_curr = po->prot_hook.dev;
3162
3163 need_rehook = proto_curr != proto || dev_curr != dev;
3164
3165 if (need_rehook) {
3166 if (po->running) {
3167 rcu_read_unlock();
3168 /* prevents packet_notifier() from calling
3169 * register_prot_hook()
3170 */
3171 po->num = 0;
3172 __unregister_prot_hook(sk, true);
3173 rcu_read_lock();
3174 dev_curr = po->prot_hook.dev;
3175 if (dev)
3176 unlisted = !dev_get_by_index_rcu(sock_net(sk),
3177 dev->ifindex);
3178 }
3179
3180 BUG_ON(po->running);
3181 po->num = proto;
3182 po->prot_hook.type = proto;
3183
3184 if (unlikely(unlisted)) {
3185 dev_put(dev);
3186 po->prot_hook.dev = NULL;
3187 po->ifindex = -1;
3188 packet_cached_dev_reset(po);
3189 } else {
3190 po->prot_hook.dev = dev;
3191 po->ifindex = dev ? dev->ifindex : 0;
3192 packet_cached_dev_assign(po, dev);
3193 }
3194 }
3195 if (dev_curr)
3196 dev_put(dev_curr);
3197
3198 if (proto == 0 || !need_rehook)
3199 goto out_unlock;
3200
3201 if (!unlisted && (!dev || (dev->flags & IFF_UP))) {
3202 register_prot_hook(sk);
3203 } else {
3204 sk->sk_err = ENETDOWN;
3205 if (!sock_flag(sk, SOCK_DEAD))
3206 sk->sk_error_report(sk);
3207 }
3208
3209out_unlock:
3210 rcu_read_unlock();
3211 spin_unlock(&po->bind_lock);
3212 release_sock(sk);
3213 return ret;
3214}
3215
3216/*
3217 * Bind a packet socket to a device
3218 */
3219
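/* A hedged sketch of the userspace side of packet_bind() further below,
 * assuming fd is the packet socket and the interface index ifidx was
 * obtained via if_nametoindex():
 *
 *	struct sockaddr_ll sll = {
 *		.sll_family   = AF_PACKET,
 *		.sll_protocol = htons(ETH_P_ALL),
 *		.sll_ifindex  = ifidx,
 *	};
 *
 *	bind(fd, (struct sockaddr *)&sll, sizeof(sll));
 */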
3220static int packet_bind_spkt(struct socket *sock, struct sockaddr *uaddr,
3221 int addr_len)
3222{
3223 struct sock *sk = sock->sk;
3224 char name[sizeof(uaddr->sa_data) + 1];
3225
3226 /*
3227 * Check legality
3228 */
3229
3230 if (addr_len != sizeof(struct sockaddr))
3231 return -EINVAL;
3232 /* uaddr->sa_data comes from userspace and is not guaranteed to be
3233 * NUL-terminated.
3234 */
3235 memcpy(name, uaddr->sa_data, sizeof(uaddr->sa_data));
3236 name[sizeof(uaddr->sa_data)] = 0;
3237
3238 return packet_do_bind(sk, name, 0, pkt_sk(sk)->num);
3239}
3240
3241static int packet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
3242{
3243 struct sockaddr_ll *sll = (struct sockaddr_ll *)uaddr;
3244 struct sock *sk = sock->sk;
3245
3246 /*
3247 * Check legality
3248 */
3249
3250 if (addr_len < sizeof(struct sockaddr_ll))
3251 return -EINVAL;
3252 if (sll->sll_family != AF_PACKET)
3253 return -EINVAL;
3254
3255 return packet_do_bind(sk, NULL, sll->sll_ifindex,
3256 sll->sll_protocol ? : pkt_sk(sk)->num);
3257}
3258
3259static struct proto packet_proto = {
3260 .name = "PACKET",
3261 .owner = THIS_MODULE,
3262 .obj_size = sizeof(struct packet_sock),
3263};
3264
3265/*
3266 * Create a packet socket (SOCK_RAW, SOCK_DGRAM, or the obsolete SOCK_PACKET).
3267 */
3268
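/* A hedged sketch of the userspace call that lands here; CAP_NET_RAW is
 * required, as enforced at the top of packet_create():
 *
 *	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));
 */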
3269static int packet_create(struct net *net, struct socket *sock, int protocol,
3270 int kern)
3271{
3272 struct sock *sk;
3273 struct packet_sock *po;
3274 __be16 proto = (__force __be16)protocol; /* weird, but documented */
3275 int err;
3276
3277 if (!ns_capable(net->user_ns, CAP_NET_RAW))
3278 return -EPERM;
3279 if (sock->type != SOCK_DGRAM && sock->type != SOCK_RAW &&
3280 sock->type != SOCK_PACKET)
3281 return -ESOCKTNOSUPPORT;
3282
3283 sock->state = SS_UNCONNECTED;
3284
3285 err = -ENOBUFS;
3286 sk = sk_alloc(net, PF_PACKET, GFP_KERNEL, &packet_proto, kern);
3287 if (sk == NULL)
3288 goto out;
3289
3290 sock->ops = &packet_ops;
3291 if (sock->type == SOCK_PACKET)
3292 sock->ops = &packet_ops_spkt;
3293
3294 sock_init_data(sock, sk);
3295
3296 po = pkt_sk(sk);
3297 init_completion(&po->skb_completion);
3298 sk->sk_family = PF_PACKET;
3299 po->num = proto;
3300 po->xmit = dev_queue_xmit;
3301
3302 err = packet_alloc_pending(po);
3303 if (err)
3304 goto out2;
3305
3306 packet_cached_dev_reset(po);
3307
3308 sk->sk_destruct = packet_sock_destruct;
3309 sk_refcnt_debug_inc(sk);
3310
3311 /*
3312 * Attach a protocol block
3313 */
3314
3315 spin_lock_init(&po->bind_lock);
3316 mutex_init(&po->pg_vec_lock);
3317 po->rollover = NULL;
3318 po->prot_hook.func = packet_rcv;
3319
3320 if (sock->type == SOCK_PACKET)
3321 po->prot_hook.func = packet_rcv_spkt;
3322
3323 po->prot_hook.af_packet_priv = sk;
3324
3325 if (proto) {
3326 po->prot_hook.type = proto;
3327 __register_prot_hook(sk);
3328 }
3329
3330 mutex_lock(&net->packet.sklist_lock);
3331 sk_add_node_tail_rcu(sk, &net->packet.sklist);
3332 mutex_unlock(&net->packet.sklist_lock);
3333
3334 preempt_disable();
3335 sock_prot_inuse_add(net, &packet_proto, 1);
3336 preempt_enable();
3337
3338 return 0;
3339out2:
3340 sk_free(sk);
3341out:
3342 return err;
3343}
3344
3345/*
3346 * Pull a packet from our receive queue and hand it to the user.
3347 * If necessary we block.
3348 */
3349
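/* A hedged sketch of the matching userspace receive, including the
 * PACKET_AUXDATA control message emitted near the end of this function
 * (assuming the option was enabled via setsockopt() beforehand; fd is
 * the packet socket):
 *
 *	char buf[2048], cbuf[CMSG_SPACE(sizeof(struct tpacket_auxdata))];
 *	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
 *	struct msghdr mh = {
 *		.msg_iov = &iov, .msg_iovlen = 1,
 *		.msg_control = cbuf, .msg_controllen = sizeof(cbuf),
 *	};
 *	struct cmsghdr *c;
 *
 *	recvmsg(fd, &mh, 0);
 *	for (c = CMSG_FIRSTHDR(&mh); c; c = CMSG_NXTHDR(&mh, c))
 *		if (c->cmsg_level == SOL_PACKET && c->cmsg_type == PACKET_AUXDATA)
 *			break;	// struct tpacket_auxdata is at CMSG_DATA(c)
 */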
3350static int packet_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
3351 int flags)
3352{
3353 struct sock *sk = sock->sk;
3354 struct sk_buff *skb;
3355 int copied, err;
3356 int vnet_hdr_len = 0;
3357 unsigned int origlen = 0;
3358
3359 err = -EINVAL;
3360 if (flags & ~(MSG_PEEK|MSG_DONTWAIT|MSG_TRUNC|MSG_CMSG_COMPAT|MSG_ERRQUEUE))
3361 goto out;
3362
3363#if 0
3364 /* What error should we return now? EUNATTACH? */
3365 if (pkt_sk(sk)->ifindex < 0)
3366 return -ENODEV;
3367#endif
3368
3369 if (flags & MSG_ERRQUEUE) {
3370 err = sock_recv_errqueue(sk, msg, len,
3371 SOL_PACKET, PACKET_TX_TIMESTAMP);
3372 goto out;
3373 }
3374
3375 /*
3376 * Call the generic datagram receiver. This handles all sorts
3377 * of horrible races and re-entrancy so we can forget about it
3378 * in the protocol layers.
3379 *
3380 * Now it will return ENETDOWN if the device has just gone down,
3381 * but then it will block.
3382 */
3383
3384 skb = skb_recv_datagram(sk, flags, flags & MSG_DONTWAIT, &err);
3385
3386 /*
3387 * An error occurred, so return it. Because skb_recv_datagram()
3388 * handles the blocking, we don't have to see or worry about
3389 * blocking retries.
3390 */
3391
3392 if (skb == NULL)
3393 goto out;
3394
3395 packet_rcv_try_clear_pressure(pkt_sk(sk));
3396
3397 if (pkt_sk(sk)->has_vnet_hdr) {
3398 err = packet_rcv_vnet(msg, skb, &len);
3399 if (err)
3400 goto out_free;
3401 vnet_hdr_len = sizeof(struct virtio_net_hdr);
3402 }
3403
3404 /* You lose any data beyond the buffer you gave. If this worries
3405 * a user program, it can ask the device for its MTU
3406 * anyway.
3407 */
3408 copied = skb->len;
3409 if (copied > len) {
3410 copied = len;
3411 msg->msg_flags |= MSG_TRUNC;
3412 }
3413
3414 err = skb_copy_datagram_msg(skb, 0, msg, copied);
3415 if (err)
3416 goto out_free;
3417
3418 if (sock->type != SOCK_PACKET) {
3419 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3420
3421 /* Original length was stored in sockaddr_ll fields */
3422 origlen = PACKET_SKB_CB(skb)->sa.origlen;
3423 sll->sll_family = AF_PACKET;
3424 sll->sll_protocol = skb->protocol;
3425 }
3426
3427 sock_recv_ts_and_drops(msg, sk, skb);
3428
3429 if (msg->msg_name) {
3430 int copy_len;
3431
3432 /* If the address length field is there to be filled
3433 * in, we fill it in now.
3434 */
3435 if (sock->type == SOCK_PACKET) {
3436 __sockaddr_check_size(sizeof(struct sockaddr_pkt));
3437 msg->msg_namelen = sizeof(struct sockaddr_pkt);
3438 copy_len = msg->msg_namelen;
3439 } else {
3440 struct sockaddr_ll *sll = &PACKET_SKB_CB(skb)->sa.ll;
3441
3442 msg->msg_namelen = sll->sll_halen +
3443 offsetof(struct sockaddr_ll, sll_addr);
3444 copy_len = msg->msg_namelen;
3445 if (msg->msg_namelen < sizeof(struct sockaddr_ll)) {
3446 memset(msg->msg_name +
3447 offsetof(struct sockaddr_ll, sll_addr),
3448 0, sizeof(sll->sll_addr));
3449 msg->msg_namelen = sizeof(struct sockaddr_ll);
3450 }
3451 }
3452 memcpy(msg->msg_name, &PACKET_SKB_CB(skb)->sa, copy_len);
3453 }
3454
3455 if (pkt_sk(sk)->auxdata) {
3456 struct tpacket_auxdata aux;
3457
3458 aux.tp_status = TP_STATUS_USER;
3459 if (skb->ip_summed == CHECKSUM_PARTIAL)
3460 aux.tp_status |= TP_STATUS_CSUMNOTREADY;
3461 else if (skb->pkt_type != PACKET_OUTGOING &&
3462 (skb->ip_summed == CHECKSUM_COMPLETE ||
3463 skb_csum_unnecessary(skb)))
3464 aux.tp_status |= TP_STATUS_CSUM_VALID;
3465
3466 aux.tp_len = origlen;
3467 aux.tp_snaplen = skb->len;
3468 aux.tp_mac = 0;
3469 aux.tp_net = skb_network_offset(skb);
3470 if (skb_vlan_tag_present(skb)) {
3471 aux.tp_vlan_tci = skb_vlan_tag_get(skb);
3472 aux.tp_vlan_tpid = ntohs(skb->vlan_proto);
3473 aux.tp_status |= TP_STATUS_VLAN_VALID | TP_STATUS_VLAN_TPID_VALID;
3474 } else {
3475 aux.tp_vlan_tci = 0;
3476 aux.tp_vlan_tpid = 0;
3477 }
3478 put_cmsg(msg, SOL_PACKET, PACKET_AUXDATA, sizeof(aux), &aux);
3479 }
3480
3481 /*
3482 * Free or return the buffer as appropriate. Again this
3483 * hides all the races and re-entrancy issues from us.
3484 */
3485 err = vnet_hdr_len + ((flags&MSG_TRUNC) ? skb->len : copied);
3486
3487out_free:
3488 skb_free_datagram(sk, skb);
3489out:
3490 return err;
3491}
3492
3493static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr,
3494 int peer)
3495{
3496 struct net_device *dev;
3497 struct sock *sk = sock->sk;
3498
3499 if (peer)
3500 return -EOPNOTSUPP;
3501
3502 uaddr->sa_family = AF_PACKET;
3503 memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data));
3504 rcu_read_lock();
3505 dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex);
3506 if (dev)
3507 strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data));
3508 rcu_read_unlock();
3509
3510 return sizeof(*uaddr);
3511}
3512
3513static int packet_getname(struct socket *sock, struct sockaddr *uaddr,
3514 int peer)
3515{
3516 struct net_device *dev;
3517 struct sock *sk = sock->sk;
3518 struct packet_sock *po = pkt_sk(sk);
3519 DECLARE_SOCKADDR(struct sockaddr_ll *, sll, uaddr);
3520
3521 if (peer)
3522 return -EOPNOTSUPP;
3523
3524 sll->sll_family = AF_PACKET;
3525 sll->sll_ifindex = po->ifindex;
3526 sll->sll_protocol = po->num;
3527 sll->sll_pkttype = 0;
3528 rcu_read_lock();
3529 dev = dev_get_by_index_rcu(sock_net(sk), po->ifindex);
3530 if (dev) {
3531 sll->sll_hatype = dev->type;
3532 sll->sll_halen = dev->addr_len;
3533 memcpy(sll->sll_addr, dev->dev_addr, dev->addr_len);
3534 } else {
3535 sll->sll_hatype = 0; /* Bad: we have no ARPHRD_UNSPEC */
3536 sll->sll_halen = 0;
3537 }
3538 rcu_read_unlock();
3539
3540 return offsetof(struct sockaddr_ll, sll_addr) + sll->sll_halen;
3541}
3542
3543static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
3544 int what)
3545{
3546 switch (i->type) {
3547 case PACKET_MR_MULTICAST:
3548 if (i->alen != dev->addr_len)
3549 return -EINVAL;
3550 if (what > 0)
3551 return dev_mc_add(dev, i->addr);
3552 else
3553 return dev_mc_del(dev, i->addr);
3554 break;
3555 case PACKET_MR_PROMISC:
3556 return dev_set_promiscuity(dev, what);
3557 case PACKET_MR_ALLMULTI:
3558 return dev_set_allmulti(dev, what);
3559 case PACKET_MR_UNICAST:
3560 if (i->alen != dev->addr_len)
3561 return -EINVAL;
3562 if (what > 0)
3563 return dev_uc_add(dev, i->addr);
3564 else
3565 return dev_uc_del(dev, i->addr);
3566 break;
3567 default:
3568 break;
3569 }
3570 return 0;
3571}
3572
3573static void packet_dev_mclist_delete(struct net_device *dev,
3574 struct packet_mclist **mlp)
3575{
3576 struct packet_mclist *ml;
3577
3578 while ((ml = *mlp) != NULL) {
3579 if (ml->ifindex == dev->ifindex) {
3580 packet_dev_mc(dev, ml, -1);
3581 *mlp = ml->next;
3582 kfree(ml);
3583 } else
3584 mlp = &ml->next;
3585 }
3586}
3587
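/* A hedged sketch of the setsockopt() that reaches packet_mc_add(),
 * here switching the interface into promiscuous mode (ifidx and fd are
 * assumptions):
 *
 *	struct packet_mreq mr = {
 *		.mr_ifindex = ifidx,
 *		.mr_type    = PACKET_MR_PROMISC,
 *	};
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_ADD_MEMBERSHIP, &mr, sizeof(mr));
 */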
3588static int packet_mc_add(struct sock *sk, struct packet_mreq_max *mreq)
3589{
3590 struct packet_sock *po = pkt_sk(sk);
3591 struct packet_mclist *ml, *i;
3592 struct net_device *dev;
3593 int err;
3594
3595 rtnl_lock();
3596
3597 err = -ENODEV;
3598 dev = __dev_get_by_index(sock_net(sk), mreq->mr_ifindex);
3599 if (!dev)
3600 goto done;
3601
3602 err = -EINVAL;
3603 if (mreq->mr_alen > dev->addr_len)
3604 goto done;
3605
3606 err = -ENOBUFS;
3607 i = kmalloc(sizeof(*i), GFP_KERNEL);
3608 if (i == NULL)
3609 goto done;
3610
3611 err = 0;
3612 for (ml = po->mclist; ml; ml = ml->next) {
3613 if (ml->ifindex == mreq->mr_ifindex &&
3614 ml->type == mreq->mr_type &&
3615 ml->alen == mreq->mr_alen &&
3616 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3617 ml->count++;
3618 /* Free the new element ... */
3619 kfree(i);
3620 goto done;
3621 }
3622 }
3623
3624 i->type = mreq->mr_type;
3625 i->ifindex = mreq->mr_ifindex;
3626 i->alen = mreq->mr_alen;
3627 memcpy(i->addr, mreq->mr_address, i->alen);
3628 memset(i->addr + i->alen, 0, sizeof(i->addr) - i->alen);
3629 i->count = 1;
3630 i->next = po->mclist;
3631 po->mclist = i;
3632 err = packet_dev_mc(dev, i, 1);
3633 if (err) {
3634 po->mclist = i->next;
3635 kfree(i);
3636 }
3637
3638done:
3639 rtnl_unlock();
3640 return err;
3641}
3642
3643static int packet_mc_drop(struct sock *sk, struct packet_mreq_max *mreq)
3644{
3645 struct packet_mclist *ml, **mlp;
3646
3647 rtnl_lock();
3648
3649 for (mlp = &pkt_sk(sk)->mclist; (ml = *mlp) != NULL; mlp = &ml->next) {
3650 if (ml->ifindex == mreq->mr_ifindex &&
3651 ml->type == mreq->mr_type &&
3652 ml->alen == mreq->mr_alen &&
3653 memcmp(ml->addr, mreq->mr_address, ml->alen) == 0) {
3654 if (--ml->count == 0) {
3655 struct net_device *dev;
3656 *mlp = ml->next;
3657 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3658 if (dev)
3659 packet_dev_mc(dev, ml, -1);
3660 kfree(ml);
3661 }
3662 break;
3663 }
3664 }
3665 rtnl_unlock();
3666 return 0;
3667}
3668
3669static void packet_flush_mclist(struct sock *sk)
3670{
3671 struct packet_sock *po = pkt_sk(sk);
3672 struct packet_mclist *ml;
3673
3674 if (!po->mclist)
3675 return;
3676
3677 rtnl_lock();
3678 while ((ml = po->mclist) != NULL) {
3679 struct net_device *dev;
3680
3681 po->mclist = ml->next;
3682 dev = __dev_get_by_index(sock_net(sk), ml->ifindex);
3683 if (dev != NULL)
3684 packet_dev_mc(dev, ml, -1);
3685 kfree(ml);
3686 }
3687 rtnl_unlock();
3688}
3689
3690static int
3691packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval,
3692 unsigned int optlen)
3693{
3694 struct sock *sk = sock->sk;
3695 struct packet_sock *po = pkt_sk(sk);
3696 int ret;
3697
3698 if (level != SOL_PACKET)
3699 return -ENOPROTOOPT;
3700
3701 switch (optname) {
3702 case PACKET_ADD_MEMBERSHIP:
3703 case PACKET_DROP_MEMBERSHIP:
3704 {
3705 struct packet_mreq_max mreq;
3706 int len = optlen;
3707 memset(&mreq, 0, sizeof(mreq));
3708 if (len < sizeof(struct packet_mreq))
3709 return -EINVAL;
3710 if (len > sizeof(mreq))
3711 len = sizeof(mreq);
3712 if (copy_from_sockptr(&mreq, optval, len))
3713 return -EFAULT;
3714 if (len < (mreq.mr_alen + offsetof(struct packet_mreq, mr_address)))
3715 return -EINVAL;
3716 if (optname == PACKET_ADD_MEMBERSHIP)
3717 ret = packet_mc_add(sk, &mreq);
3718 else
3719 ret = packet_mc_drop(sk, &mreq);
3720 return ret;
3721 }
3722
3723 case PACKET_RX_RING:
3724 case PACKET_TX_RING:
3725 {
3726 union tpacket_req_u req_u;
3727 int len;
3728
3729 lock_sock(sk);
3730 switch (po->tp_version) {
3731 case TPACKET_V1:
3732 case TPACKET_V2:
3733 len = sizeof(req_u.req);
3734 break;
3735 case TPACKET_V3:
3736 default:
3737 len = sizeof(req_u.req3);
3738 break;
3739 }
3740 if (optlen < len) {
3741 ret = -EINVAL;
3742 } else {
3743 if (copy_from_sockptr(&req_u.req, optval, len))
3744 ret = -EFAULT;
3745 else
3746 ret = packet_set_ring(sk, &req_u, 0,
3747 optname == PACKET_TX_RING);
3748 }
3749 release_sock(sk);
3750 return ret;
3751 }
3752 case PACKET_COPY_THRESH:
3753 {
3754 int val;
3755
3756 if (optlen != sizeof(val))
3757 return -EINVAL;
3758 if (copy_from_sockptr(&val, optval, sizeof(val)))
3759 return -EFAULT;
3760
3761 pkt_sk(sk)->copy_thresh = val;
3762 return 0;
3763 }
3764 case PACKET_VERSION:
3765 {
3766 int val;
3767
3768 if (optlen != sizeof(val))
3769 return -EINVAL;
3770 if (copy_from_sockptr(&val, optval, sizeof(val)))
3771 return -EFAULT;
3772 switch (val) {
3773 case TPACKET_V1:
3774 case TPACKET_V2:
3775 case TPACKET_V3:
3776 break;
3777 default:
3778 return -EINVAL;
3779 }
3780 lock_sock(sk);
3781 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3782 ret = -EBUSY;
3783 } else {
3784 po->tp_version = val;
3785 ret = 0;
3786 }
3787 release_sock(sk);
3788 return ret;
3789 }
3790 case PACKET_RESERVE:
3791 {
3792 unsigned int val;
3793
3794 if (optlen != sizeof(val))
3795 return -EINVAL;
3796 if (copy_from_sockptr(&val, optval, sizeof(val)))
3797 return -EFAULT;
3798 if (val > INT_MAX)
3799 return -EINVAL;
3800 lock_sock(sk);
3801 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3802 ret = -EBUSY;
3803 } else {
3804 po->tp_reserve = val;
3805 ret = 0;
3806 }
3807 release_sock(sk);
3808 return ret;
3809 }
3810 case PACKET_LOSS:
3811 {
3812 unsigned int val;
3813
3814 if (optlen != sizeof(val))
3815 return -EINVAL;
3816 if (copy_from_sockptr(&val, optval, sizeof(val)))
3817 return -EFAULT;
3818
3819 lock_sock(sk);
3820 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3821 ret = -EBUSY;
3822 } else {
3823 po->tp_loss = !!val;
3824 ret = 0;
3825 }
3826 release_sock(sk);
3827 return ret;
3828 }
3829 case PACKET_AUXDATA:
3830 {
3831 int val;
3832
3833 if (optlen < sizeof(val))
3834 return -EINVAL;
3835 if (copy_from_sockptr(&val, optval, sizeof(val)))
3836 return -EFAULT;
3837
3838 lock_sock(sk);
3839 po->auxdata = !!val;
3840 release_sock(sk);
3841 return 0;
3842 }
3843 case PACKET_ORIGDEV:
3844 {
3845 int val;
3846
3847 if (optlen < sizeof(val))
3848 return -EINVAL;
3849 if (copy_from_sockptr(&val, optval, sizeof(val)))
3850 return -EFAULT;
3851
3852 lock_sock(sk);
3853 po->origdev = !!val;
3854 release_sock(sk);
3855 return 0;
3856 }
3857 case PACKET_VNET_HDR:
3858 {
3859 int val;
3860
3861 if (sock->type != SOCK_RAW)
3862 return -EINVAL;
3863 if (optlen < sizeof(val))
3864 return -EINVAL;
3865 if (copy_from_sockptr(&val, optval, sizeof(val)))
3866 return -EFAULT;
3867
3868 lock_sock(sk);
3869 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3870 ret = -EBUSY;
3871 } else {
3872 po->has_vnet_hdr = !!val;
3873 ret = 0;
3874 }
3875 release_sock(sk);
3876 return ret;
3877 }
3878 case PACKET_TIMESTAMP:
3879 {
3880 int val;
3881
3882 if (optlen != sizeof(val))
3883 return -EINVAL;
3884 if (copy_from_sockptr(&val, optval, sizeof(val)))
3885 return -EFAULT;
3886
3887 po->tp_tstamp = val;
3888 return 0;
3889 }
3890 case PACKET_FANOUT:
3891 {
3892 struct fanout_args args = { 0 };
3893
3894 if (optlen != sizeof(int) && optlen != sizeof(args))
3895 return -EINVAL;
3896 if (copy_from_sockptr(&args, optval, optlen))
3897 return -EFAULT;
3898
3899 return fanout_add(sk, &args);
3900 }
3901 case PACKET_FANOUT_DATA:
3902 {
3903 if (!po->fanout)
3904 return -EINVAL;
3905
3906 return fanout_set_data(po, optval, optlen);
3907 }
3908 case PACKET_IGNORE_OUTGOING:
3909 {
3910 int val;
3911
3912 if (optlen != sizeof(val))
3913 return -EINVAL;
3914 if (copy_from_sockptr(&val, optval, sizeof(val)))
3915 return -EFAULT;
3916 if (val < 0 || val > 1)
3917 return -EINVAL;
3918
3919 po->prot_hook.ignore_outgoing = !!val;
3920 return 0;
3921 }
3922 case PACKET_TX_HAS_OFF:
3923 {
3924 unsigned int val;
3925
3926 if (optlen != sizeof(val))
3927 return -EINVAL;
3928 if (copy_from_sockptr(&val, optval, sizeof(val)))
3929 return -EFAULT;
3930
3931 lock_sock(sk);
3932 if (po->rx_ring.pg_vec || po->tx_ring.pg_vec) {
3933 ret = -EBUSY;
3934 } else {
3935 po->tp_tx_has_off = !!val;
3936 ret = 0;
3937 }
3938 release_sock(sk);
3939 return ret;
3940 }
3941 case PACKET_QDISC_BYPASS:
3942 {
3943 int val;
3944
3945 if (optlen != sizeof(val))
3946 return -EINVAL;
3947 if (copy_from_sockptr(&val, optval, sizeof(val)))
3948 return -EFAULT;
3949
3950 po->xmit = val ? packet_direct_xmit : dev_queue_xmit;
3951 return 0;
3952 }
3953 default:
3954 return -ENOPROTOOPT;
3955 }
3956}
3957
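/* A hedged sketch of joining a fanout group through the PACKET_FANOUT
 * case handled above (group id 7 is an arbitrary example and fd is the
 * packet socket; the fanout type is packed into the upper 16 bits):
 *
 *	int arg = 7 | (PACKET_FANOUT_HASH << 16);
 *
 *	setsockopt(fd, SOL_PACKET, PACKET_FANOUT, &arg, sizeof(arg));
 */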
3958static int packet_getsockopt(struct socket *sock, int level, int optname,
3959 char __user *optval, int __user *optlen)
3960{
3961 int len;
3962 int val, lv = sizeof(val);
3963 struct sock *sk = sock->sk;
3964 struct packet_sock *po = pkt_sk(sk);
3965 void *data = &val;
3966 union tpacket_stats_u st;
3967 struct tpacket_rollover_stats rstats;
3968 int drops;
3969
3970 if (level != SOL_PACKET)
3971 return -ENOPROTOOPT;
3972
3973 if (get_user(len, optlen))
3974 return -EFAULT;
3975
3976 if (len < 0)
3977 return -EINVAL;
3978
3979 switch (optname) {
3980 case PACKET_STATISTICS:
3981 spin_lock_bh(&sk->sk_receive_queue.lock);
3982 memcpy(&st, &po->stats, sizeof(st));
3983 memset(&po->stats, 0, sizeof(po->stats));
3984 spin_unlock_bh(&sk->sk_receive_queue.lock);
3985 drops = atomic_xchg(&po->tp_drops, 0);
3986
3987 if (po->tp_version == TPACKET_V3) {
3988 lv = sizeof(struct tpacket_stats_v3);
3989 st.stats3.tp_drops = drops;
3990 st.stats3.tp_packets += drops;
3991 data = &st.stats3;
3992 } else {
3993 lv = sizeof(struct tpacket_stats);
3994 st.stats1.tp_drops = drops;
3995 st.stats1.tp_packets += drops;
3996 data = &st.stats1;
3997 }
3998
3999 break;
4000 case PACKET_AUXDATA:
4001 val = po->auxdata;
4002 break;
4003 case PACKET_ORIGDEV:
4004 val = po->origdev;
4005 break;
4006 case PACKET_VNET_HDR:
4007 val = po->has_vnet_hdr;
4008 break;
4009 case PACKET_VERSION:
4010 val = po->tp_version;
4011 break;
4012 case PACKET_HDRLEN:
4013 if (len > sizeof(int))
4014 len = sizeof(int);
4015 if (len < sizeof(int))
4016 return -EINVAL;
4017 if (copy_from_user(&val, optval, len))
4018 return -EFAULT;
4019 switch (val) {
4020 case TPACKET_V1:
4021 val = sizeof(struct tpacket_hdr);
4022 break;
4023 case TPACKET_V2:
4024 val = sizeof(struct tpacket2_hdr);
4025 break;
4026 case TPACKET_V3:
4027 val = sizeof(struct tpacket3_hdr);
4028 break;
4029 default:
4030 return -EINVAL;
4031 }
4032 break;
4033 case PACKET_RESERVE:
4034 val = po->tp_reserve;
4035 break;
4036 case PACKET_LOSS:
4037 val = po->tp_loss;
4038 break;
4039 case PACKET_TIMESTAMP:
4040 val = po->tp_tstamp;
4041 break;
4042 case PACKET_FANOUT:
4043 val = (po->fanout ?
4044 ((u32)po->fanout->id |
4045 ((u32)po->fanout->type << 16) |
4046 ((u32)po->fanout->flags << 24)) :
4047 0);
4048 break;
4049 case PACKET_IGNORE_OUTGOING:
4050 val = po->prot_hook.ignore_outgoing;
4051 break;
4052 case PACKET_ROLLOVER_STATS:
4053 if (!po->rollover)
4054 return -EINVAL;
4055 rstats.tp_all = atomic_long_read(&po->rollover->num);
4056 rstats.tp_huge = atomic_long_read(&po->rollover->num_huge);
4057 rstats.tp_failed = atomic_long_read(&po->rollover->num_failed);
4058 data = &rstats;
4059 lv = sizeof(rstats);
4060 break;
4061 case PACKET_TX_HAS_OFF:
4062 val = po->tp_tx_has_off;
4063 break;
4064 case PACKET_QDISC_BYPASS:
4065 val = packet_use_direct_xmit(po);
4066 break;
4067 default:
4068 return -ENOPROTOOPT;
4069 }
4070
4071 if (len > lv)
4072 len = lv;
4073 if (put_user(len, optlen))
4074 return -EFAULT;
4075 if (copy_to_user(optval, data, len))
4076 return -EFAULT;
4077 return 0;
4078}
4079
4080static int packet_notifier(struct notifier_block *this,
4081 unsigned long msg, void *ptr)
4082{
4083 struct sock *sk;
4084 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4085 struct net *net = dev_net(dev);
4086
4087 rcu_read_lock();
4088 sk_for_each_rcu(sk, &net->packet.sklist) {
4089 struct packet_sock *po = pkt_sk(sk);
4090
4091 switch (msg) {
4092 case NETDEV_UNREGISTER:
4093 if (po->mclist)
4094 packet_dev_mclist_delete(dev, &po->mclist);
4095 fallthrough;
4096
4097 case NETDEV_DOWN:
4098 if (dev->ifindex == po->ifindex) {
4099 spin_lock(&po->bind_lock);
4100 if (po->running) {
4101 __unregister_prot_hook(sk, false);
4102 sk->sk_err = ENETDOWN;
4103 if (!sock_flag(sk, SOCK_DEAD))
4104 sk->sk_error_report(sk);
4105 }
4106 if (msg == NETDEV_UNREGISTER) {
4107 packet_cached_dev_reset(po);
4108 po->ifindex = -1;
4109 if (po->prot_hook.dev)
4110 dev_put(po->prot_hook.dev);
4111 po->prot_hook.dev = NULL;
4112 }
4113 spin_unlock(&po->bind_lock);
4114 }
4115 break;
4116 case NETDEV_UP:
4117 if (dev->ifindex == po->ifindex) {
4118 spin_lock(&po->bind_lock);
4119 if (po->num)
4120 register_prot_hook(sk);
4121 spin_unlock(&po->bind_lock);
4122 }
4123 break;
4124 }
4125 }
4126 rcu_read_unlock();
4127 return NOTIFY_DONE;
4128}
4129
4130
4131static int packet_ioctl(struct socket *sock, unsigned int cmd,
4132 unsigned long arg)
4133{
4134 struct sock *sk = sock->sk;
4135
4136 switch (cmd) {
4137 case SIOCOUTQ:
4138 {
4139 int amount = sk_wmem_alloc_get(sk);
4140
4141 return put_user(amount, (int __user *)arg);
4142 }
4143 case SIOCINQ:
4144 {
4145 struct sk_buff *skb;
4146 int amount = 0;
4147
4148 spin_lock_bh(&sk->sk_receive_queue.lock);
4149 skb = skb_peek(&sk->sk_receive_queue);
4150 if (skb)
4151 amount = skb->len;
4152 spin_unlock_bh(&sk->sk_receive_queue.lock);
4153 return put_user(amount, (int __user *)arg);
4154 }
4155#ifdef CONFIG_INET
4156 case SIOCADDRT:
4157 case SIOCDELRT:
4158 case SIOCDARP:
4159 case SIOCGARP:
4160 case SIOCSARP:
4161 case SIOCGIFADDR:
4162 case SIOCSIFADDR:
4163 case SIOCGIFBRDADDR:
4164 case SIOCSIFBRDADDR:
4165 case SIOCGIFNETMASK:
4166 case SIOCSIFNETMASK:
4167 case SIOCGIFDSTADDR:
4168 case SIOCSIFDSTADDR:
4169 case SIOCSIFFLAGS:
4170 return inet_dgram_ops.ioctl(sock, cmd, arg);
4171#endif
4172
4173 default:
4174 return -ENOIOCTLCMD;
4175 }
4176 return 0;
4177}
4178
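/* With a RX ring mapped, poll() is the idiomatic way to wait for frames;
 * packet_poll() below reports EPOLLIN once the frame at the ring's read
 * position has been handed to userspace. A hedged sketch, fd being the
 * packet socket (needs <poll.h>):
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	poll(&pfd, 1, -1);	// returns once a frame is TP_STATUS_USER
 */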
4179static __poll_t packet_poll(struct file *file, struct socket *sock,
4180 poll_table *wait)
4181{
4182 struct sock *sk = sock->sk;
4183 struct packet_sock *po = pkt_sk(sk);
4184 __poll_t mask = datagram_poll(file, sock, wait);
4185
4186 spin_lock_bh(&sk->sk_receive_queue.lock);
4187 if (po->rx_ring.pg_vec) {
4188 if (!packet_previous_rx_frame(po, &po->rx_ring,
4189 TP_STATUS_KERNEL))
4190 mask |= EPOLLIN | EPOLLRDNORM;
4191 }
4192 packet_rcv_try_clear_pressure(po);
4193 spin_unlock_bh(&sk->sk_receive_queue.lock);
4194 spin_lock_bh(&sk->sk_write_queue.lock);
4195 if (po->tx_ring.pg_vec) {
4196 if (packet_current_frame(po, &po->tx_ring, TP_STATUS_AVAILABLE))
4197 mask |= EPOLLOUT | EPOLLWRNORM;
4198 }
4199 spin_unlock_bh(&sk->sk_write_queue.lock);
4200 return mask;
4201}
4202
4203
4204/* Dirty? Well, I still have not learned a better way to account
4205 * for user mmaps.
4206 */
4207
4208static void packet_mm_open(struct vm_area_struct *vma)
4209{
4210 struct file *file = vma->vm_file;
4211 struct socket *sock = file->private_data;
4212 struct sock *sk = sock->sk;
4213
4214 if (sk)
4215 atomic_inc(&pkt_sk(sk)->mapped);
4216}
4217
4218static void packet_mm_close(struct vm_area_struct *vma)
4219{
4220 struct file *file = vma->vm_file;
4221 struct socket *sock = file->private_data;
4222 struct sock *sk = sock->sk;
4223
4224 if (sk)
4225 atomic_dec(&pkt_sk(sk)->mapped);
4226}
4227
4228static const struct vm_operations_struct packet_mmap_ops = {
4229 .open = packet_mm_open,
4230 .close = packet_mm_close,
4231};

static void free_pg_vec(struct pgv *pg_vec, unsigned int order,
			unsigned int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (likely(pg_vec[i].buffer)) {
			if (is_vmalloc_addr(pg_vec[i].buffer))
				vfree(pg_vec[i].buffer);
			else
				free_pages((unsigned long)pg_vec[i].buffer,
					   order);
			pg_vec[i].buffer = NULL;
		}
	}
	kfree(pg_vec);
}

/*
 * Allocate one ring block, trying progressively harder: cheap physically
 * contiguous pages first, then vmalloc, then contiguous pages again with
 * retry/reclaim allowed.
 */
static char *alloc_one_pg_vec_page(unsigned long order)
{
	char *buffer;
	gfp_t gfp_flags = GFP_KERNEL | __GFP_COMP |
			  __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY;

	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* __get_free_pages failed, fall back to vmalloc */
	buffer = vzalloc(array_size((1 << order), PAGE_SIZE));
	if (buffer)
		return buffer;

	/* vmalloc failed, let's dig into swap here */
	gfp_flags &= ~__GFP_NORETRY;
	buffer = (char *) __get_free_pages(gfp_flags, order);
	if (buffer)
		return buffer;

	/* complete and utter failure */
	return NULL;
}

static struct pgv *alloc_pg_vec(struct tpacket_req *req, int order)
{
	unsigned int block_nr = req->tp_block_nr;
	struct pgv *pg_vec;
	int i;

	pg_vec = kcalloc(block_nr, sizeof(struct pgv), GFP_KERNEL | __GFP_NOWARN);
	if (unlikely(!pg_vec))
		goto out;

	for (i = 0; i < block_nr; i++) {
		pg_vec[i].buffer = alloc_one_pg_vec_page(order);
		if (unlikely(!pg_vec[i].buffer))
			goto out_free_pgvec;
	}

out:
	return pg_vec;

out_free_pgvec:
	free_pg_vec(pg_vec, order, block_nr);
	pg_vec = NULL;
	goto out;
}

/*
 * Set up or tear down an rx/tx ring: validate the requested geometry,
 * allocate the block vector, then swap it in with the protocol hook
 * quiesced.
 */
static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
		int closing, int tx_ring)
{
	struct pgv *pg_vec = NULL;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long *rx_owner_map = NULL;
	int was_running, order = 0;
	struct packet_ring_buffer *rb;
	struct sk_buff_head *rb_queue;
	__be16 num;
	int err;
	/* Alias to minimize code churn */
	struct tpacket_req *req = &req_u->req;

	rb = tx_ring ? &po->tx_ring : &po->rx_ring;
	rb_queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;

	err = -EBUSY;
	if (!closing) {
		if (atomic_read(&po->mapped))
			goto out;
		if (packet_read_pending(rb))
			goto out;
	}

	if (req->tp_block_nr) {
		unsigned int min_frame_size;

		/* Sanity tests and some calculations */
		err = -EBUSY;
		if (unlikely(rb->pg_vec))
			goto out;

		switch (po->tp_version) {
		case TPACKET_V1:
			po->tp_hdrlen = TPACKET_HDRLEN;
			break;
		case TPACKET_V2:
			po->tp_hdrlen = TPACKET2_HDRLEN;
			break;
		case TPACKET_V3:
			po->tp_hdrlen = TPACKET3_HDRLEN;
			break;
		}

		err = -EINVAL;
		if (unlikely((int)req->tp_block_size <= 0))
			goto out;
		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
			goto out;
		min_frame_size = po->tp_hdrlen + po->tp_reserve;
		if (po->tp_version >= TPACKET_V3 &&
		    req->tp_block_size <
		    BLK_PLUS_PRIV((u64)req_u->req3.tp_sizeof_priv) + min_frame_size)
			goto out;
		if (unlikely(req->tp_frame_size < min_frame_size))
			goto out;
		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
			goto out;

		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
		if (unlikely(rb->frames_per_block == 0))
			goto out;
		if (unlikely(rb->frames_per_block > UINT_MAX / req->tp_block_nr))
			goto out;
		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
					req->tp_frame_nr))
			goto out;

		err = -ENOMEM;
		order = get_order(req->tp_block_size);
		pg_vec = alloc_pg_vec(req, order);
		if (unlikely(!pg_vec))
			goto out;
		switch (po->tp_version) {
		case TPACKET_V3:
			/* Block transmit is not supported yet */
			if (!tx_ring) {
				init_prb_bdqc(po, rb, pg_vec, req_u);
			} else {
				struct tpacket_req3 *req3 = &req_u->req3;

				if (req3->tp_retire_blk_tov ||
				    req3->tp_sizeof_priv ||
				    req3->tp_feature_req_word) {
					err = -EINVAL;
					goto out_free_pg_vec;
				}
			}
			break;
		default:
			if (!tx_ring) {
				rx_owner_map = bitmap_alloc(req->tp_frame_nr,
					GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO);
				if (!rx_owner_map)
					goto out_free_pg_vec;
			}
			break;
		}
	} else {
		/* Tearing the ring down: no frames may be requested */
		err = -EINVAL;
		if (unlikely(req->tp_frame_nr))
			goto out;
	}

	/* Detach socket from network */
	spin_lock(&po->bind_lock);
	was_running = po->running;
	num = po->num;
	if (was_running) {
		po->num = 0;
		__unregister_prot_hook(sk, false);
	}
	spin_unlock(&po->bind_lock);

	synchronize_net();

	err = -EBUSY;
	mutex_lock(&po->pg_vec_lock);
	if (closing || atomic_read(&po->mapped) == 0) {
		err = 0;
		spin_lock_bh(&rb_queue->lock);
		swap(rb->pg_vec, pg_vec);
		if (po->tp_version <= TPACKET_V2)
			swap(rb->rx_owner_map, rx_owner_map);
		rb->frame_max = (req->tp_frame_nr - 1);
		rb->head = 0;
		rb->frame_size = req->tp_frame_size;
		spin_unlock_bh(&rb_queue->lock);

		swap(rb->pg_vec_order, order);
		swap(rb->pg_vec_len, req->tp_block_nr);

		rb->pg_vec_pages = req->tp_block_size/PAGE_SIZE;
		po->prot_hook.func = (po->rx_ring.pg_vec) ?
						tpacket_rcv : packet_rcv;
		skb_queue_purge(rb_queue);
		if (atomic_read(&po->mapped))
			pr_err("packet_mmap: vma is busy: %d\n",
			       atomic_read(&po->mapped));
	}
	mutex_unlock(&po->pg_vec_lock);

	spin_lock(&po->bind_lock);
	if (was_running) {
		po->num = num;
		register_prot_hook(sk);
	}
	spin_unlock(&po->bind_lock);
	if (pg_vec && (po->tp_version > TPACKET_V2)) {
		/* Because we don't support block-based V3 on tx-ring */
		if (!tx_ring)
			prb_shutdown_retire_blk_timer(po, rb_queue);
	}

out_free_pg_vec:
	bitmap_free(rx_owner_map);
	if (pg_vec)
		free_pg_vec(pg_vec, order, req->tp_block_nr);
out:
	return err;
}
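
/*
 * For illustration only: a minimal userspace sketch (not part of this
 * file; the helper name and geometry are ours) of the setsockopt() path
 * into packet_set_ring() above, setting up a TPACKET_V2 receive ring and
 * mapping it.  Error handling is omitted for brevity.
 *
 *	#include <sys/mman.h>
 *	#include <sys/socket.h>
 *	#include <linux/if_packet.h>
 *
 *	static void *setup_rx_ring(int fd, struct tpacket_req *req)
 *	{
 *		int ver = TPACKET_V2;
 *
 *		setsockopt(fd, SOL_PACKET, PACKET_VERSION, &ver, sizeof(ver));
 *
 *		// block size must be page-aligned and nonzero, frame size
 *		// a multiple of TPACKET_ALIGNMENT, and frame_nr must equal
 *		// (block_size / frame_size) * block_nr
 *		req->tp_block_size = 1 << 16;
 *		req->tp_block_nr   = 4;
 *		req->tp_frame_size = 1 << 11;
 *		req->tp_frame_nr   = (req->tp_block_size /
 *				      req->tp_frame_size) * req->tp_block_nr;
 *		setsockopt(fd, SOL_PACKET, PACKET_RX_RING, req, sizeof(*req));
 *
 *		// one contiguous mapping covering all blocks; see
 *		// packet_mmap() below
 *		return mmap(NULL, req->tp_block_size * req->tp_block_nr,
 *			    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	}
 */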

/*
 * Map the rx ring followed by the tx ring as one contiguous range of
 * pages into the caller's address space.  The vma must match the
 * combined ring size exactly, and no page offset is accepted.
 */
static int packet_mmap(struct file *file, struct socket *sock,
		struct vm_area_struct *vma)
{
	struct sock *sk = sock->sk;
	struct packet_sock *po = pkt_sk(sk);
	unsigned long size, expected_size;
	struct packet_ring_buffer *rb;
	unsigned long start;
	int err = -EINVAL;
	int i;

	if (vma->vm_pgoff)
		return -EINVAL;

	mutex_lock(&po->pg_vec_lock);

	expected_size = 0;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec) {
			expected_size += rb->pg_vec_len
						* rb->pg_vec_pages
						* PAGE_SIZE;
		}
	}

	if (expected_size == 0)
		goto out;

	size = vma->vm_end - vma->vm_start;
	if (size != expected_size)
		goto out;

	start = vma->vm_start;
	for (rb = &po->rx_ring; rb <= &po->tx_ring; rb++) {
		if (rb->pg_vec == NULL)
			continue;

		for (i = 0; i < rb->pg_vec_len; i++) {
			struct page *page;
			void *kaddr = rb->pg_vec[i].buffer;
			int pg_num;

			for (pg_num = 0; pg_num < rb->pg_vec_pages; pg_num++) {
				page = pgv_to_page(kaddr);
				err = vm_insert_page(vma, start, page);
				if (unlikely(err))
					goto out;
				start += PAGE_SIZE;
				kaddr += PAGE_SIZE;
			}
		}
	}

	atomic_inc(&po->mapped);
	vma->vm_ops = &packet_mmap_ops;
	err = 0;

out:
	mutex_unlock(&po->pg_vec_lock);
	return err;
}
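
/*
 * For illustration only: a userspace sketch (not part of this file; the
 * helper name is ours) of consuming frames from the mapping created
 * above, assuming the TPACKET_V2 layout.  The kernel sets TP_STATUS_USER
 * when a frame is filled; userspace hands it back by writing
 * TP_STATUS_KERNEL.  A real consumer also needs memory barriers around
 * the status reads and writes.
 *
 *	#include <linux/if_packet.h>
 *
 *	static void drain_ring(char *ring, const struct tpacket_req *req)
 *	{
 *		unsigned int b, f;
 *		unsigned int fpb = req->tp_block_size / req->tp_frame_size;
 *
 *		for (b = 0; b < req->tp_block_nr; b++) {
 *			for (f = 0; f < fpb; f++) {
 *				struct tpacket2_hdr *hdr = (void *)(ring +
 *					b * req->tp_block_size +
 *					f * req->tp_frame_size);
 *
 *				if (!(hdr->tp_status & TP_STATUS_USER))
 *					continue;
 *				// packet data begins hdr->tp_mac bytes in
 *				hdr->tp_status = TP_STATUS_KERNEL;  // hand back
 *			}
 *		}
 *	}
 */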

static const struct proto_ops packet_ops_spkt = {
	.family = PF_PACKET,
	.owner = THIS_MODULE,
	.release = packet_release,
	.bind = packet_bind_spkt,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = packet_getname_spkt,
	.poll = datagram_poll,
	.ioctl = packet_ioctl,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.sendmsg = packet_sendmsg_spkt,
	.recvmsg = packet_recvmsg,
	.mmap = sock_no_mmap,
	.sendpage = sock_no_sendpage,
};

static const struct proto_ops packet_ops = {
	.family = PF_PACKET,
	.owner = THIS_MODULE,
	.release = packet_release,
	.bind = packet_bind,
	.connect = sock_no_connect,
	.socketpair = sock_no_socketpair,
	.accept = sock_no_accept,
	.getname = packet_getname,
	.poll = packet_poll,
	.ioctl = packet_ioctl,
	.gettstamp = sock_gettstamp,
	.listen = sock_no_listen,
	.shutdown = sock_no_shutdown,
	.setsockopt = packet_setsockopt,
	.getsockopt = packet_getsockopt,
	.sendmsg = packet_sendmsg,
	.recvmsg = packet_recvmsg,
	.mmap = packet_mmap,
	.sendpage = sock_no_sendpage,
};

static const struct net_proto_family packet_family_ops = {
	.family = PF_PACKET,
	.create = packet_create,
	.owner = THIS_MODULE,
};

static struct notifier_block packet_netdev_notifier = {
	.notifier_call = packet_notifier,
};

#ifdef CONFIG_PROC_FS

static void *packet_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(RCU)
{
	struct net *net = seq_file_net(seq);

	rcu_read_lock();
	return seq_hlist_start_head_rcu(&net->packet.sklist, *pos);
}

static void *packet_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct net *net = seq_file_net(seq);
	return seq_hlist_next_rcu(v, &net->packet.sklist, pos);
}

static void packet_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	rcu_read_unlock();
}

static int packet_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq,
			   "%*sRefCnt Type Proto Iface R Rmem User Inode\n",
			   IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
	else {
		struct sock *s = sk_entry(v);
		const struct packet_sock *po = pkt_sk(s);

		seq_printf(seq,
			   "%pK %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n",
			   s,
			   refcount_read(&s->sk_refcnt),
			   s->sk_type,
			   ntohs(po->num),
			   po->ifindex,
			   po->running,
			   atomic_read(&s->sk_rmem_alloc),
			   from_kuid_munged(seq_user_ns(seq), sock_i_uid(s)),
			   sock_i_ino(s));
	}

	return 0;
}

static const struct seq_operations packet_seq_ops = {
	.start = packet_seq_start,
	.next = packet_seq_next,
	.stop = packet_seq_stop,
	.show = packet_seq_show,
};
#endif

static int __net_init packet_net_init(struct net *net)
{
	mutex_init(&net->packet.sklist_lock);
	INIT_HLIST_HEAD(&net->packet.sklist);

#ifdef CONFIG_PROC_FS
	if (!proc_create_net("packet", 0, net->proc_net, &packet_seq_ops,
			sizeof(struct seq_net_private)))
		return -ENOMEM;
#endif /* CONFIG_PROC_FS */

	return 0;
}

static void __net_exit packet_net_exit(struct net *net)
{
	remove_proc_entry("packet", net->proc_net);
	WARN_ON_ONCE(!hlist_empty(&net->packet.sklist));
}

static struct pernet_operations packet_net_ops = {
	.init = packet_net_init,
	.exit = packet_net_exit,
};

static void __exit packet_exit(void)
{
	unregister_netdevice_notifier(&packet_netdev_notifier);
	unregister_pernet_subsys(&packet_net_ops);
	sock_unregister(PF_PACKET);
	proto_unregister(&packet_proto);
}

/* Register in dependency order; unwind in reverse on any failure. */
static int __init packet_init(void)
{
	int rc;

	rc = proto_register(&packet_proto, 0);
	if (rc)
		goto out;
	rc = sock_register(&packet_family_ops);
	if (rc)
		goto out_proto;
	rc = register_pernet_subsys(&packet_net_ops);
	if (rc)
		goto out_sock;
	rc = register_netdevice_notifier(&packet_netdev_notifier);
	if (rc)
		goto out_pernet;

	return 0;

out_pernet:
	unregister_pernet_subsys(&packet_net_ops);
out_sock:
	sock_unregister(PF_PACKET);
out_proto:
	proto_unregister(&packet_proto);
out:
	return rc;
}

module_init(packet_init);
module_exit(packet_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_PACKET);