torvalds/linux: net/ipv4/ip_input.c (blob/master)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <[email protected]>
 *		Donald Becker, <[email protected]>
 *		Alan Cox, <[email protected]>
 *		Richard Underwood
 *		Stefan Becker, <[email protected]>
 *		Jorge Cwik, <[email protected]>
 *		Arnt Gulbrandsen, <[email protected]>
 *
 * Fixes:
 *		Alan Cox	:	Commented a couple of minor bits of surplus code
 *		Alan Cox	:	Undefining IP_FORWARD doesn't include the code
 *					(just stops a compiler warning).
 *		Alan Cox	:	Frames with >=MAX_ROUTE record routes, strict routes or loose routes
 *					are junked rather than corrupting things.
 *		Alan Cox	:	Frames to bad broadcast subnets are dumped
 *					We used to process them non broadcast and
 *					boy could that cause havoc.
 *		Alan Cox	:	ip_forward sets the free flag on the
 *					new frame it queues. Still crap because
 *					it copies the frame but at least it
 *					doesn't eat memory too.
 *		Alan Cox	:	Generic queue code and memory fixes.
 *		Fred Van Kempen	:	IP fragment support (borrowed from NET2E)
 *		Gerhard Koerting:	Forward fragmented frames correctly.
 *		Gerhard Koerting:	Fixes to my fix of the above 8-).
 *		Gerhard Koerting:	IP interface addressing fix.
 *		Linus Torvalds	:	More robustness checks
 *		Alan Cox	:	Even more checks: Still not as robust as it ought to be
 *		Alan Cox	:	Save IP header pointer for later
 *		Alan Cox	:	ip option setting
 *		Alan Cox	:	Use ip_tos/ip_ttl settings
 *		Alan Cox	:	Fragmentation bogosity removed
 *					(Thanks to [email protected])
 *		Dmitry Gorodchanin :	Send of a raw packet crash fix.
 *		Alan Cox	:	Silly ip bug when an overlength
 *					fragment turns up. Now frees the
 *					queue.
 *		Linus Torvalds/	:	Memory leakage on fragmentation
 *		Alan Cox	:	handling.
 *		Gerhard Koerting:	Forwarding uses IP priority hints
 *		Teemu Rantanen	:	Fragment problems.
 *		Alan Cox	:	General cleanup, comments and reformat
 *		Alan Cox	:	SNMP statistics
 *		Alan Cox	:	BSD address rule semantics. Also see
 *					UDP as there is a nasty checksum issue
 *					if you do things the wrong way.
 *		Alan Cox	:	Always defrag, moved IP_FORWARD to the config.in file
 *		Alan Cox	:	IP options adjust sk->priority.
 *		Pedro Roque	:	Fix mtu/length error in ip_forward.
 *		Alan Cox	:	Avoid ip_chk_addr when possible.
 *		Richard Underwood :	IP multicasting.
 *		Alan Cox	:	Cleaned up multicast handlers.
 *		Alan Cox	:	RAW sockets demultiplex in the BSD style.
 *		Gunther Mayer	:	Fix the SNMP reporting typo
 *		Alan Cox	:	Always in group 224.0.0.1
 *		Pauline Middelink :	Fast ip_checksum update when forwarding
 *					Masquerading support.
 *		Alan Cox	:	Multicast loopback error for 224.0.0.1
 *		Alan Cox	:	IP_MULTICAST_LOOP option.
 *		Alan Cox	:	Use notifiers.
 *		Bjorn Ekwall	:	Removed ip_csum (from slhc.c too)
 *		Bjorn Ekwall	:	Moved ip_fast_csum to ip.h (inline!)
 *		Stefan Becker	:	Send out ICMP HOST REDIRECT
 *		Arnt Gulbrandsen :	ip_build_xmit
 *		Alan Cox	:	Per socket routing cache
 *		Alan Cox	:	Fixed routing cache, added header cache.
 *		Alan Cox	:	Loopback didn't work right in original ip_build_xmit - fixed it.
 *		Alan Cox	:	Only send ICMP_REDIRECT if src/dest are the same net.
 *		Alan Cox	:	Incoming IP option handling.
 *		Alan Cox	:	Set saddr on raw output frames as per BSD.
 *		Alan Cox	:	Stopped broadcast source route explosions.
 *		Alan Cox	:	Can disable source routing
 *		Takeshi Sone	:	Masquerading didn't work.
 *		Dave Bonn,Alan Cox :	Faster IP forwarding whenever possible.
 *		Alan Cox	:	Memory leaks, tramples, misc debugging.
 *		Alan Cox	:	Fixed multicast (by popular demand 8))
 *		Alan Cox	:	Fixed forwarding (by even more popular demand 8))
 *		Alan Cox	:	Fixed SNMP statistics [I think]
 *		Gerhard Koerting :	IP fragmentation forwarding fix
 *		Alan Cox	:	Device lock against page fault.
 *		Alan Cox	:	IP_HDRINCL facility.
 *		Werner Almesberger :	Zero fragment bug
 *		Alan Cox	:	RAW IP frame length bug
 *		Alan Cox	:	Outgoing firewall on build_xmit
 *		A.N.Kuznetsov	:	IP_OPTIONS support throughout the kernel
 *		Alan Cox	:	Multicast routing hooks
 *		Jos Vos		:	Do accounting *before* call_in_firewall
 *		Willy Konynenberg :	Transparent proxying support
 *
 * To Fix:
 *		IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
 *		and could be made very efficient with the addition of some virtual memory hacks to permit
 *		the allocation of a buffer that can then be 'grown' by twiddling page tables.
 *		Output fragmentation wants updating along with the buffer management to use a single
 *		interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
 *		output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
 *		fragmentation anyway.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/indirect_call_wrapper.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inet_ecn.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <net/dst_metadata.h>

/*
 *	Process Router Attention IP option (RFC 2113)
 */
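/* Every raw socket that enabled IP_ROUTER_ALERT sits on the per-netns
 * ra_chain; each matching socket but the last gets a clone, and the last
 * one consumes the original skb. Returns true when the packet was
 * delivered (or swallowed by defragmentation), false if normal
 * processing should continue.
 *
 * A minimal userspace sketch of how a socket joins this chain
 * (illustrative only, e.g. an RSVP daemon):
 *
 *	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_RSVP);
 *	int on = 1;
 *	setsockopt(fd, IPPROTO_IP, IP_ROUTER_ALERT, &on, sizeof(on));
 */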
bool ip_call_ra_chain(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;
	u8 protocol = ip_hdr(skb)->protocol;
	struct sock *last = NULL;
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);

	for (ra = rcu_dereference(net->ipv4.ra_chain); ra; ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		/* If socket is bound to an interface, only report
		 * the packet if it came from that interface.
		 */
		if (sk && inet_sk(sk)->inet_num == protocol &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == dev->ifindex)) {
			if (ip_is_fragment(ip_hdr(skb))) {
				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
					return true;
			}
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					raw_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		raw_rcv(last, skb);
		return true;
	}
	return false;
}

INDIRECT_CALLABLE_DECLARE(int udp_rcv(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int tcp_v4_rcv(struct sk_buff *));
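/* Deliver skb to the handler registered for @protocol in inet_protos[];
 * called with rcu_read_lock() held. A negative handler return value is
 * taken as a new protocol number and the packet is resubmitted (e.g.
 * after decapsulation). Packets for unknown protocols draw an
 * ICMP_PROT_UNREACH unless a raw socket already took a copy.
 *
 * Handlers are attached with inet_add_protocol(); a sketch with
 * hypothetical names (foo_rcv, IPPROTO_FOO):
 *
 *	static const struct net_protocol foo_protocol = {
 *		.handler   = foo_rcv,	(int foo_rcv(struct sk_buff *skb))
 *		.no_policy = 1,		(skip the xfrm policy check above)
 *	};
 *	inet_add_protocol(&foo_protocol, IPPROTO_FOO);
 */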
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
{
	const struct net_protocol *ipprot;
	int raw, ret;

resubmit:
	raw = raw_local_deliver(skb, protocol);

	ipprot = rcu_dereference(inet_protos[protocol]);
	if (ipprot) {
		if (!ipprot->no_policy) {
			if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				kfree_skb_reason(skb,
						 SKB_DROP_REASON_XFRM_POLICY);
				return;
			}
			nf_reset_ct(skb);
		}
		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
				      skb);
		if (ret < 0) {
			protocol = -ret;
			goto resubmit;
		}
		__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
	} else {
		if (!raw) {
			if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_PROT_UNREACH, 0);
			}
			kfree_skb_reason(skb, SKB_DROP_REASON_IP_NOPROTO);
		} else {
			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
			consume_skb(skb);
		}
	}
}

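/* okfn for the NF_INET_LOCAL_IN hook: strip the IP header and hand the
 * payload to ip_protocol_deliver_rcu() under rcu_read_lock().
 */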
static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	if (unlikely(skb_orphan_frags_rx(skb, GFP_ATOMIC))) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		kfree_skb_reason(skb, SKB_DROP_REASON_NOMEM);
		return 0;
	}

	skb_clear_delivery_time(skb);
	__skb_pull(skb, skb_network_header_len(skb));

	rcu_read_lock();
	ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
	rcu_read_unlock();

	return 0;
}

/*
 *	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */
	struct net *net = dev_net(skb->dev);

	if (ip_is_fragment(ip_hdr(skb))) {
		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;
	}

	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
		       net, NULL, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
}
EXPORT_SYMBOL(ip_local_deliver);

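/* Handle IP options on input. skb_cow() makes the header writable
 * before ip_options_compile(); source-routed frames are refused when
 * the receiving device disallows source routing. Returns
 * SKB_NOT_DROPPED_YET when the packet may proceed.
 */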
static inline enum skb_drop_reason
ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
{
	const struct iphdr *iph;
	struct ip_options *opt;

	/* It looks as overkill, because not all
	   IP options require packet mangling.
	   But it is the easiest for now, especially taking
	   into account that combination of IP options
	   and running sniffer is extremely rare condition.
					      --ANK (980813)
	*/
	if (skb_cow(skb, skb_headroom(skb))) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		return SKB_DROP_REASON_NOMEM;
	}

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);
	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);

	if (ip_options_compile(dev_net(dev), opt, skb)) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
		return SKB_DROP_REASON_IP_INHDR;
	}

	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
				if (IN_DEV_LOG_MARTIANS(in_dev))
					net_info_ratelimited("source route option %pI4 -> %pI4\n",
							     &iph->saddr,
							     &iph->daddr);
				return SKB_DROP_REASON_NOT_SPECIFIED;
			}
		}

		if (ip_options_rcv_srr(skb, dev))
			return SKB_DROP_REASON_NOT_SPECIFIED;
	}

	return SKB_NOT_DROPPED_YET;
}

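/* A batched-receive route hint is usable when this skb has no dst yet
 * and matches the hinted packet's destination address and tos.
 */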
static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
			    const struct sk_buff *hint)
{
	return hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr &&
	       ip_hdr(hint)->tos == iph->tos;
}

int tcp_v4_early_demux(struct sk_buff *skb);
enum skb_drop_reason udp_v4_early_demux(struct sk_buff *skb);
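/* Core of the PRE_ROUTING finish step: try early demux, attach a route
 * (reusing @hint where valid), process IP options and account
 * multicast/broadcast traffic. Returns NET_RX_SUCCESS with a valid dst
 * attached, or NET_RX_DROP after freeing the skb with a drop reason.
 */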
static int ip_rcv_finish_core(struct net *net,
			      struct sk_buff *skb, struct net_device *dev,
			      const struct sk_buff *hint)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct rtable *rt;
	int drop_reason;

	if (ip_can_use_hint(skb, iph, hint)) {
		drop_reason = ip_route_use_hint(skb, iph->daddr, iph->saddr,
						ip4h_dscp(iph), dev, hint);
		if (unlikely(drop_reason))
			goto drop_error;
	}

	if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
	    !skb_dst(skb) &&
	    !skb->sk &&
	    !ip_is_fragment(iph)) {
		switch (iph->protocol) {
		case IPPROTO_TCP:
			if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) {
				tcp_v4_early_demux(skb);

				/* must reload iph, skb->head might have changed */
				iph = ip_hdr(skb);
			}
			break;
		case IPPROTO_UDP:
			if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) {
				drop_reason = udp_v4_early_demux(skb);
				if (unlikely(drop_reason))
					goto drop_error;

				/* must reload iph, skb->head might have changed */
				iph = ip_hdr(skb);
			}
			break;
		}
	}

	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */
	if (!skb_valid_dst(skb)) {
		drop_reason = ip_route_input_noref(skb, iph->daddr, iph->saddr,
						   ip4h_dscp(iph), dev);
		if (unlikely(drop_reason))
			goto drop_error;
	} else {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev && IN_DEV_ORCONF(in_dev, NOPOLICY))
			IPCB(skb)->flags |= IPSKB_NOPOLICY;
	}

#ifdef CONFIG_IP_ROUTE_CLASSID
	if (unlikely(skb_dst(skb)->tclassid)) {
		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
		u32 idx = skb_dst(skb)->tclassid;
		st[idx&0xFF].o_packets++;
		st[idx&0xFF].o_bytes += skb->len;
		st[(idx>>16)&0xFF].i_packets++;
		st[(idx>>16)&0xFF].i_bytes += skb->len;
	}
#endif

	if (iph->ihl > 5) {
		drop_reason = ip_rcv_options(skb, dev);
		if (drop_reason)
			goto drop;
	}

	rt = skb_rtable(skb);
	if (rt->rt_type == RTN_MULTICAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
	} else if (skb->pkt_type == PACKET_BROADCAST ||
		   skb->pkt_type == PACKET_MULTICAST) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		/* RFC 1122 3.3.6:
		 *
		 *   When a host sends a datagram to a link-layer broadcast
		 *   address, the IP destination address MUST be a legal IP
		 *   broadcast or IP multicast address.
		 *
		 *   A host SHOULD silently discard a datagram that is received
		 *   via a link-layer broadcast (see Section 2.4) but does not
		 *   specify an IP multicast or broadcast destination address.
		 *
		 * This doesn't explicitly say L2 *broadcast*, but broadcast is
		 * in a way a form of multicast and the most common use case for
		 * this is 802.11 protecting against cross-station spoofing (the
		 * so-called "hole-196" attack) so do it for both.
		 */
		if (in_dev &&
		    IN_DEV_ORCONF(in_dev, DROP_UNICAST_IN_L2_MULTICAST)) {
			drop_reason = SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST;
			goto drop;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb_reason(skb, drop_reason);
	return NET_RX_DROP;

drop_error:
	if (drop_reason == SKB_DROP_REASON_IP_RPFILTER)
		__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
	goto drop;
}

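/* okfn for the NF_INET_PRE_ROUTING hook on the single-skb path: give an
 * L3 master device a chance to claim the packet, then run
 * ip_rcv_finish_core() and dispatch via dst_input().
 */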
static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;

	ret = ip_rcv_finish_core(net, skb, dev, NULL);
	if (ret != NET_RX_DROP)
		ret = dst_input(skb);
	return ret;
}

/*
 *	Main IP Receive routine.
 */
static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
{
	const struct iphdr *iph;
	int drop_reason;
	u32 len;

	/* When the interface is in promisc. mode, drop all the crap
	 * that it receives, do not try to analyse it.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
		drop_reason = SKB_DROP_REASON_OTHERHOST;
		goto drop;
	}

	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto out;
	}

	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/*
	 *	RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 */
	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;

	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
	__IP_ADD_STATS(net,
		       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
		       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	len = iph_totlen(skb, iph);
	if (skb->len < len) {
		drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	/* Our transport medium may have padded the buffer out. Now we know it
	 * is IP we can trim to the true length of the frame.
	 * Note this now means skb->len holds ntohs(iph->tot_len).
	 */
	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	skb->transport_header = skb->network_header + iph->ihl*4;

	/* Remove any debris in the socket control block */
	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	IPCB(skb)->iif = skb->skb_iif;

	/* Must drop socket now because of tproxy. */
	if (!skb_sk_is_prefetched(skb))
		skb_orphan(skb);

	return skb;

csum_error:
	drop_reason = SKB_DROP_REASON_IP_CSUM;
	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	if (drop_reason == SKB_DROP_REASON_NOT_SPECIFIED)
		drop_reason = SKB_DROP_REASON_IP_INHDR;
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	kfree_skb_reason(skb, drop_reason);
out:
	return NULL;
}

/*
 * IP receive entry point
 */
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);

	skb = ip_rcv_core(skb, net);
	if (skb == NULL)
		return NET_RX_DROP;

	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
		       net, NULL, skb, dev, NULL,
		       ip_rcv_finish);
}

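/* Pass each skb in a same-dst sublist to dst_input(), unlinking it from
 * the list first.
 */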
static void ip_sublist_rcv_finish(struct list_head *head)
{
	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		dst_input(skb);
	}
}

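/* Only offer this skb's route as a hint for the next packet when it is
 * safe to share: custom FIB rules, link-local broadcast, zeronet
 * destinations and multipath routes disqualify it.
 */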
static struct sk_buff *ip_extract_route_hint(const struct net *net,
					     struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	if (fib4_has_custom_rules(net) ||
	    ipv4_is_lbcast(iph->daddr) ||
	    ipv4_is_zeronet(iph->daddr) ||
	    IPCB(skb)->flags & IPSKB_MULTIPATH)
		return NULL;

	return skb;
}

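/* List counterpart of ip_rcv_finish(): route each skb (reusing the
 * previous packet's dst as a hint where possible) and batch consecutive
 * packets that resolved to the same dst into sublists, so dst_input()
 * is dispatched once per sublist.
 */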
static void ip_list_rcv_finish(struct net *net, struct list_head *head)
{
	struct sk_buff *skb, *next, *hint = NULL;
	struct dst_entry *curr_dst = NULL;
	LIST_HEAD(sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct dst_entry *dst;

		skb_list_del_init(skb);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip_rcv(skb);
		if (!skb)
			continue;
		if (ip_rcv_finish_core(net, skb, dev, hint) == NET_RX_DROP)
			continue;

		dst = skb_dst(skb);
		if (curr_dst != dst) {
			hint = ip_extract_route_hint(net, skb);

			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv_finish(&sublist);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip_sublist_rcv_finish(&sublist);
}

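/* Run a batch of packets from one device and netns through the
 * NF_INET_PRE_ROUTING hook, then finish reception for the survivors.
 */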
static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
			   struct net *net)
{
	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, ip_rcv_finish);
	ip_list_rcv_finish(net, head);
}

/* Receive a list of IP packets */
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev)
{
	struct net_device *curr_dev = NULL;
	struct net *curr_net = NULL;
	struct sk_buff *skb, *next;
	LIST_HEAD(sublist);

	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct net *net = dev_net(dev);

		skb_list_del_init(skb);
		skb = ip_rcv_core(skb, net);
		if (skb == NULL)
			continue;

		if (curr_dev != dev || curr_net != net) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv(&sublist, curr_dev, curr_net);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dev = dev;
			curr_net = net;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	if (!list_empty(&sublist))
		ip_sublist_rcv(&sublist, curr_dev, curr_net);
}