author    | Linus Torvalds <torvalds@g5.osdl.org> | 2006-07-09 15:50:41 -0700
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-07-09 15:50:41 -0700
commit    | 09075ef0fd585fb093bb9a6cd1240272114f89cf (patch)
tree      | c01d2cc260a18df73f785bea4de1c1cfbcbbd16f
parent    | c87fed1546bd00b42ee75f26c6b45393e4bf7559 (diff)
parent    | 1b30dd359ebec22d035e8b145751319f63772ca1 (diff)
Merge commit master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6 of HEAD
* HEAD:
[AX.25]: Use kzalloc
[ATM] net/atm/clip.c: fix PROC_FS=n compile
[PKT_SCHED]: act_api: Fix module leak while flushing actions
[NET]: Fix IPv4/DECnet routing rule dumping
[NET] gso: Fix up GSO packets with broken checksums
[NET] gso: Add skb_is_gso
[IRDA]: fix drivers/net/irda/ali-ircc.c:ali_ircc_init()
[ATM]: fix possible recursive locking in skb_migrate()
[ATM]: Typo in drivers/atm/Kconfig...
[TG3]: add amd8131 to "write reorder" chipsets
[NET]: Fix network device interface printk message priority
37 files changed, 237 insertions, 86 deletions
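
Most of the driver-side changes in the diff below convert open-coded checks of skb_shinfo(skb)->gso_size into the new skb_is_gso() helper that this merge adds to include/linux/skbuff.h. A minimal sketch of that conversion in a hypothetical driver transmit routine (the foo_* names are placeholders, not code from this merge):

```c
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/*
 * skb_is_gso() is added by this merge in include/linux/skbuff.h:
 *
 *     static inline int skb_is_gso(const struct sk_buff *skb)
 *     {
 *             return skb_shinfo(skb)->gso_size;
 *     }
 *
 * Hypothetical transmit routine showing the pattern the driver
 * patches below switch to; foo_* is illustrative only.
 */
static int foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int desc_needed = skb_shinfo(skb)->nr_frags + 1;

	/* Before this merge: if (skb_shinfo(skb)->gso_size) ... */
	if (skb_is_gso(skb))
		desc_needed++;	/* TSO frames need an extra descriptor */

	/* ... check ring space for desc_needed entries and queue skb ... */
	return 0;
}
```
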
diff --git a/drivers/atm/Kconfig b/drivers/atm/Kconfig index 01a9f1cb7743..cfa5af883e13 100644 --- a/drivers/atm/Kconfig +++ b/drivers/atm/Kconfig @@ -398,7 +398,7 @@ config ATM_FORE200E_USE_TASKLET default n help This defers work to be done by the interrupt handler to a - tasklet instead of hanlding everything at interrupt time. This + tasklet instead of handling everything at interrupt time. This may improve the responsive of the host. config ATM_FORE200E_TX_RETRY diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c index 64b6a72b4f6a..db73de0d2511 100644 --- a/drivers/net/bnx2.c +++ b/drivers/net/bnx2.c @@ -1639,7 +1639,7 @@ bnx2_tx_int(struct bnx2 *bp) skb = tx_buf->skb; #ifdef BCM_TSO /* partial BD completions possible with TSO packets */ - if (skb_shinfo(skb)->gso_size) { + if (skb_is_gso(skb)) { u16 last_idx, last_ring_idx; last_idx = sw_cons + diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c index 87f94d939ff8..61b3754f50ff 100644 --- a/drivers/net/chelsio/sge.c +++ b/drivers/net/chelsio/sge.c @@ -1417,7 +1417,7 @@ int t1_start_xmit(struct sk_buff *skb, struct net_device *dev) struct cpl_tx_pkt *cpl; #ifdef NETIF_F_TSO - if (skb_shinfo(skb)->gso_size) { + if (skb_is_gso(skb)) { int eth_type; struct cpl_tx_pkt_lso *hdr; diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c index f06b281c8f6e..6e7d31bacf4d 100644 --- a/drivers/net/e1000/e1000_main.c +++ b/drivers/net/e1000/e1000_main.c @@ -2524,7 +2524,7 @@ e1000_tso(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, uint8_t ipcss, ipcso, tucss, tucso, hdr_len; int err; - if (skb_shinfo(skb)->gso_size) { + if (skb_is_gso(skb)) { if (skb_header_cloned(skb)) { err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (err) @@ -2649,7 +2649,7 @@ e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring, * tso gets written back prematurely before the data is fully * DMA'd to the controller */ if (!skb->data_len && tx_ring->last_tx_tso && - !skb_shinfo(skb)->gso_size) { + !skb_is_gso(skb)) { tx_ring->last_tx_tso = 0; size -= 4; } @@ -2937,8 +2937,7 @@ e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev) #ifdef NETIF_F_TSO /* Controller Erratum workaround */ - if (!skb->data_len && tx_ring->last_tx_tso && - !skb_shinfo(skb)->gso_size) + if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb)) count++; #endif diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c index 037d870712ff..ad81ec68f887 100644 --- a/drivers/net/forcedeth.c +++ b/drivers/net/forcedeth.c @@ -1495,7 +1495,7 @@ static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev) np->tx_skbuff[nr] = skb; #ifdef NETIF_F_TSO - if (skb_shinfo(skb)->gso_size) + if (skb_is_gso(skb)) tx_flags_extra = NV_TX2_TSO | (skb_shinfo(skb)->gso_size << NV_TX2_TSO_SHIFT); else #endif diff --git a/drivers/net/irda/ali-ircc.c b/drivers/net/irda/ali-ircc.c index bf1fca5a3fa0..e3c8cd5eca67 100644 --- a/drivers/net/irda/ali-ircc.c +++ b/drivers/net/irda/ali-ircc.c @@ -146,7 +146,7 @@ static int __init ali_ircc_init(void) { ali_chip_t *chip; chipio_t info; - int ret = -ENODEV; + int ret; int cfg, cfg_base; int reg, revision; int i = 0; @@ -160,6 +160,7 @@ static int __init ali_ircc_init(void) return ret; } + ret = -ENODEV; /* Probe for all the ALi chipsets we know about */ for (chip= chips; chip->name; chip++, i++) diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c index b91e082483f6..7eb08d929139 100644 --- a/drivers/net/ixgb/ixgb_main.c +++ b/drivers/net/ixgb/ixgb_main.c @@ -1173,7 
+1173,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb) uint16_t ipcse, tucse, mss; int err; - if(likely(skb_shinfo(skb)->gso_size)) { + if (likely(skb_is_gso(skb))) { if (skb_header_cloned(skb)) { err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); if (err) diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c index 43fef7de8cb9..997cbce9af6e 100644 --- a/drivers/net/loopback.c +++ b/drivers/net/loopback.c @@ -139,7 +139,7 @@ static int loopback_xmit(struct sk_buff *skb, struct net_device *dev) #endif #ifdef LOOPBACK_TSO - if (skb_shinfo(skb)->gso_size) { + if (skb_is_gso(skb)) { BUG_ON(skb->protocol != htons(ETH_P_IP)); BUG_ON(skb->nh.iph->protocol != IPPROTO_TCP); diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c index f4c8fd373b9b..ee1de971a712 100644 --- a/drivers/net/myri10ge/myri10ge.c +++ b/drivers/net/myri10ge/myri10ge.c @@ -2116,7 +2116,7 @@ abort_linearize: } idx = (idx + 1) & tx->mask; } while (idx != last_idx); - if (skb_shinfo(skb)->gso_size) { + if (skb_is_gso(skb)) { printk(KERN_ERR "myri10ge: %s: TSO but wanted to linearize?!?!?\n", mgp->dev->name); diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c index 418f169a6a31..31093760aa1e 100644 --- a/drivers/net/sky2.c +++ b/drivers/net/sky2.c @@ -1159,7 +1159,7 @@ static unsigned tx_le_req(const struct sk_buff *skb) count = sizeof(dma_addr_t) / sizeof(u32); count += skb_shinfo(skb)->nr_frags * count; - if (skb_shinfo(skb)->gso_size) + if (skb_is_gso(skb)) ++count; if (skb->ip_summed == CHECKSUM_HW) diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index f645921aff8b..ce6f3be86da0 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -10078,6 +10078,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) static struct pci_device_id write_reorder_chipsets[] = { { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) }, + { PCI_DEVICE(PCI_VENDOR_ID_AMD, + PCI_DEVICE_ID_AMD_8131_BRIDGE) }, { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) }, { }, diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c index 063816f2b11e..4103c37172f9 100644 --- a/drivers/net/typhoon.c +++ b/drivers/net/typhoon.c @@ -805,7 +805,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) * If problems develop with TSO, check this first. */ numDesc = skb_shinfo(skb)->nr_frags + 1; - if(skb_tso_size(skb)) + if (skb_is_gso(skb)) numDesc++; /* When checking for free space in the ring, we need to also @@ -845,7 +845,7 @@ typhoon_start_tx(struct sk_buff *skb, struct net_device *dev) TYPHOON_TX_PF_VLAN_TAG_SHIFT); } - if(skb_tso_size(skb)) { + if (skb_is_gso(skb)) { first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT; first_txd->numDesc++; diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 8e8963f15731..329e12c1fae4 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c @@ -4457,7 +4457,7 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) queue = card->qdio.out_qs [qeth_get_priority_queue(card, skb, ipv, cast_type)]; - if (skb_shinfo(skb)->gso_size) + if (skb_is_gso(skb)) large_send = card->options.large_send; /*are we able to do TSO ? 
If so ,prepare and send it from here */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 85f99f60deea..76cc099c8580 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -549,6 +549,7 @@ struct packet_type { struct net_device *); struct sk_buff *(*gso_segment)(struct sk_buff *skb, int features); + int (*gso_send_check)(struct sk_buff *skb); void *af_packet_priv; struct list_head list; }; @@ -1001,13 +1002,14 @@ static inline int net_gso_ok(int features, int gso_type) static inline int skb_gso_ok(struct sk_buff *skb, int features) { - return net_gso_ok(features, skb_shinfo(skb)->gso_size ? - skb_shinfo(skb)->gso_type : 0); + return net_gso_ok(features, skb_shinfo(skb)->gso_type); } static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb) { - return !skb_gso_ok(skb, dev->features); + return skb_is_gso(skb) && + (!skb_gso_ok(skb, dev->features) || + unlikely(skb->ip_summed != CHECKSUM_HW)); } #endif /* __KERNEL__ */ diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 3597b4f14389..0bf31b83578c 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -1455,5 +1455,10 @@ static inline void skb_init_secmark(struct sk_buff *skb) { } #endif +static inline int skb_is_gso(const struct sk_buff *skb) +{ + return skb_shinfo(skb)->gso_size; +} + #endif /* __KERNEL__ */ #endif /* _LINUX_SKBUFF_H */ diff --git a/include/net/protocol.h b/include/net/protocol.h index a225d6371cb1..c643bce64e55 100644 --- a/include/net/protocol.h +++ b/include/net/protocol.h @@ -36,6 +36,7 @@ struct net_protocol { int (*handler)(struct sk_buff *skb); void (*err_handler)(struct sk_buff *skb, u32 info); + int (*gso_send_check)(struct sk_buff *skb); struct sk_buff *(*gso_segment)(struct sk_buff *skb, int features); int no_policy; @@ -51,6 +52,7 @@ struct inet6_protocol int type, int code, int offset, __u32 info); + int (*gso_send_check)(struct sk_buff *skb); struct sk_buff *(*gso_segment)(struct sk_buff *skb, int features); diff --git a/include/net/tcp.h b/include/net/tcp.h index 3cd803b0d7a5..0720bddff1e9 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -1086,6 +1086,7 @@ extern struct request_sock_ops tcp_request_sock_ops; extern int tcp_v4_destroy_sock(struct sock *sk); +extern int tcp_v4_gso_send_check(struct sk_buff *skb); extern struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features); #ifdef CONFIG_PROC_FS diff --git a/net/atm/clip.c b/net/atm/clip.c index 121bf6f49148..2e62105d91bd 100644 --- a/net/atm/clip.c +++ b/net/atm/clip.c @@ -962,7 +962,6 @@ static struct file_operations arp_seq_fops = { static int __init atm_clip_init(void) { - struct proc_dir_entry *p; neigh_table_init_no_netlink(&clip_tbl); clip_tbl_hook = &clip_tbl; @@ -972,9 +971,15 @@ static int __init atm_clip_init(void) setup_timer(&idle_timer, idle_timer_check, 0); - p = create_proc_entry("arp", S_IRUGO, atm_proc_root); - if (p) - p->proc_fops = &arp_seq_fops; +#ifdef CONFIG_PROC_FS + { + struct proc_dir_entry *p; + + p = create_proc_entry("arp", S_IRUGO, atm_proc_root); + if (p) + p->proc_fops = &arp_seq_fops; + } +#endif return 0; } diff --git a/net/atm/ipcommon.c b/net/atm/ipcommon.c index 4b1faca5013f..1d3de42fada0 100644 --- a/net/atm/ipcommon.c +++ b/net/atm/ipcommon.c @@ -25,22 +25,27 @@ /* * skb_migrate appends the list at "from" to "to", emptying "from" in the * process. skb_migrate is atomic with respect to all other skb operations on - * "from" and "to". 
Note that it locks both lists at the same time, so beware - * of potential deadlocks. + * "from" and "to". Note that it locks both lists at the same time, so to deal + * with the lock ordering, the locks are taken in address order. * * This function should live in skbuff.c or skbuff.h. */ -void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to) +void skb_migrate(struct sk_buff_head *from, struct sk_buff_head *to) { unsigned long flags; struct sk_buff *skb_from = (struct sk_buff *) from; struct sk_buff *skb_to = (struct sk_buff *) to; struct sk_buff *prev; - spin_lock_irqsave(&from->lock,flags); - spin_lock(&to->lock); + if ((unsigned long) from < (unsigned long) to) { + spin_lock_irqsave(&from->lock, flags); + spin_lock_nested(&to->lock, SINGLE_DEPTH_NESTING); + } else { + spin_lock_irqsave(&to->lock, flags); + spin_lock_nested(&from->lock, SINGLE_DEPTH_NESTING); + } prev = from->prev; from->next->prev = to->prev; prev->next = skb_to; @@ -51,7 +56,7 @@ void skb_migrate(struct sk_buff_head *from,struct sk_buff_head *to) from->prev = skb_from; from->next = skb_from; from->qlen = 0; - spin_unlock_irqrestore(&from->lock,flags); + spin_unlock_irqrestore(&from->lock, flags); } diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 10a3c0aa8398..f12be2acf9bc 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -486,10 +486,9 @@ ax25_cb *ax25_create_cb(void) { ax25_cb *ax25; - if ((ax25 = kmalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL) + if ((ax25 = kzalloc(sizeof(*ax25), GFP_ATOMIC)) == NULL) return NULL; - memset(ax25, 0x00, sizeof(*ax25)); atomic_set(&ax25->refcount, 1); skb_queue_head_init(&ax25->write_queue); diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index 47e6e790bd67..b787678220ff 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c @@ -55,15 +55,13 @@ void ax25_dev_device_up(struct net_device *dev) { ax25_dev *ax25_dev; - if ((ax25_dev = kmalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) { + if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) { printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n"); return; } ax25_unregister_sysctl(); - memset(ax25_dev, 0x00, sizeof(*ax25_dev)); - dev->ax25_ptr = ax25_dev; ax25_dev->dev = dev; dev_hold(dev); diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c index 8be9f2123e54..6ccd32b30809 100644 --- a/net/bridge/br_forward.c +++ b/net/bridge/br_forward.c @@ -35,7 +35,7 @@ static inline unsigned packet_length(const struct sk_buff *skb) int br_dev_queue_push_xmit(struct sk_buff *skb) { /* drop mtu oversized packets except gso */ - if (packet_length(skb) > skb->dev->mtu && !skb_shinfo(skb)->gso_size) + if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb)) kfree_skb(skb); else { #ifdef CONFIG_BRIDGE_NETFILTER diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c index 8298a5179aef..cbc8a389a0a8 100644 --- a/net/bridge/br_netfilter.c +++ b/net/bridge/br_netfilter.c @@ -761,7 +761,7 @@ static int br_nf_dev_queue_xmit(struct sk_buff *skb) { if (skb->protocol == htons(ETH_P_IP) && skb->len > skb->dev->mtu && - !skb_shinfo(skb)->gso_size) + !skb_is_gso(skb)) return ip_fragment(skb, br_dev_queue_push_xmit); else return br_dev_queue_push_xmit(skb); diff --git a/net/core/dev.c b/net/core/dev.c index 066a60a75280..4d2b5167d7f5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1162,9 +1162,17 @@ int skb_checksum_help(struct sk_buff *skb, int inward) unsigned int csum; int ret = 0, offset = skb->h.raw - skb->data; - if (inward) { - skb->ip_summed = CHECKSUM_NONE; - goto out; + 
if (inward) + goto out_set_summed; + + if (unlikely(skb_shinfo(skb)->gso_size)) { + static int warned; + + WARN_ON(!warned); + warned = 1; + + /* Let GSO fix up the checksum. */ + goto out_set_summed; } if (skb_cloned(skb)) { @@ -1181,6 +1189,8 @@ int skb_checksum_help(struct sk_buff *skb, int inward) BUG_ON(skb->csum + 2 > offset); *(u16*)(skb->h.raw + skb->csum) = csum_fold(csum); + +out_set_summed: skb->ip_summed = CHECKSUM_NONE; out: return ret; @@ -1201,17 +1211,35 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features) struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT); struct packet_type *ptype; int type = skb->protocol; + int err; BUG_ON(skb_shinfo(skb)->frag_list); - BUG_ON(skb->ip_summed != CHECKSUM_HW); skb->mac.raw = skb->data; skb->mac_len = skb->nh.raw - skb->data; __skb_pull(skb, skb->mac_len); + if (unlikely(skb->ip_summed != CHECKSUM_HW)) { + static int warned; + + WARN_ON(!warned); + warned = 1; + + if (skb_header_cloned(skb) && + (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) + return ERR_PTR(err); + } + rcu_read_lock(); list_for_each_entry_rcu(ptype, &ptype_base[ntohs(type) & 15], list) { if (ptype->type == type && !ptype->dev && ptype->gso_segment) { + if (unlikely(skb->ip_summed != CHECKSUM_HW)) { + err = ptype->gso_send_check(skb); + segs = ERR_PTR(err); + if (err || skb_gso_ok(skb, features)) + break; + __skb_push(skb, skb->data - skb->nh.raw); + } segs = ptype->gso_segment(skb, features); break; } @@ -1727,7 +1755,7 @@ static int ing_filter(struct sk_buff *skb) if (dev->qdisc_ingress) { __u32 ttl = (__u32) G_TC_RTTL(skb->tc_verd); if (MAX_RED_LOOP < ttl++) { - printk("Redir loop detected Dropping packet (%s->%s)\n", + printk(KERN_WARNING "Redir loop detected Dropping packet (%s->%s)\n", skb->input_dev->name, skb->dev->name); return TC_ACT_SHOT; } @@ -2922,7 +2950,7 @@ int register_netdevice(struct net_device *dev) /* Fix illegal SG+CSUM combinations. */ if ((dev->features & NETIF_F_SG) && !(dev->features & NETIF_F_ALL_CSUM)) { - printk("%s: Dropping NETIF_F_SG since no checksum feature.\n", + printk(KERN_NOTICE "%s: Dropping NETIF_F_SG since no checksum feature.\n", dev->name); dev->features &= ~NETIF_F_SG; } @@ -2930,7 +2958,7 @@ int register_netdevice(struct net_device *dev) /* TSO requires that SG is present as well. 
*/ if ((dev->features & NETIF_F_TSO) && !(dev->features & NETIF_F_SG)) { - printk("%s: Dropping NETIF_F_TSO since no SG feature.\n", + printk(KERN_NOTICE "%s: Dropping NETIF_F_TSO since no SG feature.\n", dev->name); dev->features &= ~NETIF_F_TSO; } diff --git a/net/decnet/dn_rules.c b/net/decnet/dn_rules.c index 06e785fe5757..22f321d9bf9d 100644 --- a/net/decnet/dn_rules.c +++ b/net/decnet/dn_rules.c @@ -399,9 +399,10 @@ int dn_fib_dump_rules(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); hlist_for_each_entry(r, node, &dn_fib_rules, r_hlist) { if (idx < s_idx) - continue; + goto next; if (dn_fib_fill_rule(skb, r, cb, NLM_F_MULTI) < 0) break; +next: idx++; } rcu_read_unlock(); diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 318d4674faa1..c84a32070f8d 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1097,6 +1097,40 @@ int inet_sk_rebuild_header(struct sock *sk) EXPORT_SYMBOL(inet_sk_rebuild_header); +static int inet_gso_send_check(struct sk_buff *skb) +{ + struct iphdr *iph; + struct net_protocol *ops; + int proto; + int ihl; + int err = -EINVAL; + + if (unlikely(!pskb_may_pull(skb, sizeof(*iph)))) + goto out; + + iph = skb->nh.iph; + ihl = iph->ihl * 4; + if (ihl < sizeof(*iph)) + goto out; + + if (unlikely(!pskb_may_pull(skb, ihl))) + goto out; + + skb->h.raw = __skb_pull(skb, ihl); + iph = skb->nh.iph; + proto = iph->protocol & (MAX_INET_PROTOS - 1); + err = -EPROTONOSUPPORT; + + rcu_read_lock(); + ops = rcu_dereference(inet_protos[proto]); + if (likely(ops && ops->gso_send_check)) + err = ops->gso_send_check(skb); + rcu_read_unlock(); + +out: + return err; +} + static struct sk_buff *inet_gso_segment(struct sk_buff *skb, int features) { struct sk_buff *segs = ERR_PTR(-EINVAL); @@ -1162,6 +1196,7 @@ static struct net_protocol igmp_protocol = { static struct net_protocol tcp_protocol = { .handler = tcp_v4_rcv, .err_handler = tcp_v4_err, + .gso_send_check = tcp_v4_gso_send_check, .gso_segment = tcp_tso_segment, .no_policy = 1, }; @@ -1208,6 +1243,7 @@ static int ipv4_proc_init(void); static struct packet_type ip_packet_type = { .type = __constant_htons(ETH_P_IP), .func = ip_rcv, + .gso_send_check = inet_gso_send_check, .gso_segment = inet_gso_segment, }; diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c index 6c642d11d4ca..773b12ba4e3c 100644 --- a/net/ipv4/fib_rules.c +++ b/net/ipv4/fib_rules.c @@ -457,13 +457,13 @@ int inet_dump_rules(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); hlist_for_each_entry(r, node, &fib_rules, hlist) { - if (idx < s_idx) - continue; + goto next; if (inet_fill_rule(skb, r, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, RTM_NEWRULE, NLM_F_MULTI) < 0) break; +next: idx++; } rcu_read_unlock(); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index ca0e714613fb..7c9f9a6421b8 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -209,7 +209,7 @@ static inline int ip_finish_output(struct sk_buff *skb) return dst_output(skb); } #endif - if (skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) + if (skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) return ip_fragment(skb, ip_finish_output2); else return ip_finish_output2(skb); @@ -1095,7 +1095,7 @@ ssize_t ip_append_page(struct sock *sk, struct page *page, while (size > 0) { int i; - if (skb_shinfo(skb)->gso_size) + if (skb_is_gso(skb)) len = size; else { diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 5a886e6efbbe..a891133f00e4 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -496,6 +496,24 @@ void 
tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb) } } +int tcp_v4_gso_send_check(struct sk_buff *skb) +{ + struct iphdr *iph; + struct tcphdr *th; + + if (!pskb_may_pull(skb, sizeof(*th))) + return -EINVAL; + + iph = skb->nh.iph; + th = skb->h.th; + + th->check = 0; + th->check = ~tcp_v4_check(th, skb->len, iph->saddr, iph->daddr, 0); + skb->csum = offsetof(struct tcphdr, check); + skb->ip_summed = CHECKSUM_HW; + return 0; +} + /* * This routine will send an RST to the other tcp. * diff --git a/net/ipv4/xfrm4_output.c b/net/ipv4/xfrm4_output.c index 193363e22932..d16f863cf687 100644 --- a/net/ipv4/xfrm4_output.c +++ b/net/ipv4/xfrm4_output.c @@ -134,7 +134,7 @@ static int xfrm4_output_finish(struct sk_buff *skb) } #endif - if (!skb_shinfo(skb)->gso_size) + if (!skb_is_gso(skb)) return xfrm4_output_finish2(skb); skb->protocol = htons(ETH_P_IP); diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index 2c5b44575af0..3bc74ce78800 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -147,7 +147,7 @@ static int ip6_output2(struct sk_buff *skb) int ip6_output(struct sk_buff *skb) { - if ((skb->len > dst_mtu(skb->dst) && !skb_shinfo(skb)->gso_size) || + if ((skb->len > dst_mtu(skb->dst) && !skb_is_gso(skb)) || dst_allfrag(skb->dst)) return ip6_fragment(skb, ip6_output2); else @@ -229,7 +229,7 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl, skb->priority = sk->sk_priority; mtu = dst_mtu(dst); - if ((skb->len <= mtu) || ipfragok || skb_shinfo(skb)->gso_size) { + if ((skb->len <= mtu) || ipfragok || skb_is_gso(skb)) { IP6_INC_STATS(IPSTATS_MIB_OUTREQUESTS); return NF_HOOK(PF_INET6, NF_IP6_LOCAL_OUT, skb, NULL, dst->dev, dst_output); diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index 0c17dec11c8d..43327264e69c 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -57,29 +57,11 @@ DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly; -static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) +static struct inet6_protocol *ipv6_gso_pull_exthdrs(struct sk_buff *skb, + int proto) { - struct sk_buff *segs = ERR_PTR(-EINVAL); - struct ipv6hdr *ipv6h; - struct inet6_protocol *ops; - int proto; + struct inet6_protocol *ops = NULL; - if (unlikely(skb_shinfo(skb)->gso_type & - ~(SKB_GSO_UDP | - SKB_GSO_DODGY | - SKB_GSO_TCP_ECN | - SKB_GSO_TCPV6 | - 0))) - goto out; - - if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) - goto out; - - ipv6h = skb->nh.ipv6h; - proto = ipv6h->nexthdr; - __skb_pull(skb, sizeof(*ipv6h)); - - rcu_read_lock(); for (;;) { struct ipv6_opt_hdr *opth; int len; @@ -88,30 +70,80 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) ops = rcu_dereference(inet6_protos[proto]); if (unlikely(!ops)) - goto unlock; + break; if (!(ops->flags & INET6_PROTO_GSO_EXTHDR)) break; } if (unlikely(!pskb_may_pull(skb, 8))) - goto unlock; + break; opth = (void *)skb->data; len = opth->hdrlen * 8 + 8; if (unlikely(!pskb_may_pull(skb, len))) - goto unlock; + break; proto = opth->nexthdr; __skb_pull(skb, len); } - skb->h.raw = skb->data; - if (likely(ops->gso_segment)) - segs = ops->gso_segment(skb, features); + return ops; +} + +static int ipv6_gso_send_check(struct sk_buff *skb) +{ + struct ipv6hdr *ipv6h; + struct inet6_protocol *ops; + int err = -EINVAL; + + if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) + goto out; -unlock: + ipv6h = skb->nh.ipv6h; + __skb_pull(skb, sizeof(*ipv6h)); + err = -EPROTONOSUPPORT; + + rcu_read_lock(); + ops = 
ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); + if (likely(ops && ops->gso_send_check)) { + skb->h.raw = skb->data; + err = ops->gso_send_check(skb); + } + rcu_read_unlock(); + +out: + return err; +} + +static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb, int features) +{ + struct sk_buff *segs = ERR_PTR(-EINVAL); + struct ipv6hdr *ipv6h; + struct inet6_protocol *ops; + + if (unlikely(skb_shinfo(skb)->gso_type & + ~(SKB_GSO_UDP | + SKB_GSO_DODGY | + SKB_GSO_TCP_ECN | + SKB_GSO_TCPV6 | + 0))) + goto out; + + if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h)))) + goto out; + + ipv6h = skb->nh.ipv6h; + __skb_pull(skb, sizeof(*ipv6h)); + segs = ERR_PTR(-EPROTONOSUPPORT); + + rcu_read_lock(); + ops = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr); + if (likely(ops && ops->gso_segment)) { + skb->h.raw = skb->data; + segs = ops->gso_segment(skb, features); + } rcu_read_unlock(); if (unlikely(IS_ERR(segs))) @@ -130,6 +162,7 @@ out: static struct packet_type ipv6_packet_type = { .type = __constant_htons(ETH_P_IPV6), .func = ipv6_rcv, + .gso_send_check = ipv6_gso_send_check, .gso_segment = ipv6_gso_segment, }; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 5bdcb9002cf7..923989d0520d 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -552,6 +552,24 @@ static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb) } } +static int tcp_v6_gso_send_check(struct sk_buff *skb) +{ + struct ipv6hdr *ipv6h; + struct tcphdr *th; + + if (!pskb_may_pull(skb, sizeof(*th))) + return -EINVAL; + + ipv6h = skb->nh.ipv6h; + th = skb->h.th; + + th->check = 0; + th->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, skb->len, + IPPROTO_TCP, 0); + skb->csum = offsetof(struct tcphdr, check); + skb->ip_summed = CHECKSUM_HW; + return 0; +} static void tcp_v6_send_reset(struct sk_buff *skb) { @@ -1603,6 +1621,7 @@ struct proto tcpv6_prot = { static struct inet6_protocol tcpv6_protocol = { .handler = tcp_v6_rcv, .err_handler = tcp_v6_err, + .gso_send_check = tcp_v6_gso_send_check, .gso_segment = tcp_tso_segment, .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL, }; diff --git a/net/ipv6/xfrm6_output.c b/net/ipv6/xfrm6_output.c index 48fccb1eca08..0eea60ea9ebc 100644 --- a/net/ipv6/xfrm6_output.c +++ b/net/ipv6/xfrm6_output.c @@ -122,7 +122,7 @@ static int xfrm6_output_finish(struct sk_buff *skb) { struct sk_buff *segs; - if (!skb_shinfo(skb)->gso_size) + if (!skb_is_gso(skb)) return xfrm6_output_finish2(skb); skb->protocol = htons(ETH_P_IP); diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index 389a4119e1b4..ecc796878f38 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1382,14 +1382,12 @@ static int __init nr_proto_init(void) return -1; } - dev_nr = kmalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL); + dev_nr = kzalloc(nr_ndevs * sizeof(struct net_device *), GFP_KERNEL); if (dev_nr == NULL) { printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device array\n"); return -1; } - memset(dev_nr, 0x00, nr_ndevs * sizeof(struct net_device *)); - for (i = 0; i < nr_ndevs; i++) { char name[IFNAMSIZ]; struct net_device *dev; diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index d0a67bb31363..c115295ab431 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1490,14 +1490,13 @@ static int __init rose_proto_init(void) rose_callsign = null_ax25_address; - dev_rose = kmalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); + dev_rose = kzalloc(rose_ndevs * sizeof(struct net_device *), GFP_KERNEL); if (dev_rose == NULL) { 
printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate device structure\n"); rc = -ENOMEM; goto out_proto_unregister; } - memset(dev_rose, 0x00, rose_ndevs * sizeof(struct net_device*)); for (i = 0; i < rose_ndevs; i++) { struct net_device *dev; char name[IFNAMSIZ]; diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 599423cc9d0d..0972247a839c 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -602,8 +602,8 @@ static int tca_action_flush(struct rtattr *rta, struct nlmsghdr *n, u32 pid) return err; rtattr_failure: - module_put(a->ops->owner); nlmsg_failure: + module_put(a->ops->owner); err_out: kfree_skb(skb); kfree(a); |