| author | David S. Miller <davem@davemloft.net> | 2008-08-15 14:54:03 -0700 |
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2008-08-15 19:51:26 -0700 |
| commit | 7447ef63cf2dfdc444f4c72ae13f604350b2e25f (patch) | |
| tree | f329b96bbfb41f6e246d248ae7fca3beead7acc5 /drivers/net | |
| parent | f22f8567cb0a530d8958d177e0f268783bd0d894 (diff) | |
loopback: Remove rest of LOOPBACK_TSO code.
It hasn't been enabled for a long time and the generic GSO
engine is better documentation of what is expected of a
device implementing TSO.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/net')
-rw-r--r-- | drivers/net/loopback.c | 62 |
1 files changed, 0 insertions, 62 deletions
diff --git a/drivers/net/loopback.c b/drivers/net/loopback.c
index 46e87cc87251..489d53be2f5d 100644
--- a/drivers/net/loopback.c
+++ b/drivers/net/loopback.c
@@ -64,68 +64,6 @@ struct pcpu_lstats {
 	unsigned long bytes;
 };
 
-/* KISS: just allocate small chunks and copy bits.
- *
- * So, in fact, this is documentation, explaining what we expect
- * of largesending device modulo TCP checksum, which is ignored for loopback.
- */
-
-#ifdef LOOPBACK_TSO
-static void emulate_large_send_offload(struct sk_buff *skb)
-{
-	struct iphdr *iph = ip_hdr(skb);
-	struct tcphdr *th = (struct tcphdr *)(skb_network_header(skb) +
-					      (iph->ihl * 4));
-	unsigned int doffset = (iph->ihl + th->doff) * 4;
-	unsigned int mtu = skb_shinfo(skb)->gso_size + doffset;
-	unsigned int offset = 0;
-	u32 seq = ntohl(th->seq);
-	u16 id = ntohs(iph->id);
-
-	while (offset + doffset < skb->len) {
-		unsigned int frag_size = min(mtu, skb->len - offset) - doffset;
-		struct sk_buff *nskb = alloc_skb(mtu + 32, GFP_ATOMIC);
-
-		if (!nskb)
-			break;
-		skb_reserve(nskb, 32);
-		skb_set_mac_header(nskb, -ETH_HLEN);
-		skb_reset_network_header(nskb);
-		iph = ip_hdr(nskb);
-		skb_copy_to_linear_data(nskb, skb_network_header(skb),
-					doffset);
-		if (skb_copy_bits(skb,
-				  doffset + offset,
-				  nskb->data + doffset,
-				  frag_size))
-			BUG();
-		skb_put(nskb, doffset + frag_size);
-		nskb->ip_summed = CHECKSUM_UNNECESSARY;
-		nskb->dev = skb->dev;
-		nskb->priority = skb->priority;
-		nskb->protocol = skb->protocol;
-		nskb->dst = dst_clone(skb->dst);
-		memcpy(nskb->cb, skb->cb, sizeof(skb->cb));
-		nskb->pkt_type = skb->pkt_type;
-
-		th = (struct tcphdr *)(skb_network_header(nskb) + iph->ihl * 4);
-		iph->tot_len = htons(frag_size + doffset);
-		iph->id = htons(id);
-		iph->check = 0;
-		iph->check = ip_fast_csum((unsigned char *) iph, iph->ihl);
-		th->seq = htonl(seq);
-		if (offset + doffset + frag_size < skb->len)
-			th->fin = th->psh = 0;
-		netif_rx(nskb);
-		offset += frag_size;
-		seq += frag_size;
-		id++;
-	}
-
-	dev_kfree_skb(skb);
-}
-#endif /* LOOPBACK_TSO */
-
 /*
  * The higher levels take care of making this non-reentrant (it's
  * called with bh's disabled).
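For reference, below is a minimal, self-contained userspace sketch of the per-segment bookkeeping the removed emulate_large_send_offload() performed, and which any TSO/GSO implementation still has to get right: advance the TCP sequence number by the payload carried in each segment, bump the IPv4 identification field once per segment, and allow FIN/PSH only on the final segment. This is not kernel code; struct seg and plan_segments() are hypothetical names used purely for illustration.

```c
/*
 * Illustrative model of the header fix-ups done per segment by the removed
 * emulate_large_send_offload().  Userspace only; all names are made up.
 */
#include <stdio.h>

struct seg {
	unsigned int payload_len;   /* TCP payload bytes in this segment */
	unsigned int ip_id;         /* IPv4 identification field */
	unsigned int tcp_seq;       /* TCP sequence number */
	int          fin_psh_ok;    /* FIN/PSH allowed only on the last segment */
};

/* Split 'payload' bytes into at most 'max' MSS-sized segments. */
static int plan_segments(unsigned int payload, unsigned int mss,
			 unsigned int first_id, unsigned int first_seq,
			 struct seg *out, int max)
{
	unsigned int off = 0;
	int n = 0;

	while (off < payload && n < max) {
		unsigned int len = payload - off;

		if (len > mss)
			len = mss;
		out[n].payload_len = len;
		out[n].ip_id      = first_id + n;        /* iph->id incremented per segment */
		out[n].tcp_seq    = first_seq + off;     /* th->seq advances by payload sent */
		out[n].fin_psh_ok = (off + len == payload); /* cleared on all but the last */
		off += len;
		n++;
	}
	return n;
}

int main(void)
{
	struct seg segs[16];
	int i, n = plan_segments(4000, 1448, 100, 1000000, segs, 16);

	for (i = 0; i < n; i++)
		printf("seg %d: len=%u id=%u seq=%u fin/psh=%d\n",
		       i, segs[i].payload_len, segs[i].ip_id,
		       segs[i].tcp_seq, segs[i].fin_psh_ok);
	return 0;
}
```

As the commit message notes, this per-segment work is now done generically by the GSO engine when a device does not handle segmentation itself, which is why the loopback driver no longer needs its own copy.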