Import 2.3.18pre1
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
dissimilarity index 69%
index 2d278aa..77f8b98 100644
-/*
- * INET                An implementation of the TCP/IP protocol suite for the LINUX
- *             operating system.  INET is implemented using the  BSD Socket
- *             interface as the means of communication with the user level.
- *
- *             Implementation of the Transmission Control Protocol(TCP).
- *
- * Version:    $Id: tcp_output.c,v 1.63 1998/03/13 14:15:55 davem Exp $
- *
- * Authors:    Ross Biro, <bir7@leland.Stanford.Edu>
- *             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
- *             Mark Evans, <evansmp@uhura.aston.ac.uk>
- *             Corey Minyard <wf-rch!minyard@relay.EU.net>
- *             Florian La Roche, <flla@stud.uni-sb.de>
- *             Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
- *             Linus Torvalds, <torvalds@cs.helsinki.fi>
- *             Alan Cox, <gw4pts@gw4pts.ampr.org>
- *             Matthew Dillon, <dillon@apollo.west.oic.com>
- *             Arnt Gulbrandsen, <agulbra@nvg.unit.no>
- *             Jorge Cwik, <jorge@laser.satlink.net>
- */
-
-/*
- * Changes:    Pedro Roque     :       Retransmit queue handled by TCP.
- *                             :       Fragmentation on mtu decrease
- *                             :       Segment collapse on retransmit
- *                             :       AF independence
- *
- *             Linus Torvalds  :       send_delayed_ack
- *             David S. Miller :       Charge memory using the right skb
- *                                     during syn/ack processing.
- *
- */
-
-#include <net/tcp.h>
-
-extern int sysctl_tcp_timestamps;
-extern int sysctl_tcp_window_scaling;
-
-/* Get rid of any delayed acks, we sent one already.. */
-static __inline__ void clear_delayed_acks(struct sock * sk)
-{
-       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-
-       tp->delayed_acks = 0;
-       if(tcp_in_quickack_mode(tp))
-               tp->ato = ((HZ/100)*2);
-       tcp_clear_xmit_timer(sk, TIME_DACK);
-}
-
-static __inline__ void update_send_head(struct sock *sk)
-{
-       struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
-       
-       tp->send_head = tp->send_head->next;
-       if (tp->send_head == (struct sk_buff *) &sk->write_queue)
-               tp->send_head = NULL;
-}
-
-/*
- *     This is the main buffer sending routine. We queue the buffer
- *	having checked that it seems sane.
- */
-void tcp_send_skb(struct sock *sk, struct sk_buff *skb, int force_queue)
-{
-       struct tcphdr *th = skb->h.th;
-       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-       int size;
-
-       /* Length of packet (not counting length of pre-tcp headers). */
-       size = skb->len - ((unsigned char *) th - skb->data);
-
-       /* If there is a FIN or a SYN we add it onto the size. */
-       if (th->fin || th->syn) {
-               if(th->syn)
-                       size++;
-               if(th->fin)
-                       size++;
-       }
-
-       /* Actual processing. */
-       skb->seq = ntohl(th->seq);
-       skb->end_seq = skb->seq + size - 4*th->doff;
-
-       skb_queue_tail(&sk->write_queue, skb);
-
-       if (!force_queue && tp->send_head == NULL && tcp_snd_test(sk, skb)) {
-               struct sk_buff * buff;
-
-               /* This is going straight out. */
-               tp->last_ack_sent = tp->rcv_nxt;
-               th->ack_seq = htonl(tp->rcv_nxt);
-               th->window = htons(tcp_select_window(sk));
-               tcp_update_options((__u32 *)(th + 1),tp);
-
-               tp->af_specific->send_check(sk, th, size, skb);
-
-               buff = skb_clone(skb, GFP_KERNEL);
-               if (buff == NULL)
-                       goto queue;
-               
-               clear_delayed_acks(sk);
-               skb_set_owner_w(buff, sk);
-
-               tp->snd_nxt = skb->end_seq;
-               tp->packets_out++;
-
-               skb->when = jiffies;
-
-               tcp_statistics.TcpOutSegs++;
-               tp->af_specific->queue_xmit(buff);
-
-               if (!tcp_timer_is_set(sk, TIME_RETRANS))
-                       tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
-
-               return;
-       }
-
-queue:
-       /* Remember where we must start sending. */
-       if (tp->send_head == NULL)
-               tp->send_head = skb;
-       if (!force_queue && tp->packets_out == 0 && !tp->pending) {
-               tp->pending = TIME_PROBE0;
-               tcp_reset_xmit_timer(sk, TIME_PROBE0, tp->rto);
-       }
-}
-
-/*
- *     Function to create two new tcp segments.
- *     Shrinks the given segment to the specified size and appends a new
- *     segment with the rest of the packet to the list.
- *	This won't be called frequently, I hope...
- */
-
-static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
-{
-       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-       struct sk_buff *buff;
-       struct tcphdr *th, *nth;        
-       int nsize;
-       int tmp;
-
-       th = skb->h.th;
-
-       /* Size of new segment. */
-       nsize = skb->tail - ((unsigned char *)(th)+tp->tcp_header_len) - len;
-       if (nsize <= 0) {
-               printk(KERN_DEBUG "tcp_fragment: bug size <= 0\n");
-               return -1;
-       }
-
-       /* Get a new skb... force flag on. */
-       buff = sock_wmalloc(sk, nsize + 128 + sk->prot->max_header + 15, 1, 
-                           GFP_ATOMIC);
-       if (buff == NULL)
-               return -1;
-
-       /* Put headers on the new packet. */
-       tmp = tp->af_specific->build_net_header(sk, buff);
-       if (tmp < 0) {
-               kfree_skb(buff);
-               return -1;
-       }
-               
-       /* Move the TCP header over. */
-       nth = (struct tcphdr *) skb_put(buff, tp->tcp_header_len);
-       buff->h.th = nth;
-       memcpy(nth, th, tp->tcp_header_len);
-
-       /* Correct the new header. */
-       buff->seq = skb->seq + len;
-       buff->end_seq = skb->end_seq;
-       nth->seq = htonl(buff->seq);
-       nth->check = 0;
-       nth->doff  = th->doff;
-       
-       /* urg data is always a headache */
-       if (th->urg) {
-               if (th->urg_ptr > len) {
-                       th->urg = 0;
-                       nth->urg_ptr -= len;
-               } else {
-                       nth->urg = 0;
-               }
-       }
-
-       /* Copy data tail to our new buffer. */
-       buff->csum = csum_partial_copy(((u8 *)(th)+tp->tcp_header_len) + len,
-                                      skb_put(buff, nsize),
-                                      nsize, 0);
-
-       skb->end_seq -= nsize;
-       skb_trim(skb, skb->len - nsize);
-
-       /* Remember to checksum this packet afterwards. */
-       th->check = 0;
-       skb->csum = csum_partial((u8*)(th) + tp->tcp_header_len, skb->tail - ((u8 *) (th)+tp->tcp_header_len),
-                                0);
-
-       skb_append(skb, buff);
-
-       return 0;
-}
-
-static void tcp_wrxmit_prob(struct sock *sk, struct sk_buff *skb)
-{
-       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-
-       /* This is acked data. We can discard it. This cannot currently occur. */
-       tp->retransmits = 0;
-
-       printk(KERN_DEBUG "tcp_write_xmit: bug skb in write queue\n");
-
-       update_send_head(sk);
-
-       skb_unlink(skb);        
-       kfree_skb(skb);
-
-       if (!sk->dead)
-               sk->write_space(sk);
-}
-
-static int tcp_wrxmit_frag(struct sock *sk, struct sk_buff *skb, int size)
-{
-       struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
-       
-       SOCK_DEBUG(sk, "tcp_write_xmit: frag needed size=%d mss=%d\n",
-                  size, sk->mss);
-
-       if (tcp_fragment(sk, skb, sk->mss)) {
-               /* tcp_fragment failed! */
-               tp->send_head = skb;
-               tp->packets_out--;
-               return -1;
-       }
-       return 0;
-}
-
-/*
- *     This routine writes packets to the network.
- *     It advances the send_head.
- *     This happens as incoming acks open up the remote window for us.
- */
-void tcp_write_xmit(struct sock *sk)
-{
-       struct sk_buff *skb;
-       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-       u16 rcv_wnd;
-       int sent_pkts = 0;
-
-       /* The bytes will have to remain here. In time closedown will
-        * empty the write queue and all will be happy.
-        */
-       if(sk->zapped)
-               return;
-
-       /*      Anything on the transmit queue that fits the window can
-        *      be added providing we are:
-        *
-        *      a) following SWS avoidance [and Nagle algorithm]
-        *      b) not exceeding our congestion window.
-        *      c) not retransmitting [Nagle]
-        */
-       rcv_wnd = htons(tcp_select_window(sk));
-       while((skb = tp->send_head) && tcp_snd_test(sk, skb)) {
-               struct tcphdr *th;
-               struct sk_buff *buff;
-               int size;
-
-               /* See if we really need to send the packet. (debugging code) */
-               if (!after(skb->end_seq, tp->snd_una)) {
-                       tcp_wrxmit_prob(sk, skb);
-                       continue;
-               }
-
-               /*      Put in the ack seq and window at this point rather
-                *      than earlier, in order to keep them monotonic.
-                *      We really want to avoid taking back window allocations.
-                *      That's legal, but RFC1122 says it's frowned on.
-                *      Ack and window will in general have changed since
-                *      this packet was put on the write queue.
-                */
-               th = skb->h.th;
-               size = skb->len - (((unsigned char *) th) - skb->data);
-               if (size - (th->doff << 2) > sk->mss) {
-                       if (tcp_wrxmit_frag(sk, skb, size))
-                               break;
-                       size = skb->len - (((unsigned char*)th) - skb->data);
-               }
-
-               tp->last_ack_sent = th->ack_seq = htonl(tp->rcv_nxt);
-               th->window = rcv_wnd;
-               tcp_update_options((__u32 *)(th + 1),tp);
-
-               tp->af_specific->send_check(sk, th, size, skb);
-
-#ifdef TCP_DEBUG
-               if (before(skb->end_seq, tp->snd_nxt))
-                       printk(KERN_DEBUG "tcp_write_xmit:"
-                              " sending already sent seq\n");
-#endif
-
-               buff = skb_clone(skb, GFP_ATOMIC);
-               if (buff == NULL)
-                       break;
-
-               /* Advance the send_head.  This one is going out. */
-               update_send_head(sk);
-               clear_delayed_acks(sk);
-
-               tp->packets_out++;
-               skb_set_owner_w(buff, sk);
-
-               tp->snd_nxt = skb->end_seq;
-
-               skb->when = jiffies;
-
-               sent_pkts = 1;
-               tp->af_specific->queue_xmit(buff);
-       }
-
-       if (sent_pkts && !tcp_timer_is_set(sk, TIME_RETRANS))
-               tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
-}
-
-
-
-/* This function returns the amount that we can raise the
- * usable window based on the following constraints
- *  
- * 1. The window can never be shrunk once it is offered (RFC 793)
- * 2. We limit memory per socket
- *
- * RFC 1122:
- * "the suggested [SWS] avoidance algorithm for the receiver is to keep
- *  RECV.NEXT + RCV.WIN fixed until:
- *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
- *
- * i.e. don't raise the right edge of the window until you can raise
- * it at least MSS bytes.
- *
- * Unfortunately, the recommended algorithm breaks header prediction,
- * since header prediction assumes th->window stays fixed.
- *
- * Strictly speaking, keeping th->window fixed violates the receiver
- * side SWS prevention criteria. The problem is that under this rule
- * a stream of single byte packets will cause the right side of the
- * window to always advance by a single byte.
- * 
- * Of course, if the sender implements sender side SWS prevention
- * then this will not be a problem.
- * 
- * BSD seems to make the following compromise:
- * 
- *     If the free space is less than the 1/4 of the maximum
- *     space available and the free space is less than 1/2 mss,
- *     then set the window to 0.
- *     Otherwise, just prevent the window from shrinking
- *     and from being larger than the largest representable value.
- *
- * This prevents incremental opening of the window in the regime
- * where TCP is limited by the speed of the reader side taking
- * data out of the TCP receive queue. It does nothing about
- * those cases where the window is constrained on the sender side
- * because the pipeline is full.
- *
- * BSD also seems to "accidentally" limit itself to windows that are a
- * multiple of MSS, at least until the free space gets quite small.
- * This would appear to be a side effect of the mbuf implementation.
- * Combining these two algorithms results in the observed behavior
- * of having a fixed window size at almost all times.
- *
- * Below we obtain similar behavior by forcing the offered window to
- * a multiple of the mss when it is feasible to do so.
- *
- * FIXME: In our current implementation the value returned by sock_rspace(sk)
- * is the total space we have allocated to the socket to store skbuf's.
- * The current design assumes that up to half of that space will be
- * taken by headers, and the remaining space will be available for TCP data.
- * This should be accounted for correctly instead.
- */
-u32 __tcp_select_window(struct sock *sk)
-{
-       struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
-       unsigned int mss = sk->mss;
-       unsigned int free_space;
-       u32 window, cur_win;
-
-       free_space = (sk->rcvbuf - atomic_read(&sk->rmem_alloc)) / 2;
-       if (tp->window_clamp) {
-               free_space = min(tp->window_clamp, free_space);
-               mss = min(tp->window_clamp, mss);
-       } else {
-               printk("tcp_select_window: tp->window_clamp == 0.\n");
-       }
-
-       if (mss < 1) {
-               mss = 1;
-               printk("tcp_select_window: sk->mss fell to 0.\n");
-       }
-       
-       cur_win = tcp_receive_window(tp);
-       if (free_space < sk->rcvbuf/4 && free_space < mss/2) {
-               window = 0;
-       } else {
-               /* Get the largest window that is a nice multiple of mss.
-                * Window clamp already applied above.
-                * If our current window offering is within 1 mss of the
-                * free space we just keep it. This prevents the divide
-                * and multiply from happening most of the time.
-                * We also don't do any window rounding when the free space
-                * is too small.
-                */
-               window = tp->rcv_wnd;
-               if ((window <= (free_space - mss)) || (window > free_space))
-                       window = (free_space/mss)*mss;
-       }
-       return window;
-}
-
-static int tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb)
-{
-       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-       struct tcphdr *th1, *th2;
-       int size1, size2, avail;
-       struct sk_buff *buff = skb->next;
-
-       th1 = skb->h.th;
-
-       if (th1->urg)
-               return -1;
-
-       avail = skb_tailroom(skb);
-
-       /* Size of TCP payload. */
-       size1 = skb->tail - ((u8 *) (th1)+(th1->doff<<2));
-       
-       th2 = buff->h.th;
-       size2 = buff->tail - ((u8 *) (th2)+(th2->doff<<2)); 
-
-       if (size2 > avail || size1 + size2 > sk->mss )
-               return -1;
-
-       /* Ok.  We will be able to collapse the packet. */
-       skb_unlink(buff);
-       memcpy(skb_put(skb, size2), ((char *) th2) + (th2->doff << 2), size2);
-       
-       /* Update sizes on original skb, both TCP and IP. */
-       skb->end_seq += buff->end_seq - buff->seq;
-       if (th2->urg) {
-               th1->urg = 1;
-               th1->urg_ptr = th2->urg_ptr + size1;
-       }
-       if (th2->fin)
-               th1->fin = 1;
-
-       /* ... and off you go. */
-       kfree_skb(buff);
-       tp->packets_out--;
-
-       /* Header checksum will be set by the retransmit procedure
-        * after calling rebuild header.
-        */
-       th1->check = 0;
-       skb->csum = csum_partial((u8*)(th1)+(th1->doff<<2), size1 + size2, 0);
-       return 0;
-}
-
-/* Do a simple retransmit without using the backoff mechanisms in
- * tcp_timer. This is used to speed up path mtu recovery. Note that
- * these simple retransmits aren't counted in the usual tcp retransmit
- * backoff counters. 
- * The socket is already locked here.
- */ 
-void tcp_simple_retransmit(struct sock *sk)
-{
-       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-
-       /* Clear delay ack timer. */
-       tcp_clear_xmit_timer(sk, TIME_DACK);
-       tp->retrans_head = NULL; 
-       /* Don't muck with the congestion window here. */
-       tp->dup_acks = 0;
-       tp->high_seq = tp->snd_nxt;
-       /* FIXME: make the current rtt sample invalid */
-       tcp_do_retransmit(sk, 0); 
-}
-
-/*
- *     A socket has timed out on its send queue and wants to do a
- *     little retransmitting.
- *     retrans_head can be different from the head of the write_queue
- *     if we are doing fast retransmit.
- */
-
-void tcp_do_retransmit(struct sock *sk, int all)
-{
-       struct sk_buff * skb;
-       int ct=0;
-       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-
-       if (tp->retrans_head == NULL)
-               tp->retrans_head = skb_peek(&sk->write_queue);
-
-       if (tp->retrans_head == tp->send_head)
-               tp->retrans_head = NULL;
-       
-       while ((skb = tp->retrans_head) != NULL) {
-               struct sk_buff *buff;
-               struct tcphdr *th;
-               int tcp_size;
-               int size;
-
-               /* In general it's OK just to use the old packet.  However we
-                * need to use the current ack and window fields.  Urg and
-                * urg_ptr could possibly stand to be updated as well, but we
-                * don't keep the necessary data.  That shouldn't be a problem,
-                * if the other end is doing the right thing.  Since we're
-                * changing the packet, we have to issue a new IP identifier.
-                */
-
-               th = skb->h.th;
-
-               tcp_size = skb->tail - ((unsigned char *)(th)+tp->tcp_header_len);
-
-               if (tcp_size > sk->mss) {
-                       if (tcp_fragment(sk, skb, sk->mss)) {
-                               printk(KERN_DEBUG "tcp_fragment failed\n");
-                               return;
-                       }
-                       tp->packets_out++;
-               }
-
-               if (!th->syn &&
-                   tcp_size < (sk->mss >> 1) &&
-                   skb->next != tp->send_head &&
-                   skb->next != (struct sk_buff *)&sk->write_queue)
-                       tcp_retrans_try_collapse(sk, skb);
-
-               if (tp->af_specific->rebuild_header(sk, skb)) {
-#ifdef TCP_DEBUG
-                       printk(KERN_DEBUG "tcp_do_rebuild_header failed\n");
-#endif
-                       break;
-               }
-
-               SOCK_DEBUG(sk, "retransmit sending seq=%x\n", skb->seq);
-
-               /* Update ack and window. */
-               tp->last_ack_sent = th->ack_seq = htonl(tp->rcv_nxt);
-               th->window = ntohs(tcp_select_window(sk));
-               tcp_update_options((__u32 *)(th+1),tp);
-
-               size = skb->tail - (unsigned char *) th;
-               tp->af_specific->send_check(sk, th, size, skb);
-
-               skb->when = jiffies;
-
-               buff = skb_clone(skb, GFP_ATOMIC);
-               if (buff == NULL)
-                       break;
-
-               skb_set_owner_w(buff, sk);
-
-               clear_delayed_acks(sk);
-               tp->af_specific->queue_xmit(buff);
-               
-               /* Count retransmissions. */
-               ct++;
-               sk->prot->retransmits++;
-               tcp_statistics.TcpRetransSegs++;
-
-               /* Only one retransmit requested. */
-               if (!all)
-                       break;
-
-               /* This should cut it off before we send too many packets. */
-               if (ct >= tp->snd_cwnd)
-                       break;
-
-               /* Advance the pointer. */
-               tp->retrans_head = skb->next;
-               if ((tp->retrans_head == tp->send_head) ||
-                   (tp->retrans_head == (struct sk_buff *) &sk->write_queue))
-                       tp->retrans_head = NULL;
-       }
-}
-
-/* Send a fin.  The caller locks the socket for us.  This cannot be
- * allowed to fail queueing a FIN frame under any circumstances.
- */
-void tcp_send_fin(struct sock *sk)
-{
-       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);    
-       
-       /* Optimization, tack on the FIN if we have a queue of
-        * unsent frames.
-        */
-       if(tp->send_head != NULL) {
-               struct sk_buff *tail = skb_peek_tail(&sk->write_queue);
-               struct tcphdr *th = tail->h.th;
-               int data_len;
-
-               /* Unfortunately tcp_write_xmit won't check for going over
-                * the MSS due to the FIN sequence number, so we have to
-                * watch out for it here.
-                */
-               data_len = (tail->tail - (((unsigned char *)th)+tp->tcp_header_len));
-               if(data_len >= sk->mss)
-                       goto build_new_frame; /* ho hum... */
-
-               /* tcp_write_xmit() will checksum the header etc. for us. */
-               th->fin = 1;
-               tail->end_seq++;
-       } else {
-               struct sk_buff *buff;
-               struct tcphdr *th;
-
-build_new_frame:
-               buff = sock_wmalloc(sk,
-                                   (BASE_ACK_SIZE + tp->tcp_header_len +
-                                    sizeof(struct sk_buff)),
-                                   1, GFP_KERNEL);
-               if (buff == NULL) {
-                       /* We can only fail due to low memory situations, not
-                        * due to going over our sndbuf limits (due to the
-                        * force flag passed to sock_wmalloc).  So just keep
-                        * trying.  We cannot allow this to fail.  The socket is
-                        * still locked, so we need not check if the connection
-                        * was reset in the meantime etc.
-                        */
-                       goto build_new_frame;
-               }
-
-               /* Administrivia. */
-               buff->csum = 0;
-
-               /* Put in the IP header and routing stuff.
-                *
-                * FIXME:
-                * We can fail if the interface for the route
-                * this socket takes goes down right before
-                * we get here.  ANK is there a way to point
-                * this into a "black hole" route in such a
-                * case?  Ideally, we should still be able to
-                * queue this and let the retransmit timer
-                * keep trying until the destination becomes
-                * reachable once more.  -DaveM
-                */
-               if(tp->af_specific->build_net_header(sk, buff) < 0) {
-                       kfree_skb(buff);
-                       goto update_write_seq;
-               }
-               th = (struct tcphdr *) skb_put(buff, tp->tcp_header_len);
-               buff->h.th = th;
-
-               memcpy(th, (void *) &(sk->dummy_th), sizeof(*th));
-               th->seq = htonl(tp->write_seq);
-               th->fin = 1;
-               tcp_build_options((__u32 *)(th + 1), tp);
-
-               /* This makes sure we do things like abide by the congestion
-                * window and other constraints which prevent us from sending.
-                */
-               tcp_send_skb(sk, buff, 0);
-       }
-update_write_seq:
-       /* So that we recognize the ACK coming back for
-        * this FIN as being legitimate.
-        */
-       tp->write_seq++;
-}
-
-/* WARNING: This routine must only be called when we have already sent
- * a SYN packet that crossed the incoming SYN that caused this routine
- * to get called. If this assumption fails then the initial rcv_wnd
- * and rcv_wscale values will not be correct.
- *
- * XXX When you have time Dave, redo this to use tcp_send_skb() just
- * XXX like tcp_send_fin() above now does.... -DaveM
- */
-int tcp_send_synack(struct sock *sk)
-{
-       struct tcp_opt * tp = &(sk->tp_pinfo.af_tcp);
-       struct sk_buff * skb;   
-       struct sk_buff * buff;
-       struct tcphdr *th;
-       int tmp;
-       
-       skb = sock_wmalloc(sk, MAX_SYN_SIZE + sizeof(struct sk_buff), 1, GFP_ATOMIC);
-       if (skb == NULL) 
-               return -ENOMEM;
-
-       tmp = tp->af_specific->build_net_header(sk, skb);
-       if (tmp < 0) {
-               kfree_skb(skb);
-               return tmp;
-       }
-
-       th =(struct tcphdr *) skb_put(skb, sizeof(struct tcphdr));
-       skb->h.th = th;
-       memset(th, 0, sizeof(struct tcphdr));
-
-       th->syn = 1;
-       th->ack = 1;
-
-       th->source = sk->dummy_th.source;
-       th->dest = sk->dummy_th.dest;
-              
-       skb->seq = tp->snd_una;
-       skb->end_seq = skb->seq + 1 /* th->syn */ ;
-       th->seq = ntohl(skb->seq);
-
-       /* This is a resend of a previous SYN, now with an ACK.
-        * we must reuse the previously offered window.
-        */
-       th->window = htons(tp->rcv_wnd);
-
-       tp->last_ack_sent = th->ack_seq = htonl(tp->rcv_nxt);
-
-       tmp = tcp_syn_build_options(skb, sk->mss,
-               tp->tstamp_ok, tp->wscale_ok, tp->rcv_wscale);
-       skb->csum = 0;
-       th->doff = (sizeof(*th) + tmp)>>2;
-
-       tp->af_specific->send_check(sk, th, sizeof(*th)+tmp, skb);
-
-       skb_queue_tail(&sk->write_queue, skb);
-       
-       buff = skb_clone(skb, GFP_ATOMIC);
-       if (buff) {
-               skb_set_owner_w(buff, sk);
-
-               tp->packets_out++;
-               skb->when = jiffies;
-
-               tp->af_specific->queue_xmit(buff);
-               tcp_statistics.TcpOutSegs++;
-
-               tcp_reset_xmit_timer(sk, TIME_RETRANS, TCP_TIMEOUT_INIT);
-       }
-       return 0;
-}
-
-/*
- * Send out a delayed ack, the caller does the policy checking
- * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
- * for details.
- */
-
-void tcp_send_delayed_ack(struct tcp_opt *tp, int max_timeout)
-{
-       unsigned long timeout;
-
-       /* Stay within the limit we were given */
-       timeout = tp->ato;
-       if (timeout > max_timeout)
-               timeout = max_timeout;
-       timeout += jiffies;
-
-       /* Use new timeout only if there wasn't an older one earlier. */
-       if ((!tp->delack_timer.prev || !del_timer(&tp->delack_timer)) ||
-           (timeout < tp->delack_timer.expires))
-               tp->delack_timer.expires = timeout;
-
-       add_timer(&tp->delack_timer);
-}
-
-
-
-/*
- *     This routine sends an ack and also updates the window. 
- */
-void tcp_send_ack(struct sock *sk)
-{
-       struct sk_buff *buff;
-       struct tcp_opt *tp=&(sk->tp_pinfo.af_tcp);
-       struct tcphdr *th;
-       int tmp;
-
-       if(sk->zapped)
-               return; /* We have been reset, we may not send again. */
-
-       /* We need to grab some memory, and put together an ack,
-        * and then put it into the queue to be sent.
-        */
-       buff = sock_wmalloc(sk, BASE_ACK_SIZE + tp->tcp_header_len, 1, GFP_ATOMIC);
-       if (buff == NULL) {
-               /*      Force it to send an ack. We don't have to do this
-                *      (ACK is unreliable) but it's much better use of
-                *      bandwidth on slow links to send a spare ack than
-                *      resend packets.
-                */
-               tcp_send_delayed_ack(tp, HZ/2);
-               return;
-       }
-
-       clear_delayed_acks(sk);
-
-       /* Assemble a suitable TCP frame. */
-       buff->csum = 0;
-
-       /* Put in the IP header and routing stuff. */
-       tmp = tp->af_specific->build_net_header(sk, buff);
-       if (tmp < 0) {
-               kfree_skb(buff);
-               return;
-       }
-
-       th = (struct tcphdr *)skb_put(buff,tp->tcp_header_len);
-       memcpy(th, &sk->dummy_th, sizeof(struct tcphdr));
-
-       /* Swap the send and the receive. */
-       th->window      = ntohs(tcp_select_window(sk));
-       th->seq         = ntohl(tp->snd_nxt);
-       tp->last_ack_sent = tp->rcv_nxt;
-       th->ack_seq     = htonl(tp->rcv_nxt);
-       tcp_build_and_update_options((__u32 *)(th + 1), tp);
-
-       /* Fill in the packet and send it. */
-       tp->af_specific->send_check(sk, th, tp->tcp_header_len, buff);
-       tp->af_specific->queue_xmit(buff);
-       tcp_statistics.TcpOutSegs++;
-}
-
-/*
- *     This routine sends a packet with an out of date sequence
- *     number. It assumes the other end will try to ack it.
- */
-
-void tcp_write_wakeup(struct sock *sk)
-{
-       struct sk_buff *buff, *skb;
-       struct tcphdr *t1;
-       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-       int tmp;
-
-       if (sk->zapped)
-               return; /* After a valid reset we can send no more. */
-
-       /*      Write data can still be transmitted/retransmitted in the
-        *      following states.  If any other state is encountered, return.
-        *      [listen/close will never occur here anyway]
-        */
-       if ((1 << sk->state) &
-           ~(TCPF_ESTABLISHED|TCPF_CLOSE_WAIT|TCPF_FIN_WAIT1|TCPF_LAST_ACK|TCPF_CLOSING))
-               return;
-
-       if (before(tp->snd_nxt, tp->snd_una + tp->snd_wnd) && (skb=tp->send_head)) {
-               struct tcphdr *th;
-               unsigned long win_size;
-
-               /* We are probing the opening of a window
-                * but the window size is != 0
-                * must have been a result of SWS avoidance (sender side).
-                */
-               win_size = tp->snd_wnd - (tp->snd_nxt - tp->snd_una);
-               if (win_size < skb->end_seq - skb->seq) {
-                       if (tcp_fragment(sk, skb, win_size)) {
-                               printk(KERN_DEBUG "tcp_write_wakeup: "
-                                      "fragment failed\n");
-                               return;
-                       }
-               }
-
-               th = skb->h.th;
-               tcp_update_options((__u32 *)(th + 1), tp);
-               tp->af_specific->send_check(sk, th, th->doff * 4 + win_size, skb);
-               buff = skb_clone(skb, GFP_ATOMIC);
-               if (buff == NULL)
-                       return;
-
-               skb_set_owner_w(buff, sk);
-               tp->packets_out++;
-
-               clear_delayed_acks(sk);
-
-               if (!tcp_timer_is_set(sk, TIME_RETRANS))
-                       tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
-
-               skb->when = jiffies;
-               update_send_head(sk);
-               tp->snd_nxt = skb->end_seq;
-       } else {
-               buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
-               if (buff == NULL) 
-                       return;
-
-               buff->csum = 0;
-
-               /* Put in the IP header and routing stuff. */
-               tmp = tp->af_specific->build_net_header(sk, buff);
-               if (tmp < 0) {
-                       kfree_skb(buff);
-                       return;
-               }
-
-               t1 = (struct tcphdr *) skb_put(buff, tp->tcp_header_len);
-               memcpy(t1,(void *) &sk->dummy_th, sizeof(*t1));
-
-               /*      Use a previous sequence.
-                *      This should cause the other end to send an ack.
-                */
-        
-               t1->seq = htonl(tp->snd_nxt-1);
-               t1->ack_seq = htonl(tp->rcv_nxt);
-               t1->window = htons(tcp_select_window(sk));
-               tcp_build_and_update_options((__u32 *)(t1 + 1), tp);
-
-               tp->af_specific->send_check(sk, t1, tp->tcp_header_len, buff);
-       }
-
-       /* Send it. */
-       tp->af_specific->queue_xmit(buff);
-       tcp_statistics.TcpOutSegs++;
-}
-
-/*
- *     A window probe timeout has occurred.
- *     If window is not closed send a partial packet
- *     else a zero probe.
- */
-
-void tcp_send_probe0(struct sock *sk)
-{
-       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
-
-       tcp_write_wakeup(sk);
-       tp->pending = TIME_PROBE0;
-       tp->backoff++;
-       tp->probes_out++;
-       tcp_reset_xmit_timer (sk, TIME_PROBE0, 
-                             min(tp->rto << tp->backoff, 120*HZ));
-}
+/*
+ * INET                An implementation of the TCP/IP protocol suite for the LINUX
+ *             operating system.  INET is implemented using the  BSD Socket
+ *             interface as the means of communication with the user level.
+ *
+ *             Implementation of the Transmission Control Protocol(TCP).
+ *
+ * Version:    $Id: tcp_output.c,v 1.113 1999/09/07 02:31:39 davem Exp $
+ *
+ * Authors:    Ross Biro, <bir7@leland.Stanford.Edu>
+ *             Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
+ *             Mark Evans, <evansmp@uhura.aston.ac.uk>
+ *             Corey Minyard <wf-rch!minyard@relay.EU.net>
+ *             Florian La Roche, <flla@stud.uni-sb.de>
+ *             Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
+ *             Linus Torvalds, <torvalds@cs.helsinki.fi>
+ *             Alan Cox, <gw4pts@gw4pts.ampr.org>
+ *             Matthew Dillon, <dillon@apollo.west.oic.com>
+ *             Arnt Gulbrandsen, <agulbra@nvg.unit.no>
+ *             Jorge Cwik, <jorge@laser.satlink.net>
+ */
+
+/*
+ * Changes:    Pedro Roque     :       Retransmit queue handled by TCP.
+ *                             :       Fragmentation on mtu decrease
+ *                             :       Segment collapse on retransmit
+ *                             :       AF independence
+ *
+ *             Linus Torvalds  :       send_delayed_ack
+ *             David S. Miller :       Charge memory using the right skb
+ *                                     during syn/ack processing.
+ *             David S. Miller :       Output engine completely rewritten.
+ *             Andrea Arcangeli:       SYNACK carry ts_recent in tsecr.
+ *
+ */
+
+#include <net/tcp.h>
+
+#include <linux/smp_lock.h>
+
+extern int sysctl_tcp_timestamps;
+extern int sysctl_tcp_window_scaling;
+extern int sysctl_tcp_sack;
+
+/* People can turn this off for buggy TCP's found in printers etc. */
+int sysctl_tcp_retrans_collapse = 1;
+
+/* Get rid of any delayed acks, we sent one already.. */
+static __inline__ void clear_delayed_acks(struct sock * sk)
+{
+       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+
+       tp->delayed_acks = 0;
+       if(tcp_in_quickack_mode(tp))
+               tcp_exit_quickack_mode(tp);
+       tcp_clear_xmit_timer(sk, TIME_DACK);
+}
+
+static __inline__ void update_send_head(struct sock *sk)
+{
+       struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
+       
+       tp->send_head = tp->send_head->next;
+       if (tp->send_head == (struct sk_buff *) &sk->write_queue)
+               tp->send_head = NULL;
+}
+
+/* Calculate mss to advertise in SYN segment.
+   RFC1122, RFC1063, draft-ietf-tcpimpl-pmtud-01 state that:
+
+   1. It is independent of path mtu.
+   2. Ideally, it is the maximal possible segment size, i.e. 65535-40.
+   3. For IPv4 it is reasonable to calculate it from the maximal MTU of
+      attached devices, because some buggy hosts are confused by
+      large MSS.
+   4. We do not do 3; we advertise an MSS calculated from the first
+      hop device mtu, but allow it to be raised to ip_rt_min_advmss.
+      This may be overridden via information stored in the routing table.
+   5. Value 65535 for MSS is valid in IPv6 and means "as large as possible,
+      probably even Jumbo".
+ */
+static __u16 tcp_advertise_mss(struct sock *sk)
+{
+       struct dst_entry *dst = __sk_dst_get(sk);
+       int mss;
+
+       if (dst) {
+               mss = dst->advmss;
+       } else {
+               struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+
+               /* No dst. It is bad. Guess some reasonable value.
+                * Actually, this case should not be possible.
+                * SANITY.
+                */
+               BUG_TRAP(dst!=NULL);
+
+               mss = tp->mss_cache;
+               mss += (tp->tcp_header_len - sizeof(struct tcphdr)) +
+                       tp->ext_header_len;
+
+               /* Minimal MSS to include the full set of TCP/IP options
+                  plus 8 bytes of data. It corresponds to mtu 128.
+                */
+               if (mss < 88)
+                       mss = 88;
+       }
+
+       return (__u16)mss;
+}
+
+/* This routine actually transmits TCP packets queued in by
+ * tcp_do_sendmsg().  This is used by both the initial
+ * transmission and possible later retransmissions.
+ * All SKB's seen here are completely headerless.  It is our
+ * job to build the TCP header, and pass the packet down to
+ * IP so it can do the same plus pass the packet off to the
+ * device.
+ *
+ * We are working here with either a clone of the original
+ * SKB, or a fresh unique copy made by the retransmit engine.
+ */
+void tcp_transmit_skb(struct sock *sk, struct sk_buff *skb)
+{
+       if(skb != NULL) {
+               struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+               struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
+               int tcp_header_size = tp->tcp_header_len;
+               struct tcphdr *th;
+               int sysctl_flags;
+
+#define SYSCTL_FLAG_TSTAMPS    0x1
+#define SYSCTL_FLAG_WSCALE     0x2
+#define SYSCTL_FLAG_SACK       0x4
+
+               sysctl_flags = 0;
+               if(tcb->flags & TCPCB_FLAG_SYN) {
+                       tcp_header_size = sizeof(struct tcphdr) + TCPOLEN_MSS;
+                       if(sysctl_tcp_timestamps) {
+                               tcp_header_size += TCPOLEN_TSTAMP_ALIGNED;
+                               sysctl_flags |= SYSCTL_FLAG_TSTAMPS;
+                       }
+                       if(sysctl_tcp_window_scaling) {
+                               tcp_header_size += TCPOLEN_WSCALE_ALIGNED;
+                               sysctl_flags |= SYSCTL_FLAG_WSCALE;
+                       }
+                       if(sysctl_tcp_sack) {
+                               sysctl_flags |= SYSCTL_FLAG_SACK;
+                               if(!(sysctl_flags & SYSCTL_FLAG_TSTAMPS))
+                                       tcp_header_size += TCPOLEN_SACKPERM_ALIGNED;
+                       }
+               } else if(tp->sack_ok && tp->num_sacks) {
+                       /* A SACK is 2 pad bytes, a 2 byte header, plus
+                        * 2 32-bit sequence numbers for each SACK block.
+                        */
+                       tcp_header_size += (TCPOLEN_SACK_BASE_ALIGNED +
+                                           (tp->num_sacks * TCPOLEN_SACK_PERBLOCK));
+               }
+               th = (struct tcphdr *) skb_push(skb, tcp_header_size);
+               skb->h.th = th;
+               skb_set_owner_w(skb, sk);
+
+               /* Build TCP header and checksum it. */
+               th->source              = sk->sport;
+               th->dest                = sk->dport;
+               th->seq                 = htonl(TCP_SKB_CB(skb)->seq);
+               th->ack_seq             = htonl(tp->rcv_nxt);
+               th->doff                = (tcp_header_size >> 2);
+               th->res1                = 0;
+               *(((__u8 *)th) + 13)    = tcb->flags;
+               th->check               = 0;
+               th->urg_ptr             = ntohs(tcb->urg_ptr);
+               if(tcb->flags & TCPCB_FLAG_SYN) {
+                       /* RFC1323: The window in SYN & SYN/ACK segments
+                        * is never scaled.
+                        */
+                       th->window      = htons(tp->rcv_wnd);
+                       tcp_syn_build_options((__u32 *)(th + 1),
+                                             tcp_advertise_mss(sk),
+                                             (sysctl_flags & SYSCTL_FLAG_TSTAMPS),
+                                             (sysctl_flags & SYSCTL_FLAG_SACK),
+                                             (sysctl_flags & SYSCTL_FLAG_WSCALE),
+                                             tp->rcv_wscale,
+                                             TCP_SKB_CB(skb)->when,
+                                             tp->ts_recent);
+               } else {
+                       th->window      = htons(tcp_select_window(sk));
+                       tcp_build_and_update_options((__u32 *)(th + 1),
+                                                    tp, TCP_SKB_CB(skb)->when);
+               }
+               tp->af_specific->send_check(sk, th, skb->len, skb);
+
+               clear_delayed_acks(sk);
+               tp->last_ack_sent = tp->rcv_nxt;
+               tcp_statistics.TcpOutSegs++;
+               tp->af_specific->queue_xmit(skb);
+       }
+#undef SYSCTL_FLAG_TSTAMPS
+#undef SYSCTL_FLAG_WSCALE
+#undef SYSCTL_FLAG_SACK
+}
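
As a hedged aside on the SYN branch above: the TCPOLEN_* option lengths are taken to be the usual values from the kernel headers (MSS 4, aligned timestamps 12, aligned window scale 4, aligned SACK-permitted 4); they are assumptions here, since the definitions are not part of this diff. A standalone user-space sketch of the header sizing, not kernel code:

/* Illustrative only: how tcp_transmit_skb() sizes a SYN header.
 * The option lengths below are assumed, not taken from this diff.
 */
#include <stdio.h>

#define TCPOLEN_MSS              4
#define TCPOLEN_TSTAMP_ALIGNED  12
#define TCPOLEN_WSCALE_ALIGNED   4
#define TCPOLEN_SACKPERM_ALIGNED 4

int main(void)
{
	int tstamps = 1, wscale = 1, sack = 1;
	int hdr = 20 + TCPOLEN_MSS;	/* sizeof(struct tcphdr) + MSS option */

	if (tstamps)
		hdr += TCPOLEN_TSTAMP_ALIGNED;
	if (wscale)
		hdr += TCPOLEN_WSCALE_ALIGNED;
	if (sack && !tstamps)	/* SACK-permitted is packed with the timestamps */
		hdr += TCPOLEN_SACKPERM_ALIGNED;

	printf("SYN header size: %d bytes\n", hdr);	/* 40 with everything on */
	return 0;
}
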
+
+/* This is the main buffer sending routine. We queue the buffer
+ * and decide whether to transmit it now or leave it queued.
+ */
+void tcp_send_skb(struct sock *sk, struct sk_buff *skb, int force_queue)
+{
+       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+
+       /* Advance write_seq and place onto the write_queue. */
+       tp->write_seq += (TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq);
+       __skb_queue_tail(&sk->write_queue, skb);
+
+       if (!force_queue && tp->send_head == NULL && tcp_snd_test(sk, skb)) {
+               /* Send it out now. */
+               TCP_SKB_CB(skb)->when = tcp_time_stamp;
+               tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
+               tp->packets_out++;
+               tcp_transmit_skb(sk, skb_clone(skb, GFP_KERNEL));
+               if(!tcp_timer_is_set(sk, TIME_RETRANS))
+                       tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
+       } else {
+               /* Queue it, remembering where we must start sending. */
+               if (tp->send_head == NULL)
+                       tp->send_head = skb;
+               if (!force_queue && tp->packets_out == 0 && !tp->pending) {
+                       tp->pending = TIME_PROBE0;
+                       tcp_reset_xmit_timer(sk, TIME_PROBE0, tp->rto);
+               }
+       }
+}
+
+/* Function to create two new TCP segments.  Shrinks the given segment
+ * to the specified size and appends a new segment with the rest of the
+ * packet to the list.  This won't be called frequently, I hope. 
+ * Remember, these are still headerless SKBs at this point.
+ */
+static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
+{
+       struct sk_buff *buff;
+       int nsize = skb->len - len;
+       u16 flags;
+
+       /* Get a new skb... force flag on. */
+       buff = sock_wmalloc(sk,
+                           (nsize + MAX_HEADER + sk->prot->max_header),
+                           1, GFP_ATOMIC);
+       if (buff == NULL)
+               return -1; /* We'll just try again later. */
+
+       /* Reserve space for headers. */
+       skb_reserve(buff, MAX_HEADER + sk->prot->max_header);
+               
+       /* Correct the sequence numbers. */
+       TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
+       TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq;
+       
+       /* PSH and FIN should only be set in the second packet. */
+       flags = TCP_SKB_CB(skb)->flags;
+       TCP_SKB_CB(skb)->flags = flags & ~(TCPCB_FLAG_FIN | TCPCB_FLAG_PSH);
+       if(flags & TCPCB_FLAG_URG) {
+               u16 old_urg_ptr = TCP_SKB_CB(skb)->urg_ptr;
+
+               /* Urgent data is always a pain in the ass. */
+               if(old_urg_ptr > len) {
+                       TCP_SKB_CB(skb)->flags &= ~(TCPCB_FLAG_URG);
+                       TCP_SKB_CB(skb)->urg_ptr = 0;
+                       TCP_SKB_CB(buff)->urg_ptr = old_urg_ptr - len;
+               } else {
+                       flags &= ~(TCPCB_FLAG_URG);
+               }
+       }
+       if(!(flags & TCPCB_FLAG_URG))
+               TCP_SKB_CB(buff)->urg_ptr = 0;
+       TCP_SKB_CB(buff)->flags = flags;
+       TCP_SKB_CB(buff)->sacked = 0;
+
+       /* Copy and checksum data tail into the new buffer. */
+       buff->csum = csum_partial_copy(skb->data + len, skb_put(buff, nsize),
+                                      nsize, 0);
+
+       /* This takes care of the FIN sequence number too. */
+       TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq;
+       skb_trim(skb, len);
+
+       /* Rechecksum original buffer. */
+       skb->csum = csum_partial(skb->data, skb->len, 0);
+
+       /* Looks stupid, but our code really uses the 'when' field of
+        * skbs which it has never sent before. --ANK
+        */
+       TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when;
+
+       /* Link BUFF into the send queue. */
+       __skb_append(skb, buff);
+
+       return 0;
+}
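
To make the sequence bookkeeping above concrete (the FIN occupies one sequence number and travels with the second piece), here is a minimal user-space sketch with made-up numbers; it only mirrors the seq/end_seq/flag updates, not the real skb handling:

/* Illustrative split of a 1400-byte segment carrying a FIN at len = 1000. */
#include <stdio.h>

struct seg { unsigned int seq, end_seq; int fin; };

int main(void)
{
	struct seg skb  = { 1000, 2401, 1 };	/* data 1000..2399, FIN at 2400 */
	struct seg buff;
	unsigned int len = 1000;		/* bytes kept in the first piece */

	buff.seq     = skb.seq + len;		/* 2000 */
	buff.end_seq = skb.end_seq;		/* 2401, still counts the FIN */
	buff.fin     = skb.fin;			/* FIN moves to the tail piece */

	skb.end_seq  = buff.seq;		/* first piece now ends at 2000 */
	skb.fin      = 0;

	printf("first %u-%u fin=%d, second %u-%u fin=%d\n",
	       skb.seq, skb.end_seq, skb.fin,
	       buff.seq, buff.end_seq, buff.fin);
	return 0;
}
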
+
+/* This function synchronizes the snd mss to the current pmtu/exthdr set.
+
+   tp->user_mss is the mss set by the user via TCP_MAXSEG. It does NOT
+   account for TCP options; it assumes only the bare TCP header.
+
+   tp->mss_clamp is the mss negotiated at connection setup.
+   It is the minimum of user_mss and the mss received with the SYN.
+   It also does not include TCP options.
+
+   tp->pmtu_cookie is the last pmtu seen by this function.
+
+   tp->mss_cache is the current effective sending mss, including
+   all tcp options except for SACKs. It is evaluated taking the
+   current pmtu into account, but never exceeds tp->mss_clamp.
+
+   NOTE1. rfc1122 clearly states that advertised MSS
+   DOES NOT include either tcp or ip options.
+
+   NOTE2. tp->pmtu_cookie and tp->mss_cache are READ ONLY outside
+   this function.                      --ANK (980731)
+ */
+
+int tcp_sync_mss(struct sock *sk, u32 pmtu)
+{
+       struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
+       int mss_now;
+
+       /* Calculate base mss without TCP options:
+          It is MMS_S - sizeof(tcphdr) of rfc1122
+        */
+
+       mss_now = pmtu - tp->af_specific->net_header_len - sizeof(struct tcphdr);
+
+       /* Clamp it (mss_clamp does not include tcp options) */
+       if (mss_now > tp->mss_clamp)
+               mss_now = tp->mss_clamp;
+
+       /* Now subtract TCP options size, not including SACKs */
+       mss_now -= tp->tcp_header_len - sizeof(struct tcphdr);
+
+       /* Now subtract optional transport overhead */
+       mss_now -= tp->ext_header_len;
+
+       /* If we get a too small (or even negative) value,
+          clamp it to 8 from below. Why 8 ?
+          Well, it could be 1 with the same success,
+          but if IP accepted segment of length 1,
+          it would love 8 even more 8)         --ANK (980731)
+        */
+       if (mss_now < 8)
+               mss_now = 8;
+
+       /* And store cached results */
+       tp->pmtu_cookie = pmtu;
+       tp->mss_cache = mss_now;
+       return mss_now;
+}
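
To see the arithmetic above with numbers, here is a small user-space sketch under assumed values (IPv4 header of 20 bytes, timestamps enabled so tcp_header_len is 32, no extension headers, mss_clamp left at 65535); it is illustrative only, not the kernel function:

#include <stdio.h>

int main(void)
{
	int pmtu = 1500, net_header_len = 20, tcp_header_len = 32;
	int ext_header_len = 0, mss_clamp = 65535;
	int mss_now;

	mss_now = pmtu - net_header_len - 20;	/* 1460: MMS_S - sizeof(tcphdr) */
	if (mss_now > mss_clamp)
		mss_now = mss_clamp;
	mss_now -= tcp_header_len - 20;		/* 1448: drop option bytes */
	mss_now -= ext_header_len;
	if (mss_now < 8)
		mss_now = 8;

	printf("mss_cache = %d\n", mss_now);	/* 1448 */
	return 0;
}
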
+
+
+/* This routine writes packets to the network.  It advances the
+ * send_head.  This happens as incoming acks open up the remote
+ * window for us.
+ */
+void tcp_write_xmit(struct sock *sk)
+{
+       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+       unsigned int mss_now;
+
+       /* Account for SACKS, we may need to fragment due to this.
+        * It is just like the real MSS changing on us midstream.
+        * We also handle things correctly when the user adds some
+        * IP options mid-stream.  Silly to do, but cover it.
+        */
+       mss_now = tcp_current_mss(sk); 
+
+       /* If we are zapped, the bytes will have to remain here.
+        * In time closedown will empty the write queue and all
+        * will be happy.
+        */
+       if(!sk->zapped) {
+               struct sk_buff *skb;
+               int sent_pkts = 0;
+
+               /* Anything on the transmit queue that fits the window can
+                * be added providing we are:
+                *
+                * a) following SWS avoidance [and Nagle algorithm]
+                * b) not exceeding our congestion window.
+                * c) not retransmitting [Nagle]
+                */
+               while((skb = tp->send_head) && tcp_snd_test(sk, skb)) {
+                       if (skb->len > mss_now) {
+                               if (tcp_fragment(sk, skb, mss_now))
+                                       break;
+                       }
+
+                       /* Advance the send_head.  This one is going out. */
+                       update_send_head(sk);
+                       TCP_SKB_CB(skb)->when = tcp_time_stamp;
+                       tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
+                       tp->packets_out++;
+                       tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC));
+                       sent_pkts = 1;
+               }
+
+               /* If we sent anything, make sure the retransmit
+                * timer is active.
+                */
+               if (sent_pkts && !tcp_timer_is_set(sk, TIME_RETRANS))
+                       tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
+       }
+}
+
+/* This function returns the amount that we can raise the
+ * usable window based on the following constraints
+ *  
+ * 1. The window can never be shrunk once it is offered (RFC 793)
+ * 2. We limit memory per socket
+ *
+ * RFC 1122:
+ * "the suggested [SWS] avoidance algorithm for the receiver is to keep
+ *  RECV.NEXT + RCV.WIN fixed until:
+ *  RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
+ *
+ * i.e. don't raise the right edge of the window until you can raise
+ * it at least MSS bytes.
+ *
+ * Unfortunately, the recommended algorithm breaks header prediction,
+ * since header prediction assumes th->window stays fixed.
+ *
+ * Strictly speaking, keeping th->window fixed violates the receiver
+ * side SWS prevention criteria. The problem is that under this rule
+ * a stream of single byte packets will cause the right side of the
+ * window to always advance by a single byte.
+ * 
+ * Of course, if the sender implements sender side SWS prevention
+ * then this will not be a problem.
+ * 
+ * BSD seems to make the following compromise:
+ * 
+ *     If the free space is less than the 1/4 of the maximum
+ *     space available and the free space is less than 1/2 mss,
+ *     then set the window to 0.
+ *     Otherwise, just prevent the window from shrinking
+ *     and from being larger than the largest representable value.
+ *
+ * This prevents incremental opening of the window in the regime
+ * where TCP is limited by the speed of the reader side taking
+ * data out of the TCP receive queue. It does nothing about
+ * those cases where the window is constrained on the sender side
+ * because the pipeline is full.
+ *
+ * BSD also seems to "accidentally" limit itself to windows that are a
+ * multiple of MSS, at least until the free space gets quite small.
+ * This would appear to be a side effect of the mbuf implementation.
+ * Combining these two algorithms results in the observed behavior
+ * of having a fixed window size at almost all times.
+ *
+ * Below we obtain similar behavior by forcing the offered window to
+ * a multiple of the mss when it is feasible to do so.
+ *
+ * Note, we don't "adjust" for TIMESTAMP or SACK option bytes.
+ * Regular options like TIMESTAMP are taken into account.
+ */
+u32 __tcp_select_window(struct sock *sk)
+{
+       struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
+       /* MSS for the peer's data.  Previous versions used mss_clamp
+        * here.  I don't know if the value based on our guesses
+        * of peer's MSS is better for the performance.  It's more correct
+        * but may be worse for the performance because of rcv_mss
+        * fluctuations.  --SAW  1998/11/1
+        */
+       unsigned int mss = tp->rcv_mss;
+       int free_space;
+       u32 window;
+
+       /* Sometimes free_space can be < 0. */
+       free_space = tcp_space(sk); 
+       if (free_space > ((int) tp->window_clamp))
+               free_space = tp->window_clamp;
+       if (tp->window_clamp < mss)
+               mss = tp->window_clamp; 
+       
+       if ((free_space < (tcp_full_space(sk) / 2)) && 
+               (free_space < ((int) (mss/2)))) {
+               window = 0;
+               tp->pred_flags = 0; 
+       } else {
+               /* Get the largest window that is a nice multiple of mss.
+                * Window clamp already applied above.
+                * If our current window offering is within 1 mss of the
+                * free space we just keep it. This prevents the divide
+                * and multiply from happening most of the time.
+                * We also don't do any window rounding when the free space
+                * is too small.
+                */
+               window = tp->rcv_wnd;
+               if ((((int) window) <= (free_space - ((int) mss))) ||
+                               (((int) window) > free_space))
+                       window = (((unsigned int) free_space)/mss)*mss;
+       }
+       return window;
+}
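
A worked example of the rounding rule above, with made-up numbers (mss = 500, free_space = 2600): an existing offer of 2500 is kept because it is within one mss of the free space, while offers of 1500 or 2700 are replaced by (2600/500)*500 = 2500. A standalone sketch of just that branch:

#include <stdio.h>

int main(void)
{
	int mss = 500, free_space = 2600;
	int offers[] = { 2500, 1500, 2700 };
	int i;

	for (i = 0; i < 3; i++) {
		int window = offers[i];

		/* Keep the current offer when it is within one mss of the
		 * free space and not beyond it; otherwise round the free
		 * space down to a multiple of mss.
		 */
		if (window <= free_space - mss || window > free_space)
			window = (free_space / mss) * mss;
		printf("offer %d -> window %d\n", offers[i], window);
	}
	return 0;
}
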
+
+/* Attempt to collapse two adjacent SKB's during retransmission. */
+static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int mss_now)
+{
+       struct sk_buff *next_skb = skb->next;
+
+       /* The first test we must make is that neither of these two
+        * SKB's are still referenced by someone else.
+        */
+       if(!skb_cloned(skb) && !skb_cloned(next_skb)) {
+               int skb_size = skb->len, next_skb_size = next_skb->len;
+               u16 flags = TCP_SKB_CB(skb)->flags;
+
+               /* Punt if the first SKB has URG set. */
+               if(flags & TCPCB_FLAG_URG)
+                       return;
+       
+               /* Also punt if next skb has been SACK'd. */
+               if(TCP_SKB_CB(next_skb)->sacked & TCPCB_SACKED_ACKED)
+                       return;
+
+               /* Punt if not enough space exists in the first SKB for
+                * the data in the second, or the total combined payload
+                * would exceed the MSS.
+                */
+               if ((next_skb_size > skb_tailroom(skb)) ||
+                   ((skb_size + next_skb_size) > mss_now))
+                       return;
+
+               /* Ok.  We will be able to collapse the packet. */
+               __skb_unlink(next_skb, next_skb->list);
+
+               if(skb->len % 4) {
+                       /* Must copy and rechecksum all data. */
+                       memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
+                       skb->csum = csum_partial(skb->data, skb->len, 0);
+               } else {
+                       /* Optimize, actually we could also combine next_skb->csum
+                        * to skb->csum using a single add w/carry operation too.
+                        */
+                       skb->csum = csum_partial_copy(next_skb->data,
+                                                     skb_put(skb, next_skb_size),
+                                                     next_skb_size, skb->csum);
+               }
+       
+               /* Update sequence range on original skb. */
+               TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq;
+
+               /* Merge over control information. */
+               flags |= TCP_SKB_CB(next_skb)->flags; /* This moves PSH/FIN etc. over */
+               if(flags & TCPCB_FLAG_URG) {
+                       u16 urgptr = TCP_SKB_CB(next_skb)->urg_ptr;
+                       TCP_SKB_CB(skb)->urg_ptr = urgptr + skb_size;
+               }
+               TCP_SKB_CB(skb)->flags = flags;
+
+               /* All done, get rid of second SKB and account for it so
+                * packet counting does not break.
+                */
+               kfree_skb(next_skb);
+               sk->tp_pinfo.af_tcp.packets_out--;
+       }
+}
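
The alignment test above (skb->len % 4) is what lets the second branch seed csum_partial_copy() with the running skb->csum: a one's-complement sum over a concatenation equals the sum over the tail seeded with the sum over the head, provided the head ends on a 16-bit word boundary (the multiple-of-4 check is a stricter, sufficient condition). A toy user-space demonstration of that property; the csum() below is only an illustration, not the kernel's csum_partial():

#include <stdio.h>

/* Toy 16-bit one's-complement sum over a byte buffer, seeded with 'sum'. */
static unsigned int csum(const unsigned char *p, int len, unsigned int sum)
{
	int i;

	for (i = 0; i + 1 < len; i += 2)
		sum += (p[i] << 8) | p[i + 1];
	if (len & 1)
		sum += p[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

int main(void)
{
	unsigned char a[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	unsigned char b[6] = { 9, 10, 11, 12, 13, 14 };
	unsigned char ab[14];
	unsigned int whole, composed;
	int i;

	for (i = 0; i < 8; i++)
		ab[i] = a[i];
	for (i = 0; i < 6; i++)
		ab[8 + i] = b[i];

	whole    = csum(ab, 14, 0);
	composed = csum(b, 6, csum(a, 8, 0));	/* valid: 8 is even */

	printf("%#x %#x\n", whole, composed);	/* prints the same value twice */
	return 0;
}
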
+
+/* Do a simple retransmit without using the backoff mechanisms in
+ * tcp_timer. This is used for path mtu discovery. 
+ * The socket is already locked here.
+ */ 
+void tcp_simple_retransmit(struct sock *sk)
+{
+       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+       struct sk_buff *skb, *old_next_skb;
+       unsigned int mss = tcp_current_mss(sk);
+
+       /* Don't muck with the congestion window here. */
+       tp->dup_acks = 0;
+       tp->high_seq = tp->snd_nxt;
+       tp->retrans_head = NULL;
+
+       /* Input control flow will see that this was retransmitted
+        * and not use it for RTT calculation in the absence of
+        * the timestamp option.
+        */
+       for (old_next_skb = skb = skb_peek(&sk->write_queue);
+            ((skb != tp->send_head) &&
+             (skb != (struct sk_buff *)&sk->write_queue));
+            skb = skb->next) {
+               int resend_skb = 0;
+
+               /* Our goal is to push out the packets which we
+                * sent already, but are being chopped up now to
+                * account for the PMTU information we have.
+                *
+                * As we resend the queue, packets are fragmented
+                * into two pieces, and when we try to send the
+                * second piece it may be collapsed together with
+                * a subsequent packet, and so on.  -DaveM
+                */
+               if (old_next_skb != skb || skb->len > mss)
+                       resend_skb = 1;
+               old_next_skb = skb->next;
+               if (resend_skb != 0)
+                       tcp_retransmit_skb(sk, skb);
+       }
+}
+
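+/* Advance the retransmit pointer.  When it reaches send_head or wraps
+ * around to the head of the write queue, this pass over the retransmit
+ * queue is done.
+ */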
+static __inline__ void update_retrans_head(struct sock *sk)
+{
+       struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
+       
+       tp->retrans_head = tp->retrans_head->next;
+       if((tp->retrans_head == tp->send_head) ||
+          (tp->retrans_head == (struct sk_buff *) &sk->write_queue)) {
+               tp->retrans_head = NULL;
+               tp->rexmt_done = 1;
+       }
+}
+
+/* This retransmits one SKB.  Policy decisions and retransmit queue
+ * state updates are done by the caller.  Returns non-zero if an
+ * error occurred which prevented the send.
+ */
+int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
+{
+       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+       unsigned int cur_mss = tcp_current_mss(sk);
+
+       if(skb->len > cur_mss) {
+               if(tcp_fragment(sk, skb, cur_mss))
+                       return 1; /* We'll try again later. */
+
+               /* New SKB created, account for it. */
+               tp->packets_out++;
+       }
+
+       /* Collapse two adjacent packets if worthwhile and we can. */
+       if(!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_SYN) &&
+          (skb->len < (cur_mss >> 1)) &&
+          (skb->next != tp->send_head) &&
+          (skb->next != (struct sk_buff *)&sk->write_queue) &&
+          (sysctl_tcp_retrans_collapse != 0))
+               tcp_retrans_try_collapse(sk, skb, cur_mss);
+
+       if(tp->af_specific->rebuild_header(sk))
+               return 1; /* Routing failure or similar. */
+
+       /* Some Solaris stacks overoptimize and ignore the FIN on a
+        * retransmit when old data is attached.  So strip it off
+        * since it is cheap to do so and saves bytes on the network.
+        */
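+       /* (The snd_una test below guarantees that only the FIN byte itself
+        *  remains unacknowledged, so a bare FIN is enough.)
+        */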
+       if(skb->len > 0 &&
+          (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
+          tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) {
+               TCP_SKB_CB(skb)->seq = TCP_SKB_CB(skb)->end_seq - 1;
+               skb_trim(skb, 0);
+               skb->csum = 0;
+       }
+
+       /* Ok, we're gonna send it out, update state. */
+       TCP_SKB_CB(skb)->sacked |= TCPCB_SACKED_RETRANS;
+       tp->retrans_out++;
+
+       /* Make a copy, if the first transmission SKB clone we made
+        * is still in somebody's hands, else make a clone.
+        */
+       TCP_SKB_CB(skb)->when = tcp_time_stamp;
+       if(skb_cloned(skb))
+               skb = skb_copy(skb, GFP_ATOMIC);
+       else
+               skb = skb_clone(skb, GFP_ATOMIC);
+
+       tcp_transmit_skb(sk, skb);
+
+       /* Update global TCP statistics and return success. */
+       sk->prot->retransmits++;
+       tcp_statistics.TcpRetransSegs++;
+
+       return 0;
+}
+
+/* This gets called after a retransmit timeout, and the initially
+ * retransmitted data is acknowledged.  It tries to continue
+ * resending the rest of the retransmit queue, until either
+ * we've sent it all or the congestion window limit is reached.
+ * If doing SACK, the first ACK which comes back for a timeout-based
+ * retransmit packet might feed us FACK information again.  If so, we
+ * use it to avoid unnecessary retransmissions.
+ */
+void tcp_xmit_retransmit_queue(struct sock *sk)
+{
+       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+       struct sk_buff *skb;
+
+       if (tp->retrans_head == NULL &&
+           tp->rexmt_done == 0)
+               tp->retrans_head = skb_peek(&sk->write_queue);
+       if (tp->retrans_head == tp->send_head)
+               tp->retrans_head = NULL;
+
+       /* Each time, advance the retrans_head if we got
+        * a packet out or we skipped one because it was
+        * SACK'd.  -DaveM
+        */
+       while ((skb = tp->retrans_head) != NULL) {
+               /* If it has been ack'd by a SACK block, we don't
+                * retransmit it.
+                */
+               if(!(TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED)) {
+                       /* Send it out, punt if error occurred. */
+                       if(tcp_retransmit_skb(sk, skb))
+                               break;
+
+                       update_retrans_head(sk);
+               
+                       /* Stop retransmitting if we've hit the congestion
+                        * window limit.
+                        */
+                       if (tp->retrans_out >= tp->snd_cwnd)
+                               break;
+               } else {
+                       update_retrans_head(sk);
+               }
+       }
+}
+
+/* Using FACK information, retransmit all missing frames at the receiver
+ * up to the forward-most SACK'd packet (tp->fackets_out), provided the
+ * packet has not been retransmitted already.
+ */
+void tcp_fack_retransmit(struct sock *sk)
+{
+       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+       struct sk_buff *skb = skb_peek(&sk->write_queue);
+       int packet_cnt = 0;
+
+       while((skb != NULL) &&
+             (skb != tp->send_head) &&
+             (skb != (struct sk_buff *)&sk->write_queue)) {
+               __u8 sacked = TCP_SKB_CB(skb)->sacked;
+
+               if(sacked & (TCPCB_SACKED_ACKED | TCPCB_SACKED_RETRANS))
+                       goto next_packet;
+
+               /* Ok, retransmit it. */
+               if(tcp_retransmit_skb(sk, skb))
+                       break;
+
+               if(tcp_packets_in_flight(tp) >= tp->snd_cwnd)
+                       break;
+next_packet:
+               packet_cnt++;
+               if(packet_cnt >= tp->fackets_out)
+                       break;
+               skb = skb->next;
+       }
+}
+
+/* Send a fin.  The caller locks the socket for us.  This cannot be
+ * allowed to fail queueing a FIN frame under any circumstances.
+ */
+void tcp_send_fin(struct sock *sk)
+{
+       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);    
+       struct sk_buff *skb = skb_peek_tail(&sk->write_queue);
+       unsigned int mss_now;
+       
+       /* Optimization, tack on the FIN if we have a queue of
+        * unsent frames.  But be careful about outgoing SACKS
+        * and IP options.
+        */
+       mss_now = tcp_current_mss(sk); 
+
+       if((tp->send_head != NULL) && (skb->len < mss_now)) {
+               /* tcp_write_xmit() takes care of the rest. */
+               TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_FIN;
+               TCP_SKB_CB(skb)->end_seq++;
+               tp->write_seq++;
+
+               /* Special case to avoid Nagle bogosity.  If this
+                * segment is the last segment, and it was queued
+                * due to Nagle/SWS-avoidance, send it out now.
+                */
+               if(tp->send_head == skb &&
+                  !sk->nonagle &&
+                  skb->len < (tp->rcv_mss >> 1) &&
+                  tp->packets_out &&
+                  !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_URG)) {
+                       update_send_head(sk);
+                       TCP_SKB_CB(skb)->when = tcp_time_stamp;
+                       tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
+                       tp->packets_out++;
+                       tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC));
+                       if(!tcp_timer_is_set(sk, TIME_RETRANS))
+                               tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
+               }
+       } else {
+               /* Socket is locked, keep trying until memory is available. */
+               do {
+                       skb = sock_wmalloc(sk,
+                                          (MAX_HEADER +
+                                           sk->prot->max_header),
+                                          1, GFP_KERNEL);
+               } while (skb == NULL);
+
+               /* Reserve space for headers and prepare control bits. */
+               skb_reserve(skb, MAX_HEADER + sk->prot->max_header);
+               skb->csum = 0;
+               TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_FIN);
+               TCP_SKB_CB(skb)->sacked = 0;
+               TCP_SKB_CB(skb)->urg_ptr = 0;
+
+               /* FIN eats a sequence byte, write_seq advanced by tcp_send_skb(). */
+               TCP_SKB_CB(skb)->seq = tp->write_seq;
+               TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
+               tcp_send_skb(sk, skb, 0);
+       }
+}
+
+/* We get here when a process closes a file descriptor (either due to
+ * an explicit close() or as a byproduct of exit()'ing) and there
+ * was unread data in the receive queue.  This behavior is recommended
+ * by draft-ietf-tcpimpl-prob-03.txt section 3.10.  -DaveM
+ */
+void tcp_send_active_reset(struct sock *sk, int priority)
+{
+       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+       struct sk_buff *skb;
+
+       /* NOTE: No TCP options attached and we never retransmit this. */
+       skb = alloc_skb(MAX_HEADER + sk->prot->max_header, priority);
+       if (!skb)
+               return;
+
+       /* Reserve space for headers and prepare control bits. */
+       skb_reserve(skb, MAX_HEADER + sk->prot->max_header);
+       skb->csum = 0;
+       TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_RST);
+       TCP_SKB_CB(skb)->sacked = 0;
+       TCP_SKB_CB(skb)->urg_ptr = 0;
+
+       /* Send it off. */
+       TCP_SKB_CB(skb)->seq = tp->write_seq;
+       TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
+       TCP_SKB_CB(skb)->when = tcp_time_stamp;
+       tcp_transmit_skb(sk, skb);
+}
+
+/* WARNING: This routine must only be called when we have already sent
+ * a SYN packet that crossed the incoming SYN that caused this routine
+ * to get called. If this assumption fails then the initial rcv_wnd
+ * and rcv_wscale values will not be correct.
+ */
+int tcp_send_synack(struct sock *sk)
+{
+       struct tcp_opt* tp = &(sk->tp_pinfo.af_tcp);
+       struct sk_buff* skb;    
+
+       skb = sock_wmalloc(sk, (MAX_HEADER + sk->prot->max_header),
+                          1, GFP_ATOMIC);
+       if (skb == NULL) 
+               return -ENOMEM;
+
+       /* Reserve space for headers and prepare control bits. */
+       skb_reserve(skb, MAX_HEADER + sk->prot->max_header);
+       skb->csum = 0;
+       TCP_SKB_CB(skb)->flags = (TCPCB_FLAG_ACK | TCPCB_FLAG_SYN);
+       TCP_SKB_CB(skb)->sacked = 0;
+       TCP_SKB_CB(skb)->urg_ptr = 0;
+
+       /* SYN eats a sequence byte. */
+       TCP_SKB_CB(skb)->seq = tp->snd_una;
+       TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
+       __skb_queue_tail(&sk->write_queue, skb);
+       TCP_SKB_CB(skb)->when = tcp_time_stamp;
+       tp->packets_out++;
+       tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC));
+       return 0;
+}
+
+/*
+ * Prepare a SYN-ACK.
+ */
+struct sk_buff * tcp_make_synack(struct sock *sk, struct dst_entry *dst,
+                                struct open_request *req)
+{
+       struct tcphdr *th;
+       int tcp_header_size;
+       struct sk_buff *skb;
+
+       skb = sock_wmalloc(sk, MAX_HEADER + sk->prot->max_header, 1, GFP_ATOMIC);
+       if (skb == NULL)
+               return NULL;
+
+       /* Reserve space for headers. */
+       skb_reserve(skb, MAX_HEADER + sk->prot->max_header);
+
+       skb->dst = dst_clone(dst);
+
+       tcp_header_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
+                          (req->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
+                          (req->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
+                          /* SACK_PERM is in the place of NOP NOP of TS */
+                          ((req->sack_ok && !req->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));
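+       /* Illustrative sizing, assuming the usual aligned option lengths
+        * (MSS 4, timestamps 12, window scale 4, SACK-permitted 4 bytes):
+        * with timestamps and window scaling negotiated this comes to
+        * 20 + 4 + 12 + 4 = 40 bytes.
+        */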
+       skb->h.th = th = (struct tcphdr *) skb_push(skb, tcp_header_size);
+
+       memset(th, 0, sizeof(struct tcphdr));
+       th->syn = 1;
+       th->ack = 1;
+       th->source = sk->sport;
+       th->dest = req->rmt_port;
+       TCP_SKB_CB(skb)->seq = req->snt_isn;
+       TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
+       th->seq = htonl(TCP_SKB_CB(skb)->seq);
+       th->ack_seq = htonl(req->rcv_isn + 1);
+       if (req->rcv_wnd == 0) { /* ignored for retransmitted syns */
+               __u8 rcv_wscale; 
+               /* Set this up on the first call only */
+               req->window_clamp = skb->dst->window;
+               /* tcp_full_space because it is guaranteed to be the first packet */
+               tcp_select_initial_window(tcp_full_space(sk), 
+                       dst->advmss - (req->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
+                       &req->rcv_wnd,
+                       &req->window_clamp,
+                       req->wscale_ok,
+                       &rcv_wscale);
+               req->rcv_wscale = rcv_wscale; 
+       }
+
+       /* RFC1323: The window in SYN & SYN/ACK segments is never scaled. */
+       th->window = htons(req->rcv_wnd);
+
+       TCP_SKB_CB(skb)->when = tcp_time_stamp;
+       tcp_syn_build_options((__u32 *)(th + 1), dst->advmss, req->tstamp_ok,
+                             req->sack_ok, req->wscale_ok, req->rcv_wscale,
+                             TCP_SKB_CB(skb)->when,
+                             req->ts_recent);
+
+       skb->csum = 0;
+       th->doff = (tcp_header_size >> 2);
+       tcp_statistics.TcpOutSegs++;
+       return skb;
+}
+
+int tcp_connect(struct sock *sk, struct sk_buff *buff)
+{
+       struct dst_entry *dst = __sk_dst_get(sk);
+       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+
+       /* Reserve space for headers. */
+       skb_reserve(buff, MAX_HEADER + sk->prot->max_header);
+
+       /* We'll fix this up when we get a response from the other end.
+        * See tcp_input.c:tcp_rcv_state_process case TCP_SYN_SENT.
+        */
+       tp->tcp_header_len = sizeof(struct tcphdr) +
+               (sysctl_tcp_timestamps ? TCPOLEN_TSTAMP_ALIGNED : 0);
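+       /* Illustratively, assuming TCPOLEN_TSTAMP_ALIGNED is 12 bytes, this
+        * is 20 bytes of base header plus 12 bytes of aligned timestamp
+        * option, i.e. 32 bytes, when timestamps are enabled.
+        */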
+
+       /* If the user gave us a TCP_MAXSEG, record it as the MSS clamp. */
+       if (tp->user_mss)
+               tp->mss_clamp = tp->user_mss;
+       tcp_sync_mss(sk, dst->pmtu);
+
+       tp->window_clamp = dst->window;
+
+       tcp_select_initial_window(tcp_full_space(sk),
+               dst->advmss - (tp->tcp_header_len - sizeof(struct tcphdr)),
+               &tp->rcv_wnd,
+               &tp->window_clamp,
+               sysctl_tcp_window_scaling,
+               &tp->rcv_wscale);
+
+       /* Socket identity change complete, no longer
+        * in TCP_CLOSE, so enter ourselves into the
+        * hash tables.
+        */
+       tcp_set_state(sk,TCP_SYN_SENT);
+       if (tp->af_specific->hash_connecting(sk))
+               goto err_out;
+
+       sk->err = 0;
+       tp->snd_wnd = 0;
+       tp->snd_wl1 = 0;
+       tp->snd_wl2 = tp->write_seq;
+       tp->snd_una = tp->write_seq;
+       tp->rcv_nxt = 0;
+       tp->rcv_wup = 0;
+       tp->copied_seq = 0;
+
+       tp->rto = TCP_TIMEOUT_INIT;
+       tcp_init_xmit_timers(sk);
+       tp->retransmits = 0;
+       tp->fackets_out = 0;
+       tp->retrans_out = 0;
+
+       TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
+       TCP_SKB_CB(buff)->sacked = 0;
+       TCP_SKB_CB(buff)->urg_ptr = 0;
+       buff->csum = 0;
+       TCP_SKB_CB(buff)->seq = tp->write_seq++;
+       TCP_SKB_CB(buff)->end_seq = tp->write_seq;
+       tp->snd_nxt = tp->write_seq;
+
+       /* Send it off. */
+       TCP_SKB_CB(buff)->when = tcp_time_stamp;
+       __skb_queue_tail(&sk->write_queue, buff);
+       tp->packets_out++;
+       tcp_transmit_skb(sk, skb_clone(buff, GFP_KERNEL));
+       tcp_statistics.TcpActiveOpens++;
+
+       /* Timer for repeating the SYN until an answer. */
+       tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
+       return 0;
+
+err_out:
+       tcp_set_state(sk,TCP_CLOSE);
+       kfree_skb(buff);
+       return -EADDRNOTAVAIL;
+}
+
+/* Send out a delayed ack, the caller does the policy checking
+ * to see if we should even be here.  See tcp_input.c:tcp_ack_snd_check()
+ * for details.
+ */
+void tcp_send_delayed_ack(struct sock *sk, int max_timeout)
+{
+       struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
+       unsigned long timeout;
+
+       /* Stay within the limit we were given */
+       timeout = tp->ato;
+       if (timeout > max_timeout)
+               timeout = max_timeout;
+       timeout += jiffies;
+
+       /* Use the new timeout only if there wasn't an older one already pending. */
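+       /* If the timer is not already pending we must hold a reference on
+        * the socket before arming it; if it is pending we only ever move
+        * its expiry earlier, never later.
+        */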
+       spin_lock_bh(&sk->timer_lock);
+       if (!tp->delack_timer.prev || !del_timer(&tp->delack_timer)) {
+               sock_hold(sk);
+               tp->delack_timer.expires = timeout;
+       } else {
+               if (time_before(timeout, tp->delack_timer.expires))
+                       tp->delack_timer.expires = timeout;
+       }
+       add_timer(&tp->delack_timer);
+       spin_unlock_bh(&sk->timer_lock);
+}
+
+/* This routine sends an ack and also updates the window. */
+void tcp_send_ack(struct sock *sk)
+{
+       /* If we have been reset, we may not send again. */
+       if(!sk->zapped) {
+               struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+               struct sk_buff *buff;
+
+               /* We are not putting this on the write queue, so
+                * tcp_transmit_skb() will set the ownership to this
+                * sock.
+                */
+               buff = alloc_skb(MAX_HEADER + sk->prot->max_header, GFP_ATOMIC);
+               if (buff == NULL) {
+                       /* Force it to send an ack.  We don't have to do this
+                        * (ACKs are unreliable), but it is a much better use of
+                        * bandwidth on slow links to send a spare ack than to
+                        * resend packets.
+                        *
+                        * This is the one possible way that we can delay an
+                        * ACK and have tp->ato indicate that we are in
+                        * quick ack mode, so clear it.
+                        */
+                       if(tcp_in_quickack_mode(tp))
+                               tcp_exit_quickack_mode(tp);
+                       tcp_send_delayed_ack(sk, HZ/2);
+                       return;
+               }
+
+               /* Reserve space for headers and prepare control bits. */
+               skb_reserve(buff, MAX_HEADER + sk->prot->max_header);
+               buff->csum = 0;
+               TCP_SKB_CB(buff)->flags = TCPCB_FLAG_ACK;
+               TCP_SKB_CB(buff)->sacked = 0;
+               TCP_SKB_CB(buff)->urg_ptr = 0;
+
+               /* Send it off, this clears delayed acks for us. */
+               TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tp->snd_nxt;
+               TCP_SKB_CB(buff)->when = tcp_time_stamp;
+               tcp_transmit_skb(sk, buff);
+       }
+}
+
+/* This routine sends a packet with an out of date sequence
+ * number. It assumes the other end will try to ack it.
+ */
+void tcp_write_wakeup(struct sock *sk)
+{
+       /* After a valid reset we can send no more. */
+       if (!sk->zapped) {
+               struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+               struct sk_buff *skb;
+
+               /* Write data can still be transmitted/retransmitted in the
+                * following states.  If any other state is encountered, return.
+                * [listen/close will never occur here anyway]
+                */
+               if ((1 << sk->state) &
+                   ~(TCPF_ESTABLISHED|TCPF_CLOSE_WAIT|TCPF_FIN_WAIT1|
+                     TCPF_FIN_WAIT2|TCPF_LAST_ACK|TCPF_CLOSING))
+                       return;
+
+               if (before(tp->snd_nxt, tp->snd_una + tp->snd_wnd) &&
+                   ((skb = tp->send_head) != NULL)) {
+                       unsigned long win_size;
+
+                       /* We are probing the opening of a window, but the
+                        * window size is != 0, so this must have been the
+                        * result of sender-side SWS avoidance.
+                        */
+                       win_size = tp->snd_wnd - (tp->snd_nxt - tp->snd_una);
+                       if (win_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq) {
+                               if (tcp_fragment(sk, skb, win_size))
+                                       return; /* Let a retransmit get it. */
+                       }
+                       update_send_head(sk);
+                       TCP_SKB_CB(skb)->when = tcp_time_stamp;
+                       tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
+                       tp->packets_out++;
+                       tcp_transmit_skb(sk, skb_clone(skb, GFP_ATOMIC));
+                       if (!tcp_timer_is_set(sk, TIME_RETRANS))
+                               tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
+               } else {
+                       /* We don't queue it, tcp_transmit_skb() sets ownership. */
+                       skb = alloc_skb(MAX_HEADER + sk->prot->max_header,
+                                       GFP_ATOMIC);
+                       if (skb == NULL) 
+                               return;
+
+                       /* Reserve space for headers and set control bits. */
+                       skb_reserve(skb, MAX_HEADER + sk->prot->max_header);
+                       skb->csum = 0;
+                       TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
+                       TCP_SKB_CB(skb)->sacked = 0;
+                       TCP_SKB_CB(skb)->urg_ptr = 0;
+
+                       /* Use a previous sequence.  This should cause the other
+                        * end to send an ack.  Don't queue or clone SKB, just
+                        * send it.
+                        */
+                       TCP_SKB_CB(skb)->seq = tp->snd_nxt - 1;
+                       TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
+                       TCP_SKB_CB(skb)->when = tcp_time_stamp;
+                       tcp_transmit_skb(sk, skb);
+               }
+       }
+}
+
+/* A window probe timeout has occurred.  If the window is not closed,
+ * send a partial packet, else send a zero-window probe.
+ */
+void tcp_send_probe0(struct sock *sk)
+{
+       struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+
+       tcp_write_wakeup(sk);
+       tp->pending = TIME_PROBE0;
+       tp->backoff++;
+       tp->probes_out++;
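+       /* The probe interval backs off exponentially (rto << backoff) on
+        * each unanswered probe and is clamped at 120 seconds by the
+        * min() below.
+        */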
+       tcp_reset_xmit_timer (sk, TIME_PROBE0, 
+                             min(tp->rto << tp->backoff, 120*HZ));
+}