/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Version:	@(#)tcp_output.c	1.0.16	05/25/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/config.h>
#include <net/tcp.h>

/*
 *	This is the main buffer sending routine. We queue the buffer
 *	having checked it is sane seeming.
 */

void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
	int size;
	struct tcphdr * th = skb->h.th;

	/*
	 *	length of packet (not counting length of pre-tcp headers)
	 */

	size = skb->len - ((unsigned char *) th - skb->data);

	/*
	 *	Sanity check it..
	 */

	if (size < sizeof(struct tcphdr) || size > skb->len)
	{
		printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
			skb, skb->data, th, skb->len);
		kfree_skb(skb, FREE_WRITE);
		return;
	}

	/*
	 *	If we have queued a header size packet.. (these crash a few
	 *	tcp stacks if ack is not set)
	 */

	if (size == sizeof(struct tcphdr))
	{
		/* If it's got a syn or fin it's notionally included in the size..*/
		if (!th->syn && !th->fin)
		{
			printk("tcp_send_skb: attempt to queue a bogon.\n");
			kfree_skb(skb, FREE_WRITE);
			return;
		}
	}

	/*
	 *	Actual processing.
	 */

	tcp_statistics.TcpOutSegs++;
	skb->seq = ntohl(th->seq);
	skb->end_seq = skb->seq + size - 4*th->doff;

	/*
	 *	We must queue if
	 *
	 *	a) The right edge of this frame exceeds the window
	 *	b) We are retransmitting (Nagle's rule)
	 *	c) We have too many packets 'in flight'
	 */

	if (after(skb->end_seq, sk->window_seq) ||
	    (sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE) ||
	    sk->packets_out >= sk->cong_window)
	{
		/* checksum will be supplied by tcp_write_xmit. So
		 * we shouldn't need to set it at all. I'm being paranoid */
		th->check = 0;
		if (skb->next != NULL)
		{
			printk("tcp_send_partial: next != NULL\n");
			skb_unlink(skb);
		}
		skb_queue_tail(&sk->write_queue, skb);

		if (before(sk->window_seq, sk->write_queue.next->end_seq) &&
		    sk->send_head == NULL && sk->ack_backlog == 0)
			tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
	}
	else
	{
		/*
		 *	This is going straight out
		 */

		th->ack_seq = htonl(sk->acked_seq);
		th->window = htons(tcp_select_window(sk));

		tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

		sk->sent_seq = sk->write_seq;

		/*
		 *	This is mad. The tcp retransmit queue is put together
		 *	by the ip layer. This causes half the problems with
		 *	unroutable FIN's and other things.
		 */

		sk->prot->queue_xmit(sk, skb->dev, skb, 0);

		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;

		/*
		 *	Set for next retransmit based on expected ACK time.
		 *	FIXME: We set this every time which means our
		 *	retransmits are really about a window behind.
		 */

		tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
	}
}
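
/*
 *	A minimal restatement of the queue-or-send test above as a
 *	predicate (a sketch with a hypothetical helper name, not used
 *	elsewhere in this file): queue when the frame overruns the offered
 *	window, when Nagle's rule applies (a retransmit timer is pending),
 *	or when the congestion window is already full.
 */

static int tcp_must_queue(struct sock *sk, struct sk_buff *skb)
{
	return after(skb->end_seq, sk->window_seq) ||	/* beyond the window */
	       (sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE) || /* Nagle */
	       sk->packets_out >= sk->cong_window;	/* too many in flight */
}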

/*
 *	Locking problems lead us to a messy situation where we can have
 *	multiple partially complete buffers queued up. This is really bad
 *	as we don't want to be sending partial buffers. Fix this with
 *	a semaphore or similar to lock tcp_write per socket.
 *
 *	These routines are pretty self descriptive.
 */

struct sk_buff * tcp_dequeue_partial(struct sock * sk)
{
	struct sk_buff * skb;
	unsigned long flags;

	save_flags(flags);
	cli();
	skb = sk->partial;
	if (skb) {
		sk->partial = NULL;
		del_timer(&sk->partial_timer);
	}
	restore_flags(flags);
	return skb;
}

/*
 *	Empty the partial queue
 */

void tcp_send_partial(struct sock *sk)
{
	struct sk_buff *skb;

	if (sk == NULL)
		return;
	while ((skb = tcp_dequeue_partial(sk)) != NULL)
		tcp_send_skb(sk, skb);
}

/*
 *	Queue a partial frame
 */

void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
	struct sk_buff * tmp;
	unsigned long flags;

	save_flags(flags);
	cli();
	tmp = sk->partial;
	if (tmp)
		del_timer(&sk->partial_timer);
	sk->partial = skb;
	init_timer(&sk->partial_timer);

	/*
	 *	Wait up to 1 second for the buffer to fill.
	 */

	sk->partial_timer.expires = jiffies+HZ;
	sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
	sk->partial_timer.data = (unsigned long) sk;
	add_timer(&sk->partial_timer);
	restore_flags(flags);
	if (tmp)
		tcp_send_skb(sk, tmp);
}

/*
 *	This routine takes stuff off of the write queue,
 *	and puts it in the xmit queue. This happens as incoming acks
 *	open up the remote window for us.
 */

void tcp_write_xmit(struct sock *sk)
{
	struct sk_buff *skb;

	/*
	 *	The bytes will have to remain here. In time closedown will
	 *	empty the write queue and all will be happy
	 */

	if (sk->zapped)
		return;

	/*
	 *	Anything on the transmit queue that fits the window can
	 *	be added providing we are not
	 *
	 *	a) retransmitting (Nagle's rule)
	 *	b) exceeding our congestion window.
	 */

	while ((skb = skb_peek(&sk->write_queue)) != NULL &&
	       before(skb->end_seq, sk->window_seq + 1) &&
	       (sk->retransmits == 0 ||
	        sk->ip_xmit_timeout != TIME_WRITE ||
	        before(skb->end_seq, sk->rcv_ack_seq + 1))
	       && sk->packets_out < sk->cong_window)
	{
		IS_SKB(skb);
		skb_unlink(skb);

		/*
		 *	See if we really need to send the packet.
		 */

		if (before(skb->end_seq, sk->rcv_ack_seq + 1))
		{
			/*
			 *	This is acked data. We can discard it. This
			 *	cannot currently occur.
			 */

			sk->retransmits = 0;
			kfree_skb(skb, FREE_WRITE);
			if (!sk->dead)
				sk->write_space(sk);
		}
		else
		{
			struct tcphdr *th;
			struct iphdr *iph;
			int size;
			/*
			 * put in the ack seq and window at this point rather than earlier,
			 * in order to keep them monotonic.  We really want to avoid taking
			 * back window allocations.  That's legal, but RFC1122 says it's frowned on.
			 * Ack and window will in general have changed since this packet was put
			 * on the write queue.
			 */
			iph = skb->ip_hdr;
			th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));
			size = skb->len - (((unsigned char *) th) - skb->data);
#ifndef CONFIG_NO_PATH_MTU_DISCOVERY
			if (size > sk->mtu - sizeof(struct iphdr))
			{
				iph->frag_off &= ~htons(IP_DF);
				ip_send_check(iph);
			}
#endif

			th->ack_seq = htonl(sk->acked_seq);
			th->window = htons(tcp_select_window(sk));

			tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

			sk->sent_seq = skb->end_seq;

			/*
			 *	IP manages our queue for some crazy reason
			 */

			sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);

			sk->ack_backlog = 0;
			sk->bytes_rcv = 0;

			/*
			 *	Again we slide the timer wrongly
			 */

			tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
		}
	}
}
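
/*
 *	The monotonicity argument above, as a sketch (hypothetical helper,
 *	not used by this file): the advertised right edge, ack_seq + window,
 *	should never move backwards between two segments, which is why both
 *	fields are filled in at transmit time rather than at queueing time.
 */

static int tcp_window_shrank(u32 old_ack, u32 old_win, u32 new_ack, u32 new_win)
{
	/* Non-zero if the new advertisement takes back window allocation
	   (legal per RFC 1122, but frowned upon). All values host order. */
	return before(new_ack + new_win, old_ack + old_win);
}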

/*
 *	A socket has timed out on its send queue and wants to do a
 *	little retransmitting. Currently this means TCP.
 */

void tcp_do_retransmit(struct sock *sk, int all)
{
	struct sk_buff * skb;
	struct proto *prot;
	struct device *dev;
	int ct=0;
	struct rtable *rt;

	prot = sk->prot;
	skb = sk->send_head;

	while (skb != NULL)
	{
		struct tcphdr *th;
		struct iphdr *iph;
		int size;

		dev = skb->dev;
		IS_SKB(skb);
		skb->when = jiffies;

		/* dl1bke 960201 - @%$$! Hope this cures strange race conditions */
		/* with AX.25 mode VC. (esp. DAMA) */
		/* if the buffer is locked we should not retransmit */
		/* anyway, so we don't need all the fuss to prepare */
		/* the buffer in this case. */
		/* (the skb_pull() changes skb->data while we may */
		/* actually try to send the data. Ouch. A side */
		/* effect is that we'll send some unnecessary data, */
		/* but the alternative is disastrous... */

		if (skb_device_locked(skb))
			break;

		/*
		 *	Discard the surplus MAC header
		 */

		skb_pull(skb, ((unsigned char *)skb->ip_hdr) - skb->data);

		/*
		 * In general it's OK just to use the old packet.  However we
		 * need to use the current ack and window fields.  Urg and
		 * urg_ptr could possibly stand to be updated as well, but we
		 * don't keep the necessary data.  That shouldn't be a problem,
		 * if the other end is doing the right thing.  Since we're
		 * changing the packet, we have to issue a new IP identifier.
		 */

		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));
		size = ntohs(iph->tot_len) - (iph->ihl << 2);

		/*
		 *	Note: We ought to check for window limits here but
		 *	currently this is done (less efficiently) elsewhere.
		 */

		/*
		 *	Put a MAC header back on (may cause ARPing)
		 */

		/* ANK: UGLY, but the bug, that was here, should be fixed.
		 */
		{
			struct options * opt = (struct options*)skb->proto_priv;
			rt = ip_check_route(&sk->ip_route_cache, opt->srr?opt->faddr:iph->daddr, skb->localroute);
		}

		iph->id = htons(ip_id_count++);
#ifndef CONFIG_NO_PATH_MTU_DISCOVERY
		if (rt && ntohs(iph->tot_len) > rt->rt_mtu)
			iph->frag_off &= ~htons(IP_DF);
#endif
		ip_send_check(iph);

		if (rt==NULL)	/* Deep poo */
		{
			if (skb->sk)
			{
				skb->sk->err_soft=ENETUNREACH;
				skb->sk->error_report(skb->sk);
			}
		}
		else
		{
			dev=rt->rt_dev;
			skb->raddr=rt->rt_gateway;
			skb->dev=dev;
			skb->arp=1;
			if (rt->rt_hh)
			{
				memcpy(skb_push(skb,dev->hard_header_len),rt->rt_hh->hh_data,dev->hard_header_len);
				if (!rt->rt_hh->hh_uptodate)
				{
					skb->arp = 0;
#if RT_CACHE_DEBUG >= 2
					printk("tcp_do_retransmit: hh miss %08x via %08x\n", iph->daddr, rt->rt_gateway);
#endif
				}
			}
			else if (dev->hard_header)
			{
				if (dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, skb->len)<0)
					skb->arp=0;
			}

			/*
			 *	This is not the right way to handle this. We have to
			 *	issue an up to date window and ack report with this
			 *	retransmit to keep the odd buggy tcp that relies on
			 *	the fact BSD does this happy.
			 *	We don't however need to recalculate the entire
			 *	checksum, so someone wanting a small problem to play
			 *	with might like to implement RFC1141/RFC1624 and speed
			 *	this up by avoiding a full checksum.
			 */

			th->ack_seq = htonl(sk->acked_seq);
			sk->ack_backlog = 0;
			sk->bytes_rcv = 0;
			th->window = ntohs(tcp_select_window(sk));
			tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

			/*
			 *	If the interface is (still) up and running, kick it.
			 */

			if (dev->flags & IFF_UP)
			{
				/*
				 *	If the packet is still being sent by the device/protocol
				 *	below then don't retransmit. This is both needed, and good -
				 *	especially with connected mode AX.25 where it stops resends
				 *	occurring of an as yet unsent anyway frame!
				 *	We still add up the counts as the round trip time wants
				 *	adjusting.
				 */
				if (sk && !skb_device_locked(skb))
				{
					/* Remove it from any existing driver queue first! */
					skb_unlink(skb);
					/* Now queue it */
					ip_statistics.IpOutRequests++;
					dev_queue_xmit(skb, dev, sk->priority);
				}
			}
		}

		/*
		 *	Count retransmissions
		 */

		ct++;
		sk->prot->retransmits ++;
		tcp_statistics.TcpRetransSegs++;

		/*
		 *	Only one retransmit requested.
		 */

		if (!all)
			break;

		/*
		 *	This should cut it off before we send too many packets.
		 */

		if (ct >= sk->cong_window)
			break;
		skb = skb->link3;
	}
}
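
/*
 *	The comment in tcp_do_retransmit() suggests RFC1141/RFC1624 style
 *	incremental checksum updates as an optimisation. A minimal sketch
 *	of the RFC 1624 update (hypothetical helper, not wired into the
 *	code above): when one 16 bit word of a checksummed header changes
 *	from m to m', the new checksum is HC' = ~(~HC + ~m + m').
 */

static unsigned short tcp_csum_update(unsigned short check,
				      unsigned short old_word,
				      unsigned short new_word)
{
	unsigned long sum;

	sum = (~check & 0xffff) + (~old_word & 0xffff) + new_word;
	sum = (sum & 0xffff) + (sum >> 16);	/* fold the end-around carry */
	sum = (sum & 0xffff) + (sum >> 16);	/* a second carry is possible */
	return ~sum & 0xffff;
}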

/*
 *	This routine will send an RST to the other tcp.
 */

void tcp_send_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	int tmp;
	struct device *ndev=NULL;

	/*
	 *	Cannot reset a reset (Think about it).
	 */

	if (th->rst)
		return;

	/*
	 * We need to grab some memory, and put together an RST,
	 * and then put it into the queue to be sent.
	 */

	buff = sock_wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
		return;

	buff->sk = NULL;
	buff->dev = dev;
	buff->localroute = 0;
	buff->csum = 0;

	/*
	 *	Put in the IP header and routing stuff.
	 */

	tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
				 sizeof(struct tcphdr), tos, ttl, NULL);
	if (tmp < 0)
	{
		buff->free = 1;
		sock_wfree(NULL, buff);
		return;
	}

	t1 = (struct tcphdr *) skb_put(buff, sizeof(struct tcphdr));
	memcpy(t1, th, sizeof(*t1));

	/*
	 *	Swap the send and the receive.
	 */

	t1->dest = th->source;
	t1->source = th->dest;
	t1->syn = 0;
	t1->fin = 0;
	t1->urg = 0;
	t1->rst = 1;
	t1->psh = 0;

	if (th->ack)
	{
		t1->ack = 0;
		t1->seq = th->ack_seq;
	}
	else
	{
		if (!th->syn)
			t1->ack_seq = th->seq;
		else
			t1->ack_seq = htonl(ntohl(th->seq)+1);
	}

	tcp_send_check(t1, saddr, daddr, sizeof(*t1), buff);
	prot->queue_xmit(NULL, ndev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
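
/*
 *	For reference, the sequence selection above follows RFC 793 reset
 *	generation: a reset answering an ACK borrows its sequence number
 *	from that ack field, while a reset answering anything else
 *	acknowledges the offending segment instead, where a SYN notionally
 *	occupies one sequence number. The ack half of the rule as a sketch
 *	(hypothetical helper, not used above):
 */

static u32 tcp_reset_ack_seq(struct tcphdr *th)
{
	/* both input and result are in network byte order */
	return th->syn ? htonl(ntohl(th->seq) + 1) : th->seq;
}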

/*
 *	Send a fin.
 */

void tcp_send_fin(struct sock *sk)
{
	struct proto *prot = (struct proto *)sk->prot;
	struct tcphdr *th = (struct tcphdr *)&sk->dummy_th;
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct device *dev=NULL;
	int tmp;

	release_sock(sk); /* in case the malloc sleeps. */

	buff = sock_wmalloc(sk, MAX_RESET_SIZE, 1, GFP_KERNEL);
	sk->inuse = 1;

	if (buff == NULL)
	{
		/* This is a disaster if it occurs */
		printk("tcp_send_fin: Impossible malloc failure");
		return;
	}

	/*
	 *	Administrivia
	 */

	buff->sk = sk;
	buff->localroute = sk->localroute;
	buff->csum = 0;

	/*
	 *	Put in the IP header and routing stuff.
	 */

	tmp = prot->build_header(buff, sk->saddr, sk->daddr, &dev,
				 IPPROTO_TCP, sk->opt,
				 sizeof(struct tcphdr), sk->ip_tos, sk->ip_ttl, &sk->ip_route_cache);
	if (tmp < 0)
	{
		int t;
		/*
		 *	Finish anyway, treat this as a send that got lost.
		 *	(Not good).
		 */

		buff->free = 1;
		sock_wfree(sk,buff);
		sk->write_seq++;
		t = del_timer(&sk->timer);
		if (t)
			add_timer(&sk->timer);
		else
			tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		return;
	}

	/*
	 *	We ought to check if the end of the queue is a buffer and
	 *	if so simply add the fin to that buffer, not send it ahead.
	 */

	t1 = (struct tcphdr *) skb_put(buff, sizeof(struct tcphdr));
	buff->dev = dev;
	memcpy(t1, th, sizeof(*t1));
	buff->seq = sk->write_seq;
	sk->write_seq++;
	buff->end_seq = sk->write_seq;
	t1->seq = htonl(buff->seq);
	t1->ack_seq = htonl(sk->acked_seq);
	t1->window = htons(sk->window = tcp_select_window(sk));
	t1->fin = 1;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);

	/*
	 * If there is data in the write queue, the fin must be appended to
	 * the write queue.
	 */

	if (skb_peek(&sk->write_queue) != NULL)
	{
		buff->free = 0;
		if (buff->next != NULL)
		{
			printk("tcp_send_fin: next != NULL\n");
			skb_unlink(buff);
		}
		skb_queue_tail(&sk->write_queue, buff);
	}
	else
	{
		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, dev, buff, 0);
		tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
	}
}

void tcp_send_synack(struct sock * newsk, struct sock * sk, struct sk_buff * skb)
{
	struct tcphdr *t1;
	unsigned char *ptr;
	struct sk_buff * buff;
	struct device *ndev=NULL;
	int tmp;

	buff = sock_wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		sk->err = ENOMEM;
		newsk->dead = 1;
		newsk->state = TCP_CLOSE;
		/* And this will destroy it */
		release_sock(newsk);
		kfree_skb(skb, FREE_READ);
		tcp_statistics.TcpAttemptFails++;
		return;
	}

	buff->sk = newsk;
	buff->localroute = newsk->localroute;

	/*
	 *	Put in the IP header and routing stuff.
	 */

	tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
			IPPROTO_TCP, NULL, MAX_SYN_SIZE, sk->ip_tos, sk->ip_ttl, &newsk->ip_route_cache);

	/*
	 *	Something went wrong.
	 */

	if (tmp < 0)
	{
		sk->err = tmp;
		buff->free = 1;
		kfree_skb(buff, FREE_WRITE);
		newsk->dead = 1;
		newsk->state = TCP_CLOSE;
		release_sock(newsk);
		skb->sk = sk;
		kfree_skb(skb, FREE_READ);
		tcp_statistics.TcpAttemptFails++;
		return;
	}

	t1 = (struct tcphdr *) skb_put(buff, sizeof(struct tcphdr));

	memcpy(t1, skb->h.th, sizeof(*t1));
	buff->seq = newsk->write_seq++;
	buff->end_seq = newsk->write_seq;

	/*
	 *	Swap the send and the receive.
	 */

	t1->dest = skb->h.th->source;
	t1->source = newsk->dummy_th.source;
	t1->seq = ntohl(buff->seq);
	newsk->sent_seq = newsk->write_seq;
	t1->window = ntohs(tcp_select_window(newsk));
	t1->syn = 1;
	t1->ack = 1;
	t1->urg = 0;
	t1->rst = 0;
	t1->psh = 0;
	t1->ack_seq = htonl(newsk->acked_seq);
	t1->doff = sizeof(*t1)/4 + 1;
	ptr = skb_put(buff, 4);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = ((newsk->mtu) >> 8) & 0xff;
	ptr[3] = (newsk->mtu) & 0xff;
	buff->csum = csum_partial(ptr, 4, 0);
	tcp_send_check(t1, newsk->saddr, newsk->daddr, sizeof(*t1)+4, buff);
	newsk->prot->queue_xmit(newsk, ndev, buff, 0);
	tcp_reset_xmit_timer(newsk, TIME_WRITE, TCP_TIMEOUT_INIT);
	skb->sk = newsk;

	/*
	 *	Charge the sock_buff to newsk.
	 */

	sk->rmem_alloc -= skb->truesize;
	newsk->rmem_alloc += skb->truesize;

	skb_queue_tail(&sk->receive_queue, skb);
	sk->ack_backlog++;
	release_sock(newsk);
	tcp_statistics.TcpOutSegs++;
}
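
/*
 *	The four option bytes appended above form the TCP maximum segment
 *	size option: kind 2, length 4, then the MSS in network byte order.
 *	The same encoding in isolation, as a sketch (hypothetical helper,
 *	not used above):
 */

static void tcp_build_mss_option(unsigned char *ptr, unsigned short mss)
{
	ptr[0] = 2;			/* kind: maximum segment size	*/
	ptr[1] = 4;			/* length, including kind/len	*/
	ptr[2] = (mss >> 8) & 0xff;	/* MSS, high byte first		*/
	ptr[3] = mss & 0xff;
}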

/*
 *	This routine sends an ack and also updates the window.
 */

void tcp_send_ack(u32 sequence, u32 ack,
	   struct sock *sk,
	   struct tcphdr *th, u32 daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	if (sk->zapped)
		return;		/* We have been reset, we may not send again */

	/*
	 * We need to grab some memory, and put together an ack,
	 * and then put it into the queue to be sent.
	 */

	buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		/*
		 *	Force it to send an ack. We don't have to do this
		 *	(ACK is unreliable) but it's much better use of
		 *	bandwidth on slow links to send a spare ack than
		 *	resend packets.
		 */

		sk->ack_backlog++;
		if (sk->ip_xmit_timeout != TIME_WRITE && tcp_connected(sk->state))
		{
			tcp_reset_xmit_timer(sk, TIME_WRITE, HZ);
		}
		return;
	}

	/*
	 *	Assemble a suitable TCP frame
	 */

	buff->sk = sk;
	buff->localroute = sk->localroute;
	buff->csum = 0;

	/*
	 *	Put in the IP header and routing stuff.
	 */

	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
			IPPROTO_TCP, sk->opt, MAX_ACK_SIZE, sk->ip_tos, sk->ip_ttl, &sk->ip_route_cache);
	if (tmp < 0)
	{
		buff->free = 1;
		sock_wfree(sk, buff);
		return;
	}
	t1 = (struct tcphdr *) skb_put(buff, sizeof(struct tcphdr));

	memcpy(t1, &sk->dummy_th, sizeof(*t1));

	/*
	 *	Swap the send and the receive.
	 */

	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);

	/*
	 *	If we have nothing queued for transmit and the transmit timer
	 *	is on we are just doing an ACK timeout and need to switch
	 *	to a keepalive.
	 */

	if (ack == sk->acked_seq) {
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;

		if (sk->send_head == NULL && skb_peek(&sk->write_queue) == NULL
		    && sk->ip_xmit_timeout == TIME_WRITE)
			if (sk->keepopen)
				tcp_reset_xmit_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
			else
				delete_timer(sk);
	}

	/*
	 *	Fill in the packet and send it
	 */

	t1->ack_seq = htonl(ack);
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), buff);
	if (sk->debug)
		printk("\rtcp_ack: seq %x ack %x\n", sequence, ack);
	sk->prot->queue_xmit(sk, dev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}

/*
 *	This routine sends a packet with an out of date sequence
 *	number. It assumes the other end will try to ack it.
 */

void tcp_write_wakeup(struct sock *sk)
{
	struct sk_buff *buff, *skb;
	struct tcphdr *t1;
	struct device *dev=NULL;
	int tmp;

	if (sk->zapped)
		return;		/* After a valid reset we can send no more */

	/*
	 *	Write data can still be transmitted/retransmitted in the
	 *	following states.  If any other state is encountered, return.
	 *	[listen/close will never occur here anyway]
	 */

	if (sk->state != TCP_ESTABLISHED &&
	    sk->state != TCP_CLOSE_WAIT &&
	    sk->state != TCP_FIN_WAIT1 &&
	    sk->state != TCP_LAST_ACK &&
	    sk->state != TCP_CLOSING
	)
	{
		return;
	}

	if (before(sk->sent_seq, sk->window_seq) &&
	    (skb=skb_peek(&sk->write_queue)))
	{
		/*
		 * We are probing the opening of a window
		 * but the window size is != 0
		 * must have been a result of sender side SWS avoidance
		 */

		struct iphdr *iph;
		struct tcphdr *th;
		struct tcphdr *nth;
		unsigned long win_size;
#if 0
		unsigned long ow_size;
#endif

		/*
		 *	How many bytes can we send ?
		 */

		win_size = sk->window_seq - sk->sent_seq;

		/*
		 *	Recover the buffer pointers
		 */

		iph = (struct iphdr *)skb->ip_hdr;
		th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));

		/*
		 *	Grab the data for a temporary frame
		 */

		buff = sock_wmalloc(sk, win_size + th->doff * 4 +
				    (iph->ihl << 2) +
				    sk->prot->max_header + 15,
				    1, GFP_ATOMIC);
		if (buff == NULL)
			return;

		/*
		 *	If we strip the packet on the write queue we must
		 *	be ready to retransmit this one
		 */

		buff->free = /*0*/1;

		buff->sk = sk;
		buff->localroute = sk->localroute;

		/*
		 *	Put headers on the new packet
		 */

		tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
					     IPPROTO_TCP, sk->opt, buff->truesize,
					     sk->ip_tos, sk->ip_ttl, &sk->ip_route_cache);
		if (tmp < 0)
		{
			sock_wfree(sk, buff);
			return;
		}

		/*
		 *	Move the TCP header over
		 */

		buff->dev = dev;

		nth = (struct tcphdr *) skb_put(buff, sizeof(*th));

		memcpy(nth, th, sizeof(*th));

		/*
		 *	Correct the new header
		 */

		nth->ack = 1;
		nth->ack_seq = htonl(sk->acked_seq);
		nth->window = htons(tcp_select_window(sk));
		nth->check = 0;

		/*
		 *	Copy TCP options and data start to our new buffer
		 */

		buff->csum = csum_partial_copy((void *)(th + 1), skb_put(buff, win_size),
				win_size + th->doff * 4 - sizeof(*th), 0);

		/*
		 *	Remember our right edge sequence number.
		 */

		buff->end_seq = sk->sent_seq + win_size;
		sk->sent_seq = buff->end_seq;	/* Hack */
		if (th->urg && ntohs(th->urg_ptr) < win_size)
			nth->urg = 0;

		/*
		 *	Checksum the split buffer
		 */

		tcp_send_check(nth, sk->saddr, sk->daddr,
			       nth->doff * 4 + win_size, buff);
	}
	else
	{
		buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
		if (buff == NULL)
			return;

		buff->free = 1;
		buff->sk = sk;
		buff->localroute = sk->localroute;
		buff->csum = 0;

		/*
		 *	Put in the IP header and routing stuff.
		 */

		tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE, sk->ip_tos, sk->ip_ttl, &sk->ip_route_cache);
		if (tmp < 0)
		{
			sock_wfree(sk, buff);
			return;
		}

		t1 = (struct tcphdr *) skb_put(buff, sizeof(struct tcphdr));
		memcpy(t1, (void *) &sk->dummy_th, sizeof(*t1));

		/*
		 *	Use a previous sequence.
		 *	This should cause the other end to send an ack.
		 */

		t1->seq = htonl(sk->sent_seq - 1);
		/* t1->fin = 0;	-- We are sending a 'previous' sequence, and 0 bytes of data - thus no FIN bit */
		t1->ack_seq = htonl(sk->acked_seq);
		t1->window = htons(tcp_select_window(sk));
		tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
	}

	/*
	 *	Send it.
	 */

	sk->prot->queue_xmit(sk, dev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
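
/*
 *	Two probe flavours are chosen between above: when the peer has
 *	opened window that we have left unused (sender side SWS avoidance
 *	kept us quiet), a real fragment of queued data is sent; otherwise
 *	a zero length segment with sequence sent_seq - 1 is sent purely to
 *	provoke an ACK carrying the peer's current window. The dispatch
 *	condition as a sketch (hypothetical helper, not used above):
 */

static int tcp_probe_carries_data(struct sock *sk)
{
	return before(sk->sent_seq, sk->window_seq) &&
	       skb_peek(&sk->write_queue) != NULL;
}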

/*
 *	A window probe timeout has occurred.
 */

void tcp_send_probe0(struct sock *sk)
{
	if (sk->zapped)
		return;		/* After a valid reset we can send no more */

	tcp_write_wakeup(sk);

	sk->backoff++;
	sk->rto = min(sk->rto << 1, 120*HZ);
	sk->retransmits++;
	sk->prot->retransmits ++;
	tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
}
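
/*
 *	The timeout policy above is plain binary exponential backoff,
 *	capped at two minutes. The same policy in isolation, as a sketch
 *	(hypothetical helper, not used above):
 */

static unsigned long tcp_probe_backoff(unsigned long rto)
{
	rto <<= 1;		/* double the timeout on every failure */
	if (rto > 120*HZ)	/* but never wait more than two minutes */
		rto = 120*HZ;
	return rto;
}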