/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	@(#)tcp_input.c	1.0.16	05/25/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *
 * Changes:	Pedro Roque	:	Retransmit queue handled by TCP.
 *				:	Fragmentation on mtu decrease
 *				:	Segment collapse on retransmit
 *
 *		Linus Torvalds	:	send_delayed_ack
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

/*
 *	Get rid of any delayed acks, we sent one already..
 */

static __inline__ void clear_delayed_acks(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	tp->delayed_acks = 0;
	sk->ack_backlog = 0;
	tcp_clear_xmit_timer(sk, TIME_DACK);
}

static __inline__ void update_send_head(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	tp->send_head = tp->send_head->next;

	if (tp->send_head == (struct sk_buff *) &sk->write_queue)
		tp->send_head = NULL;
}
static __inline__ int tcp_snd_test(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int nagle_check = 1;
	int len;

	/*
	 *	RFC 1122 - section 4.2.3.4
	 *
	 *	We must queue if:
	 *
	 *	a) The right edge of this frame exceeds the window
	 *	b) There are packets in flight and we have a small segment
	 *	   [SWS avoidance and Nagle algorithm]
	 *	   (part of SWS is done on packetization)
	 *	c) We are retransmitting [Nagle]
	 *	d) We have too many packets 'in flight'
	 */

	len = skb->end_seq - skb->seq;

	if (!sk->nonagle && len < (sk->mss >> 1) && sk->packets_out)
		nagle_check = 0;

	return (nagle_check && sk->packets_out < sk->cong_window &&
		!after(skb->end_seq, tp->snd_una + tp->snd_wnd) &&
		sk->retransmits == 0);
}
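/*
 *	Worked example of the test above: with sk->mss = 1460 and unacked
 *	data in flight (packets_out != 0), a 100 byte segment fails the
 *	Nagle check (100 < 1460/2 = 730), so tcp_snd_test() returns 0 and
 *	the segment waits in the write queue until the outstanding data
 *	is acked.
 */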
/*
 *	This is the main buffer sending routine. We queue the buffer
 *	having checked it is sane seeming.
 */

int tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr * th = skb->h.th;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int size;

	/*
	 *	length of packet (not counting length of pre-tcp headers)
	 */

	size = skb->len - ((unsigned char *) th - skb->data);

	/*
	 *	Sanity check it..
	 */

	if (size < sizeof(struct tcphdr) || size > skb->len)
	{
		printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %u)\n",
		       skb, skb->data, th, skb->len);
		kfree_skb(skb, FREE_WRITE);
		return 0;
	}

	/*
	 *	If we have queued a header size packet.. (these crash a few
	 *	tcp stacks if ack is not set)
	 */

	if (size == sizeof(struct tcphdr))
	{
		/*
		 *	If it's got a syn or fin it's ok,
		 *	otherwise discard
		 */
		if (!th->syn && !th->fin)
		{
			printk("tcp_send_skb: attempt to queue a bogon.\n");
			kfree_skb(skb, FREE_WRITE);
			return 0;
		}
	}

	tcp_statistics.TcpOutSegs++;
	skb->seq = ntohl(th->seq);
	skb->end_seq = skb->seq + size - 4*th->doff;

	if (tp->send_head || !tcp_snd_test(sk, skb))
	{
		/*
		 *	Remember where we must start sending
		 */

		if (tp->send_head == NULL)
			tp->send_head = skb;

		skb_queue_tail(&sk->write_queue, skb);

		if (sk->packets_out == 0 && !tp->pending)
		{
			tp->pending = TIME_PROBE0;
			tcp_reset_xmit_timer(sk, TIME_PROBE0, tp->rto);
		}
	}
	else
	{
		struct sk_buff * buff;

		/*
		 *	This is going straight out
		 */

		skb_queue_tail(&sk->write_queue, skb);

		clear_delayed_acks(sk);

		th->ack_seq = htonl(tp->rcv_nxt);
		th->window = htons(tcp_select_window(sk));

		tp->af_specific->send_check(sk, th, size, skb);

		tp->snd_nxt = skb->end_seq;

		atomic_inc(&sk->packets_out);

		skb->when = jiffies;

		buff = skb_clone(skb, GFP_ATOMIC);
		atomic_add(buff->truesize, &sk->wmem_alloc);

		tp->af_specific->queue_xmit(sk, skb->dev, buff, 1);

		if (!tcp_timer_is_set(sk, TIME_RETRANS))
			tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
	}

	return 0;
}
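/*
 *	Sequence bookkeeping above: size includes the TCP header, so with
 *	a 20 byte header (doff = 5) and 512 data bytes, size = 532 and
 *	end_seq = seq + 532 - 20 = seq + 512, i.e. one sequence number
 *	per data byte, as expected.
 */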
/*
 *	Function to create two new tcp segments.
 *	Shrinks the given segment to the specified size and appends a new
 *	segment with the rest of the packet to the list.
 *	This won't be called frequently, I hope...
 */

static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct sk_buff *buff;
	struct tcphdr *th, *nth;
	int nsize;
	int tmp;

	th = skb->h.th;

	/* size of new segment */
	nsize = skb->tail - ((unsigned char *) (th + 1)) - len;

	if (nsize <= 0)
	{
		printk(KERN_DEBUG "tcp_fragment: bug size <= 0\n");
		return -1;
	}

	/*
	 *	Get a new skb... force flag on
	 */

	buff = sock_wmalloc(sk, nsize + 128 + sk->prot->max_header + 15, 1,
			    GFP_ATOMIC);
	if (buff == NULL)
		return -1;

	buff->sk = sk;
	buff->localroute = sk->localroute;

	/*
	 *	Put headers on the new packet
	 */

	tmp = tp->af_specific->build_net_header(sk, buff);

	if (tmp < 0)
	{
		sock_wfree(sk, buff);
		return -1;
	}

	/*
	 *	Move the TCP header over
	 */

	nth = (struct tcphdr *) skb_put(buff, sizeof(*th));

	buff->h.th = nth;

	memcpy(nth, th, sizeof(*th));

	/*
	 *	Correct the new header
	 */

	buff->seq = skb->seq + len;
	buff->end_seq = skb->end_seq;
	nth->seq = htonl(buff->seq);
	nth->check = 0;

	/* urg data is always a headache */

	if (th->urg)
	{
		if (th->urg_ptr > len)
		{
			/* urgent data lands in the new segment */
			th->urg = 0;
			nth->urg_ptr -= len;
		}
		else
		{
			nth->urg = 0;
			nth->urg_ptr = 0;
		}
	}

	/*
	 *	Copy TCP options and data start to our new buffer
	 */

	buff->csum = csum_partial_copy(((u8 *)(th + 1)) + len,
				       skb_put(buff, nsize),
				       nsize, 0);

	skb->end_seq -= nsize;

	skb_trim(skb, skb->len - nsize);

	/* remember to checksum this packet afterwards */

	skb->csum = csum_partial((u8 *) (th + 1), skb->tail - ((u8 *) (th + 1)),
				 0);

	skb_append(skb, buff);

	return 0;
}
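/*
 *	Example: a queued segment with 2000 bytes of payload split with
 *	len = 1460 leaves 1460 bytes (seq .. seq+1460) in the original skb
 *	and moves the remaining nsize = 540 bytes into the new buffer,
 *	whose sequence range starts at skb->seq + 1460.
 */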
static void tcp_wrxmit_prob(struct sock *sk, struct sk_buff *skb)
{
	/*
	 *	This is acked data. We can discard it. This
	 *	cannot currently occur.
	 */

	printk(KERN_DEBUG "tcp_write_xmit: bug skb in write queue\n");

	update_send_head(sk);

	skb_unlink(skb);
	skb->sk = NULL;
	skb->free = 1;
	kfree_skb(skb, FREE_WRITE);

	if (!sk->dead)
		sk->write_space(sk);
}
static int tcp_wrxmit_frag(struct sock *sk, struct sk_buff *skb, int size)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	printk(KERN_DEBUG "tcp_write_xmit: frag needed size=%d mss=%d\n",
	       size, sk->mss);

	if (tcp_fragment(sk, skb, sk->mss))
	{
		/* tcp_fragment failed! */

		atomic_dec(&sk->packets_out);
		return 1;
	}
	else
	{
		/*
		 *	If tcp_fragment succeeded then
		 *	the send head is the resulting
		 *	fragment
		 */
		tp->send_head = skb->next;
	}

	return 0;
}
/*
 *	This routine writes packets to the network.
 *	It advances the send_head.
 *	This happens as incoming acks open up the remote window for us.
 */

void tcp_write_xmit(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	u16 rcv_wnd;
	int sent_pkts = 0;

	/*
	 *	The bytes will have to remain here. In time closedown will
	 *	empty the write queue and all will be happy
	 */

	if (sk->zapped)
		return;

	/*
	 *	Anything on the transmit queue that fits the window can
	 *	be added providing we are:
	 *
	 *	a) following SWS avoidance [and Nagle algorithm]
	 *	b) not exceeding our congestion window.
	 *	c) not retransmitting [Nagle]
	 */

	rcv_wnd = htons(tcp_select_window(sk));

	while ((skb = tp->send_head) && tcp_snd_test(sk, skb))
	{
		struct tcphdr *th;
		struct sk_buff *buff;
		int size;

		IS_SKB(skb);

		/*
		 *	See if we really need to send the packet.
		 */

		if (!after(skb->end_seq, tp->snd_una))
		{
			tcp_wrxmit_prob(sk, skb);
			continue;
		}

		/*
		 *	Advance the send_head
		 *	This one is going out.
		 */

		update_send_head(sk);

		atomic_inc(&sk->packets_out);

		/*
		 *	Put in the ack seq and window at this point rather
		 *	than earlier, in order to keep them monotonic.
		 *	We really want to avoid taking back window allocations.
		 *	That's legal, but RFC 1122 says it's frowned on.
		 *	Ack and window will in general have changed since
		 *	this packet was put on the write queue.
		 */

		th = skb->h.th;
		size = skb->len - (((unsigned char *) th) - skb->data);

		if (size - (th->doff << 2) > sk->mss)
		{
			if (tcp_wrxmit_frag(sk, skb, size))
				break;
		}

		th->ack_seq = htonl(tp->rcv_nxt);
		th->window = rcv_wnd;

		tp->af_specific->send_check(sk, th, size, skb);

		if (before(skb->end_seq, tp->snd_nxt))
			printk(KERN_DEBUG "tcp_write_xmit:"
			       " sending already sent seq\n");

		tp->snd_nxt = skb->end_seq;

		skb->when = jiffies;
		clear_delayed_acks(sk);

		buff = skb_clone(skb, GFP_ATOMIC);
		atomic_add(buff->truesize, &sk->wmem_alloc);

		sent_pkts = 1;
		tp->af_specific->queue_xmit(sk, skb->dev, buff, 1);
	}

	if (sent_pkts && !tcp_timer_is_set(sk, TIME_RETRANS))
	{
		tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
	}
}
/*
 *	This function returns the amount that we can raise the
 *	usable window based on the following constraints
 *
 *	1. The window can never be shrunk once it is offered (RFC 793)
 *	2. We limit memory per socket
 */

unsigned short tcp_select_window(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	int mss = sk->mss;
	long free_space = sock_rspace(sk);
	long window;
	long cur_win;
	long usable;

	if (sk->window_clamp)
	{
		free_space = min(sk->window_clamp, free_space);
		mss = min(sk->window_clamp, mss);
	}

	/*
	 *	compute the actual window i.e.
	 *	old_window - received_bytes_on_that_win
	 */

	cur_win = tp->rcv_wup - (tp->rcv_nxt - tp->rcv_wnd);
	window = tp->rcv_wnd;

	if (cur_win < 0)
	{
		cur_win = 0;
		printk(KERN_DEBUG "TSW: win < 0 w=%d 1=%u 2=%u\n",
		       tp->rcv_wnd, tp->rcv_nxt, tp->rcv_wup);
	}

	/*
	 *	RFC 1122:
	 *	"the suggested [SWS] avoidance algorithm for the receiver is to keep
	 *	 RECV.NEXT + RCV.WIN fixed until:
	 *	 RCV.BUFF - RCV.USER - RCV.WINDOW >= min(1/2 RCV.BUFF, MSS)"
	 *
	 *	i.e. don't raise the right edge of the window until you can't raise
	 *	it MSS bytes
	 */

	/*
	 *	It would be a good idea if it didn't break header prediction.
	 *	and BSD made the header prediction standard...
	 *	It expects the same value in the header i.e. th->window to be
	 *	constant
	 */

	usable = free_space - cur_win;
	if (usable < 0)
		usable = 0;

	if (window < usable)
	{
		/*
		 *	Window is not blocking the sender
		 *	and we have enough free space for it
		 */

		if (cur_win > (sk->mss << 1))
			goto out;
	}

	if (window >= usable)
	{
		/*
		 *	We are offering too much, cut it down...
		 *	but don't shrink the window
		 */

		window = max(usable, cur_win);
	}
	else
	{
		if ((usable - window) >= mss)
			window += mss;
	}

out:
	tp->rcv_wnd = window;
	tp->rcv_wup = tp->rcv_nxt;
	return window;
}
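/*
 *	Example of the cur_win arithmetic above: if the last advertised
 *	window was rcv_wnd = 4096 when rcv_wup was 1000, and rcv_nxt has
 *	since advanced to 2000, then cur_win = 1000 - (2000 - 4096) = 3096:
 *	the peer may still send 3096 of the 4096 bytes we offered.
 */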
static int tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb)
{
	struct tcphdr *th1, *th2;
	int size1, size2, avail;
	struct sk_buff *buff = skb->next;

	th1 = skb->h.th;

	if (th1->urg)
		return -1;

	avail = skb_tailroom(skb);

	/*
	 *	size of tcp payload
	 */

	size1 = skb->tail - (u8 *) (th1 + 1);

	th2 = buff->h.th;

	size2 = buff->tail - (u8 *) (th2 + 1);

	if (size2 > avail || size1 + size2 > sk->mss)
		return -1;

	/*
	 *	ok. we will be able to collapse the packet
	 */

	skb_unlink(buff);

	memcpy(skb_put(skb, size2), ((char *) th2) + (th2->doff << 2), size2);

	/*
	 *	update sizes on original skb. both TCP and IP
	 */

	skb->end_seq += size2;

	if (th2->urg)
	{
		th1->urg = 1;
		th1->urg_ptr = th2->urg_ptr + size1;
	}

	/*
	 *	... and off you go.
	 */

	buff->free = 1;
	kfree_skb(buff, FREE_WRITE);
	atomic_dec(&sk->packets_out);

	/*
	 *	Header checksum will be set by the retransmit procedure
	 *	after calling rebuild header
	 */

	th1->check = 0;
	skb->csum = csum_partial((u8 *) (th1 + 1), size1 + size2, 0);

	return 0;
}
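/*
 *	Example: two queued 256 byte segments with sk->mss = 1460 pass the
 *	size test above (256 + 256 <= 1460), so the second payload is
 *	copied behind the first and one packet, not two, is retransmitted;
 *	packets_out drops by one accordingly.
 */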
/*
 *	A socket has timed out on its send queue and wants to do a
 *	little retransmitting.
 *	retransmit_head can be different from the head of the write_queue
 *	if we are doing fast retransmit.
 */

void tcp_do_retransmit(struct sock *sk, int all)
{
	struct sk_buff * skb;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int ct = 0;

	if (tp->retrans_head == NULL)
		tp->retrans_head = skb_peek(&sk->write_queue);

	if (tp->retrans_head == tp->send_head)
		tp->retrans_head = NULL;

	while ((skb = tp->retrans_head) != NULL)
	{
		struct sk_buff *buff;
		struct tcphdr *th;
		int tcp_size;
		int size;

		IS_SKB(skb);

		/*
		 *	In general it's OK just to use the old packet. However we
		 *	need to use the current ack and window fields. Urg and
		 *	urg_ptr could possibly stand to be updated as well, but we
		 *	don't keep the necessary data. That shouldn't be a problem,
		 *	if the other end is doing the right thing. Since we're
		 *	changing the packet, we have to issue a new IP identifier.
		 */

		th = skb->h.th;

		tcp_size = skb->tail - ((unsigned char *) (th + 1));

		if (tcp_size > sk->mss)
		{
			if (tcp_fragment(sk, skb, sk->mss))
			{
				printk(KERN_DEBUG "tcp_fragment failed\n");
				return;
			}
			atomic_inc(&sk->packets_out);
		}

		if (tcp_size < (sk->mss >> 1) &&
		    skb->next != tp->send_head &&
		    skb->next != (struct sk_buff *) &sk->write_queue)
		{
			tcp_retrans_try_collapse(sk, skb);
		}

		if (tp->af_specific->rebuild_header(sk, skb))
		{
			printk(KERN_DEBUG "tcp_do_rebuild_header failed\n");
			break;
		}

		if (sk->debug)
			printk("retransmit sending\n");

		/*
		 *	update ack and window
		 */

		th->ack_seq = htonl(tp->rcv_nxt);
		th->window = ntohs(tcp_select_window(sk));

		size = skb->tail - (unsigned char *) th;
		tp->af_specific->send_check(sk, th, size, skb);

		skb->when = jiffies;

		buff = skb_clone(skb, GFP_ATOMIC);
		atomic_add(buff->truesize, &sk->wmem_alloc);

		clear_delayed_acks(sk);

		tp->af_specific->queue_xmit(sk, skb->dev, buff, 1);

		/*
		 *	Count retransmissions
		 */

		ct++;
		sk->prot->retransmits++;
		tcp_statistics.TcpRetransSegs++;

		/*
		 *	Record the high sequence number to help avoid doing
		 *	too much fast retransmission.
		 */

		if (sk->retransmits)
			tp->high_seq = tp->snd_nxt;

		/*
		 *	Only one retransmit requested.
		 */

		if (!all)
			break;

		/*
		 *	This should cut it off before we send too many packets.
		 */

		if (ct >= sk->cong_window)
			break;

		/*
		 *	Advance the pointer
		 */

		tp->retrans_head = skb->next;
		if ((tp->retrans_head == tp->send_head) ||
		    (tp->retrans_head == (struct sk_buff *) &sk->write_queue))
		{
			tp->retrans_head = NULL;
		}
	}
}
void tcp_send_fin(struct sock *sk)
{
	struct tcphdr *th = (struct tcphdr *) &sk->dummy_th;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct tcphdr *t1;
	struct sk_buff *buff;
	int tmp;

	buff = sock_wmalloc(sk, MAX_RESET_SIZE, 1, GFP_KERNEL);

	if (buff == NULL)
	{
		/* This is a disaster if it occurs */
		printk("tcp_send_fin: Impossible malloc failure\n");
		return;
	}

	/*
	 *	Administrivia
	 */

	buff->sk = sk;
	buff->localroute = sk->localroute;

	/*
	 *	Put in the IP header and routing stuff.
	 */

	tmp = tp->af_specific->build_net_header(sk, buff);

	if (tmp < 0)
	{
		int t;

		/*
		 *	Finish anyway, treat this as a send that got lost.
		 */

		buff->free = 1;
		sock_wfree(sk, buff);
		sk->write_seq++;
		t = del_timer(&sk->timer);
		if (t)
			add_timer(&sk->timer);
		else
			tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		return;
	}

	/*
	 *	We ought to check if the end of the queue is a buffer and
	 *	if so simply add the fin to that buffer, not send it ahead.
	 */

	t1 = (struct tcphdr *) skb_put(buff, sizeof(struct tcphdr));
	buff->h.th = t1;

	memcpy(t1, th, sizeof(*t1));
	buff->seq = sk->write_seq;
	sk->write_seq++;
	buff->end_seq = sk->write_seq;
	t1->seq = htonl(buff->seq);
	t1->ack_seq = htonl(tp->rcv_nxt);
	t1->window = htons(tcp_select_window(sk));
	t1->fin = 1;

	tp->af_specific->send_check(sk, t1, sizeof(*t1), buff);

	/*
	 *	The fin can only be transmitted after the data.
	 */

	skb_queue_tail(&sk->write_queue, buff);

	if (tp->send_head == NULL)
	{
		struct sk_buff *skb1;

		atomic_inc(&sk->packets_out);
		tp->snd_nxt = sk->write_seq;
		buff->when = jiffies;

		skb1 = skb_clone(buff, GFP_KERNEL);
		atomic_add(skb1->truesize, &sk->wmem_alloc);

		tp->af_specific->queue_xmit(sk, skb1->dev, skb1, 1);

		if (!tcp_timer_is_set(sk, TIME_RETRANS))
			tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);
	}
}
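/*
 *	Note that the FIN consumes one sequence number of its own:
 *	buff->seq is the pre-increment write_seq and buff->end_seq is
 *	write_seq + 1, which is why the peer's ack of the FIN is one past
 *	the last data byte.
 */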
int tcp_send_synack(struct sock *sk)
{
	struct tcp_opt * tp = &(sk->tp_pinfo.af_tcp);
	struct sk_buff * skb;
	struct sk_buff * buff;
	struct tcphdr *th;
	unsigned char *ptr;
	int tmp;

	skb = sock_wmalloc(sk, MAX_SYN_SIZE, 1, GFP_ATOMIC);

	if (skb == NULL)
		return -ENOMEM;

	skb->sk = sk;
	skb->localroute = sk->localroute;

	tmp = tp->af_specific->build_net_header(sk, skb);

	if (tmp < 0)
	{
		kfree_skb(skb, FREE_WRITE);
		return tmp;
	}

	th = (struct tcphdr *) skb_put(skb, sizeof(struct tcphdr));
	skb->h.th = th;
	memset(th, 0, sizeof(struct tcphdr));

	th->syn = 1;
	th->ack = 1;

	th->source = sk->dummy_th.source;
	th->dest = sk->dummy_th.dest;

	skb->seq = tp->snd_una;
	skb->end_seq = skb->seq + 1 /* th->syn */;
	th->seq = ntohl(skb->seq);

	th->window = ntohs(tp->rcv_wnd);

	th->ack_seq = htonl(tp->rcv_nxt);
	th->doff = sizeof(*th)/4 + 1;

	ptr = skb_put(skb, TCPOLEN_MSS);
	ptr[0] = TCPOPT_MSS;
	ptr[1] = TCPOLEN_MSS;
	ptr[2] = ((sk->mss) >> 8) & 0xff;
	ptr[3] = (sk->mss) & 0xff;
	skb->csum = csum_partial(ptr, TCPOLEN_MSS, 0);

	tp->af_specific->send_check(sk, th, sizeof(*th)+4, skb);

	skb_queue_tail(&sk->write_queue, skb);

	atomic_inc(&sk->packets_out);

	skb->when = jiffies;
	buff = skb_clone(skb, GFP_ATOMIC);

	atomic_add(skb->truesize, &sk->wmem_alloc);

	tp->af_specific->queue_xmit(sk, skb->dev, buff, 1);

	tcp_reset_xmit_timer(sk, TIME_RETRANS, TCP_TIMEOUT_INIT);

	tcp_statistics.TcpOutSegs++;

	return 0;
}
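/*
 *	The four option bytes above encode the MSS option: kind = 2
 *	(TCPOPT_MSS), length = 4, then the MSS in network byte order.
 *	For sk->mss = 1460 the bytes are 02 04 05 b4, and doff = 6 words
 *	covers the 20 byte header plus this 4 byte option.
 */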
/*
 *	Set up the timers for sending a delayed ack..
 *
 *	rules for delaying an ack:
 *	- delay time <= 0.5 HZ
 *	- must send at least every 2 full sized packets
 *	- we don't have a window update to send
 */

void tcp_send_delayed_ack(struct sock * sk, int max_timeout)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	unsigned long timeout, now;

	/* Calculate new timeout */
	now = jiffies;
	timeout = tp->ato;

	if (timeout > max_timeout || sk->bytes_rcv > (sk->mss << 2))
		timeout = now;
	else
		timeout += now;

	/* Use new timeout only if there wasn't an older one earlier */
	if (!del_timer(&tp->delack_timer) || timeout < tp->delack_timer.expires)
	{
		tp->delack_timer.expires = timeout;
	}

	add_timer(&tp->delack_timer);
}
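/*
 *	Example with HZ = 100: a caller passing max_timeout = HZ/2 (as
 *	tcp_send_ack() does below on allocation failure) guarantees the
 *	ack timer fires within 50 jiffies (0.5 s), and sooner if a
 *	shorter delayed-ack timer was already pending.
 */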
/*
 *	This routine sends an ack and also updates the window.
 */

void tcp_send_ack(struct sock *sk)
{
	struct sk_buff *buff;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	struct tcphdr *th;
	int tmp;

	if (sk->zapped)
	{
		/* We have been reset, we may not send again */
		return;
	}

	/*
	 *	We need to grab some memory, and put together an ack,
	 *	and then put it into the queue to be sent.
	 */

	buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		/*
		 *	Force it to send an ack. We don't have to do this
		 *	(ACK is unreliable) but it's much better use of
		 *	bandwidth on slow links to send a spare ack than
		 *	resend packets.
		 */

		tcp_send_delayed_ack(sk, HZ/2);
		return;
	}

	clear_delayed_acks(sk);

	/*
	 *	Assemble a suitable TCP frame
	 */

	buff->sk = sk;
	buff->localroute = sk->localroute;

	/*
	 *	Put in the IP header and routing stuff.
	 */

	tmp = tp->af_specific->build_net_header(sk, buff);

	if (tmp < 0)
	{
		buff->free = 1;
		sock_wfree(sk, buff);
		return;
	}

	th = (struct tcphdr *) skb_put(buff, sizeof(struct tcphdr));

	memcpy(th, &sk->dummy_th, sizeof(struct tcphdr));

	/*
	 *	Swap the send and the receive.
	 */

	th->window = ntohs(tcp_select_window(sk));
	th->seq = ntohl(tp->snd_nxt);
	th->ack_seq = ntohl(tp->rcv_nxt);

	/*
	 *	Fill in the packet and send it
	 */

	tp->af_specific->send_check(sk, th, sizeof(struct tcphdr), buff);

	if (sk->debug)
		printk("\rtcp_send_ack: seq %x ack %x\n",
		       tp->snd_nxt, tp->rcv_nxt);

	tp->af_specific->queue_xmit(sk, buff->dev, buff, 1);

	tcp_statistics.TcpOutSegs++;
}
/*
 *	This routine sends a packet with an out of date sequence
 *	number. It assumes the other end will try to ack it.
 */

void tcp_write_wakeup(struct sock *sk)
{
	struct sk_buff *buff, *skb;
	struct tcphdr *t1;
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
	int tmp;

	if (sk->zapped)
		return;	/* After a valid reset we can send no more */

	/*
	 *	Write data can still be transmitted/retransmitted in the
	 *	following states. If any other state is encountered, return.
	 *	[listen/close will never occur here anyway]
	 */

	if (sk->state != TCP_ESTABLISHED &&
	    sk->state != TCP_CLOSE_WAIT &&
	    sk->state != TCP_FIN_WAIT1 &&
	    sk->state != TCP_LAST_ACK &&
	    sk->state != TCP_CLOSING)
	{
		return;
	}

	if (before(tp->snd_nxt, tp->snd_una + tp->snd_wnd) &&
	    (skb = tp->send_head))
	{
		/*
		 *	We are probing the opening of a window
		 *	but the window size is != 0
		 *	must have been a result of SWS avoidance ( sender )
		 */

		struct tcphdr *th;
		unsigned long win_size;

		win_size = tp->snd_wnd - (tp->snd_nxt - tp->snd_una);

		if (win_size < skb->end_seq - skb->seq)
		{
			if (tcp_fragment(sk, skb, win_size))
			{
				printk(KERN_DEBUG "tcp_write_wakeup: "
				       "fragment failed\n");
				return;
			}
		}

		th = skb->h.th;

		tp->af_specific->send_check(sk, th, th->doff * 4 + win_size,
					    skb);

		buff = skb_clone(skb, GFP_ATOMIC);

		atomic_add(buff->truesize, &sk->wmem_alloc);
		atomic_inc(&sk->packets_out);

		clear_delayed_acks(sk);

		if (!tcp_timer_is_set(sk, TIME_RETRANS))
			tcp_reset_xmit_timer(sk, TIME_RETRANS, tp->rto);

		skb->when = jiffies;

		update_send_head(sk);

		tp->snd_nxt = skb->end_seq;

		tp->af_specific->queue_xmit(sk, skb->dev, buff, 1);
	}
	else
	{
		buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);

		if (buff == NULL)
			return;

		buff->free = 1;
		buff->sk = sk;
		buff->localroute = sk->localroute;

		/*
		 *	Put in the IP header and routing stuff.
		 */

		tmp = tp->af_specific->build_net_header(sk, buff);

		if (tmp < 0)
		{
			sock_wfree(sk, buff);
			return;
		}

		t1 = (struct tcphdr *) skb_put(buff, sizeof(struct tcphdr));
		memcpy(t1, (void *) &sk->dummy_th, sizeof(*t1));

		/*
		 *	Use a previous sequence.
		 *	This should cause the other end to send an ack.
		 */

		t1->seq = htonl(tp->snd_nxt - 1);
		/* t1->fin = 0;	-- We are sending a 'previous' sequence, and 0 bytes of data - thus no FIN bit */
		t1->ack_seq = htonl(tp->rcv_nxt);
		t1->window = htons(tcp_select_window(sk));

		tp->af_specific->send_check(sk, t1, sizeof(*t1), buff);

		/*
		 *	Send it.
		 */

		tp->af_specific->queue_xmit(sk, buff->dev, buff, 1);
		tcp_statistics.TcpOutSegs++;
	}
}
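/*
 *	The seq of snd_nxt - 1 above is the classic zero-window probe: it
 *	names a byte the peer has already acked, so the segment itself is
 *	discarded, but the ack it provokes carries the peer's current
 *	window and lets us notice when the window reopens.
 */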
/*
 *	A window probe timeout has occurred.
 *	If window is not closed send a partial packet
 *	else a zero probe.
 */

void tcp_send_probe0(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	if (sk->zapped)
		return;	/* After a valid reset we can send no more */

	tcp_write_wakeup(sk);

	tp->pending = TIME_PROBE0;

	tp->backoff++;
	tcp_reset_xmit_timer(sk, TIME_PROBE0,
			     min(tp->rto << tp->backoff, 120*HZ));
}
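/*
 *	The probe interval backs off exponentially: with tp->rto = 3*HZ,
 *	successive probes go out roughly 6, 12, 24, 48, ... seconds apart
 *	as backoff increments, until the min() clamps the interval at
 *	120 seconds.
 */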