/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Version:	@(#)tcp_input.c	1.0.16	05/25/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Mark Evans, <evansmp@uhura.aston.ac.uk>
 *		Corey Minyard <wf-rch!minyard@relay.EU.net>
 *		Florian La Roche, <flla@stud.uni-sb.de>
 *		Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 *		Linus Torvalds, <torvalds@cs.helsinki.fi>
 *		Alan Cox, <gw4pts@gw4pts.ampr.org>
 *		Matthew Dillon, <dillon@apollo.west.oic.com>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 */

#include <linux/config.h>

/*
 *	This is the main buffer sending routine. We queue the buffer
 *	having checked it is sane seeming.
 */

void tcp_send_skb(struct sock *sk, struct sk_buff *skb)
{
	int size;
	struct tcphdr *th = skb->h.th;

	/*
	 *	length of packet (not counting length of pre-tcp headers)
	 */

	size = skb->len - ((unsigned char *) th - skb->data);

	/*
	 *	Sanity check it..
	 */

	if (size < sizeof(struct tcphdr) || size > skb->len)
	{
		printk("tcp_send_skb: bad skb (skb = %p, data = %p, th = %p, len = %lu)\n",
			skb, skb->data, th, skb->len);
		kfree_skb(skb, FREE_WRITE);
		return;
	}

	/*
	 *	If we have queued a header size packet.. (these crash a few
	 *	tcp stacks if ack is not set)
	 */

	if (size == sizeof(struct tcphdr))
	{
		/* If it's got a syn or fin it's notionally included in the size.. */
		if (!th->syn && !th->fin)
		{
			printk("tcp_send_skb: attempt to queue a bogon.\n");
			kfree_skb(skb, FREE_WRITE);
			return;
		}
	}

	/*
	 *	Actual processing.
	 */

	tcp_statistics.TcpOutSegs++;
	skb->seq = ntohl(th->seq);
	skb->end_seq = skb->seq + size - 4*th->doff;
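	/*
	 *	Illustrative note (not in the original source): with a plain
	 *	20 byte header (th->doff = 5), seq = 1000 and size = 560,
	 *	end_seq = 1000 + 560 - 20 = 1540, i.e. the sequence number
	 *	just past the last data byte carried by this frame.
	 */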
	/*
	 *	We must queue if
	 *
	 *	a) The right edge of this frame exceeds the window
	 *	b) We are retransmitting (Nagle's rule)
	 *	c) We have too many packets 'in flight'
	 */

	if (after(skb->end_seq, sk->window_seq) ||
	    (sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE) ||
	    sk->packets_out >= sk->cong_window)
	{
		/* checksum will be supplied by tcp_write_xmit.  So
		 * we shouldn't need to set it at all.  I'm being paranoid */
		th->check = 0;
		if (skb->next != NULL)
		{
			printk("tcp_send_partial: next != NULL\n");
			skb_unlink(skb);
		}
		skb_queue_tail(&sk->write_queue, skb);

		if (before(sk->window_seq, sk->write_queue.next->end_seq) &&
		    sk->send_head == NULL && sk->ack_backlog == 0)
			tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
	}
	else
	{
		/*
		 *	This is going straight out
		 */

		th->ack_seq = htonl(sk->acked_seq);
		th->window = htons(tcp_select_window(sk));

		tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

		sk->sent_seq = sk->write_seq;

		/*
		 *	This is mad. The tcp retransmit queue is put together
		 *	by the ip layer. This causes half the problems with
		 *	unroutable FIN's and other things.
		 */

		sk->prot->queue_xmit(sk, skb->dev, skb, 0);

		/*
		 *	Set for next retransmit based on expected ACK time.
		 *	FIXME: We set this every time which means our
		 *	retransmits are really about a window behind.
		 */

		tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
	}
}
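/*
 *	Illustrative sketch (not part of the original file; the helper name
 *	is ours): the queueing test above, restated as a stand-alone
 *	predicate. A frame may go straight out only if it fits inside the
 *	offered window, we are not in retransmit, and the congestion window
 *	is not yet full.
 */
static inline int tcp_frame_may_send(struct sock *sk, struct sk_buff *skb)
{
	return	!after(skb->end_seq, sk->window_seq) &&			  /* (a) */
		!(sk->retransmits && sk->ip_xmit_timeout == TIME_WRITE) && /* (b) */
		sk->packets_out < sk->cong_window;			  /* (c) */
}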
/*
 *	Locking problems lead us to a messy situation where we can have
 *	multiple partially complete buffers queued up. This is really bad
 *	as we don't want to be sending partial buffers. Fix this with
 *	a semaphore or similar to lock tcp_write per socket.
 *
 *	These routines are pretty self descriptive.
 */

struct sk_buff * tcp_dequeue_partial(struct sock * sk)
{
	struct sk_buff * skb;
	unsigned long flags;

	save_flags(flags);
	cli();
	skb = sk->partial;
	if (skb) {
		sk->partial = NULL;
		del_timer(&sk->partial_timer);
	}
	restore_flags(flags);
	return skb;
}

/*
 *	Empty the partial queue
 */

void tcp_send_partial(struct sock *sk)
{
	struct sk_buff *skb;

	if (sk == NULL)
		return;
	while ((skb = tcp_dequeue_partial(sk)) != NULL)
		tcp_send_skb(sk, skb);
}

/*
 *	Queue a partial frame
 */

void tcp_enqueue_partial(struct sk_buff * skb, struct sock * sk)
{
	struct sk_buff * tmp;
	unsigned long flags;

	save_flags(flags);
	cli();
	tmp = sk->partial;
	if (tmp)
		del_timer(&sk->partial_timer);
	sk->partial = skb;
	init_timer(&sk->partial_timer);

	/*
	 *	Wait up to 1 second for the buffer to fill.
	 */

	sk->partial_timer.expires = jiffies + HZ;
	sk->partial_timer.function = (void (*)(unsigned long)) tcp_send_partial;
	sk->partial_timer.data = (unsigned long) sk;
	add_timer(&sk->partial_timer);
	restore_flags(flags);
	if (tmp)
		tcp_send_skb(sk, tmp);
}
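/*
 *	Illustrative summary (not in the original source): the three
 *	routines above implement simple coalescing. A sub-MSS frame parked
 *	by tcp_enqueue_partial() is either picked up and extended by the
 *	caller, flushed explicitly through tcp_send_partial(), or sent
 *	anyway once the one second partial_timer expires.
 */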
/*
 *	This routine takes stuff off of the write queue,
 *	and puts it in the xmit queue. This happens as incoming acks
 *	open up the remote window for us.
 */

void tcp_write_xmit(struct sock *sk)
{
	struct sk_buff *skb;

	/*
	 *	The bytes will have to remain here. In time closedown will
	 *	empty the write queue and all will be happy
	 */

	if (sk->zapped)
		return;

	/*
	 *	Anything on the transmit queue that fits the window can
	 *	be added providing we are not
	 *
	 *	a) retransmitting (Nagle's rule)
	 *	b) exceeding our congestion window.
	 */

	while ((skb = skb_peek(&sk->write_queue)) != NULL &&
		before(skb->end_seq, sk->window_seq + 1) &&
		(sk->retransmits == 0 ||
		 sk->ip_xmit_timeout != TIME_WRITE ||
		 before(skb->end_seq, sk->rcv_ack_seq + 1))
		&& sk->packets_out < sk->cong_window)
	{
		IS_SKB(skb);
		skb_unlink(skb);

		/*
		 *	See if we really need to send the packet.
		 */

		if (before(skb->end_seq, sk->rcv_ack_seq + 1))
		{
			/*
			 *	This is acked data. We can discard it. This
			 *	cannot currently occur.
			 */

			sk->retransmits = 0;
			kfree_skb(skb, FREE_WRITE);
			if (!sk->dead)
				sk->write_space(sk);
		}
		else
		{
			struct tcphdr *th;
			struct iphdr *iph;
			int size;

			/*
			 * put in the ack seq and window at this point rather than earlier,
			 * in order to keep them monotonic.  We really want to avoid taking
			 * back window allocations.  That's legal, but RFC1122 says it's frowned on.
			 * Ack and window will in general have changed since this packet was put
			 * on the write queue.
			 */

			iph = skb->ip_hdr;
			th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));
			size = skb->len - (((unsigned char *) th) - skb->data);
#ifndef CONFIG_NO_PATH_MTU_DISCOVERY
			if (size > sk->mtu - sizeof(struct iphdr))
			{
				iph->frag_off &= ~htons(IP_DF);
				ip_send_check(iph);
			}
#endif

			th->ack_seq = htonl(sk->acked_seq);
			th->window = htons(tcp_select_window(sk));

			tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

			sk->sent_seq = skb->end_seq;

			/*
			 *	IP manages our queue for some crazy reason
			 */

			sk->prot->queue_xmit(sk, skb->dev, skb, skb->free);

			/*
			 *	Again we slide the timer wrongly
			 */

			tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
		}
	}
}
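/*
 *	Illustrative note (not in the original source): in sequence-space
 *	arithmetic, before(skb->end_seq, sk->window_seq + 1) is simply
 *	end_seq <= window_seq, so the while test above re-applies the same
 *	window / Nagle / congestion checks that tcp_send_skb uses when it
 *	decides whether a frame may go straight out.
 */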
/*
 *	A socket has timed out on its send queue and wants to do a
 *	little retransmitting. Currently this means TCP.
 */

void tcp_do_retransmit(struct sock *sk, int all)
{
	struct sk_buff * skb;
	struct proto *prot;
	struct device *dev;
	struct rtable *rt;
	int ct = 0;

	prot = sk->prot;
	skb = sk->send_head;

	while (skb != NULL)
	{
		struct tcphdr *th;
		struct iphdr *iph;
		int size;

		dev = skb->dev;
		IS_SKB(skb);
		skb->when = jiffies;

		/* dl1bke 960201 - @%$$! Hope this cures strange race conditions */
		/*		   with AX.25 mode VC. (esp. DAMA)		   */
		/*		   if the buffer is locked we should not retransmit */
		/*		   anyway, so we don't need all the fuss to prepare */
		/*		   the buffer in this case.			   */
		/*		   (the skb_pull() changes skb->data while we may   */
		/*		   actually try to send the data. Ouch. A side	   */
		/*		   effect is that we'll send some unnecessary data, */
		/*		   but the alternative is disastrous...		   */

		if (skb_device_locked(skb))
			break;

		/*
		 *	Discard the surplus MAC header
		 */

		skb_pull(skb, ((unsigned char *)skb->ip_hdr) - skb->data);

		/*
		 * In general it's OK just to use the old packet.  However we
		 * need to use the current ack and window fields.  Urg and
		 * urg_ptr could possibly stand to be updated as well, but we
		 * don't keep the necessary data.  That shouldn't be a problem,
		 * if the other end is doing the right thing.  Since we're
		 * changing the packet, we have to issue a new IP identifier.
		 */

		iph = (struct iphdr *)skb->data;
		th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));
		size = ntohs(iph->tot_len) - (iph->ihl << 2);

		/*
		 *	Note: We ought to check for window limits here but
		 *	currently this is done (less efficiently) elsewhere.
		 */

		/*
		 *	Put a MAC header back on (may cause ARPing)
		 */

		{
			/* ANK: UGLY, but the bug, that was here, should be fixed.
			 */
			struct options *  opt = (struct options *)skb->proto_priv;
			rt = ip_check_route(&sk->ip_route_cache, opt->srr ? opt->faddr : iph->daddr, skb->localroute);
		}

		iph->id = htons(ip_id_count++);
#ifndef CONFIG_NO_PATH_MTU_DISCOVERY
		if (rt && ntohs(iph->tot_len) > rt->rt_mtu)
			iph->frag_off &= ~htons(IP_DF);
#endif
		ip_send_check(iph);

		if (rt == NULL)	/* Deep poo */
		{
			if (skb->sk)
			{
				skb->sk->err_soft = ENETUNREACH;
				skb->sk->error_report(skb->sk);
			}
		}
		else
		{
			dev = rt->rt_dev;
			skb->raddr = rt->rt_gateway;
			skb->dev = dev;
			skb->arp = 1;
			if (rt->rt_hh)
			{
				memcpy(skb_push(skb, dev->hard_header_len), rt->rt_hh->hh_data, dev->hard_header_len);
				if (!rt->rt_hh->hh_uptodate)
				{
					skb->arp = 0;
#if RT_CACHE_DEBUG >= 2
					printk("tcp_do_retransmit: hh miss %08x via %08x\n", iph->daddr, rt->rt_gateway);
#endif
				}
			}
			else if (dev->hard_header)
			{
				if (dev->hard_header(skb, dev, ETH_P_IP, NULL, NULL, skb->len) < 0)
					skb->arp = 0;
			}

			/*
			 *	This is not the right way to handle this. We have to
			 *	issue an up to date window and ack report with this
			 *	retransmit to keep the odd buggy tcp that relies on
			 *	the fact BSD does this happy.
			 *	We don't however need to recalculate the entire
			 *	checksum, so someone wanting a small problem to play
			 *	with might like to implement RFC1141/RFC1624 and speed
			 *	this up by avoiding a full checksum.
			 */
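			/*
			 *	Sketch of that RFC 1624 update (illustrative,
			 *	not in the original file): when one 16-bit word
			 *	m of a checksummed header is changed to m', the
			 *	old checksum HC can be updated incrementally as
			 *
			 *		HC' = ~(~HC + ~m + m')
			 *
			 *	using one's complement addition, instead of
			 *	summing the whole segment again.
			 */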
			th->ack_seq = htonl(sk->acked_seq);
			th->window = ntohs(tcp_select_window(sk));
			tcp_send_check(th, sk->saddr, sk->daddr, size, skb);

			/*
			 *	If the interface is (still) up and running, kick it.
			 */

			if (dev->flags & IFF_UP)
			{
				/*
				 *	If the packet is still being sent by the device/protocol
				 *	below then don't retransmit. This is both needed, and good -
				 *	especially with connected mode AX.25 where it stops resends
				 *	occurring of an as yet unsent anyway frame!
				 *	We still add up the counts as the round trip time wants
				 *	adjusting.
				 */
				if (sk && !skb_device_locked(skb))
				{
					/* Remove it from any existing driver queue first! */
					skb_unlink(skb);
					/* Now queue it */
					ip_statistics.IpOutRequests++;
					dev_queue_xmit(skb, dev, sk->priority);
				}
			}
		}

		/*
		 *	Count retransmissions
		 */

		ct++;
		sk->prot->retransmits++;
		tcp_statistics.TcpRetransSegs++;

		/*
		 *	Only one retransmit requested.
		 */

		if (!all)
			break;

		/*
		 *	This should cut it off before we send too many packets.
		 */

		if (ct >= sk->cong_window)
			break;

		skb = skb->link3;
	}
}

/*
 *	This routine will send an RST to the other tcp.
 */
void tcp_send_reset(unsigned long saddr, unsigned long daddr, struct tcphdr *th,
	  struct proto *prot, struct options *opt, struct device *dev, int tos, int ttl)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	int tmp;
	struct device *ndev = NULL;

	/*
	 *	Cannot reset a reset (Think about it).
	 */

	if (th->rst)
		return;

	/*
	 *	We need to grab some memory, and put together an RST,
	 *	and then put it into the queue to be sent.
	 */

	buff = sock_wmalloc(NULL, MAX_RESET_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
		return;

	buff->localroute = 0;

	/*
	 *	Put in the IP header and routing stuff.
	 */

	tmp = prot->build_header(buff, saddr, daddr, &ndev, IPPROTO_TCP, opt,
			   sizeof(struct tcphdr), tos, ttl, NULL);
	if (tmp < 0)
	{
		buff->free = 1;
		sock_wfree(NULL, buff);
		return;
	}

	t1 = (struct tcphdr *)skb_put(buff, sizeof(struct tcphdr));
	memcpy(t1, th, sizeof(*t1));

	/*
	 *	Swap the send and the receive.
	 */

	t1->dest = th->source;
	t1->source = th->dest;
	t1->doff = sizeof(*t1)/4;
	t1->rst = 1;

	if (th->ack)
	{
		t1->seq = th->ack_seq;
	}
	else
	{
		t1->ack = 1;
		if (!th->syn)
			t1->ack_seq = th->seq;
		else
			t1->ack_seq = htonl(ntohl(th->seq) + 1);
	}
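	/*
	 *	Illustrative note (not in the original source): these are the
	 *	RFC 793 reset rules. If the offending segment carried an ACK,
	 *	the reset is sent with SEQ = SEG.ACK; otherwise it is sent
	 *	with an ACK covering the segment, and since a SYN occupies
	 *	one sequence number it accounts for the +1 above.
	 */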
	tcp_send_check(t1, saddr, daddr, sizeof(*t1), buff);
	prot->queue_xmit(NULL, ndev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}

/*
 *	Send a fin.
 */
void tcp_send_fin(struct sock *sk)
{
	struct proto *prot = (struct proto *)sk->prot;
	struct tcphdr *th = (struct tcphdr *)&sk->dummy_th;
	struct tcphdr *t1;
	struct sk_buff *buff;
	struct device *dev = NULL;
	int tmp;

	release_sock(sk);	/* in case the malloc sleeps. */

	buff = sock_wmalloc(sk, MAX_RESET_SIZE, 1, GFP_KERNEL);
	sk->inuse = 1;

	if (buff == NULL)
	{
		/* This is a disaster if it occurs */
		printk("tcp_send_fin: Impossible malloc failure");
		return;
	}

	buff->sk = sk;
	buff->localroute = sk->localroute;

	/*
	 *	Put in the IP header and routing stuff.
	 */

	tmp = prot->build_header(buff, sk->saddr, sk->daddr, &dev,
			   IPPROTO_TCP, sk->opt,
			   sizeof(struct tcphdr), sk->ip_tos, sk->ip_ttl, &sk->ip_route_cache);
	if (tmp < 0)
	{
		int t;
		/*
		 *	Finish anyway, treat this as a send that got lost.
		 */

		buff->free = 1;
		sock_wfree(sk, buff);
		sk->write_seq++;
		t = del_timer(&sk->timer);
		if (t)
			add_timer(&sk->timer);
		else
			tcp_reset_msl_timer(sk, TIME_CLOSE, TCP_TIMEWAIT_LEN);
		return;
	}

	/*
	 *	We ought to check if the end of the queue is a buffer and
	 *	if so simply add the fin to that buffer, not send it ahead.
	 */

	t1 = (struct tcphdr *)skb_put(buff, sizeof(struct tcphdr));
	buff->dev = dev;
	memcpy(t1, th, sizeof(*t1));
	buff->seq = sk->write_seq;
	sk->write_seq++;
	buff->end_seq = sk->write_seq;
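	/*
	 *	Note (not in the original source): the FIN itself occupies
	 *	one sequence number, which is why write_seq is advanced
	 *	between setting buff->seq and buff->end_seq above.
	 */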
	t1->seq = htonl(buff->seq);
	t1->ack_seq = htonl(sk->acked_seq);
	t1->window = htons(sk->window = tcp_select_window(sk));
	t1->fin = 1;
	tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);

	/*
	 * If there is data in the write queue, the fin must be appended to
	 * the write queue.
	 */

	if (skb_peek(&sk->write_queue) != NULL)
	{
		buff->free = 0;
		if (buff->next != NULL)
		{
			printk("tcp_send_fin: next != NULL\n");
			skb_unlink(buff);
		}
		skb_queue_tail(&sk->write_queue, buff);
	}
	else
	{
		sk->sent_seq = sk->write_seq;
		sk->prot->queue_xmit(sk, dev, buff, 0);
		tcp_reset_xmit_timer(sk, TIME_WRITE, sk->rto);
	}
}
void tcp_send_synack(struct sock * newsk, struct sock * sk, struct sk_buff * skb)
{
	struct tcphdr *t1;
	unsigned char *ptr;
	struct sk_buff * buff;
	struct device *ndev = NULL;
	int tmp;

	buff = sock_wmalloc(newsk, MAX_SYN_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		sk->err = ENOMEM;
		newsk->dead = 1;
		newsk->state = TCP_CLOSE;
		/* And this will destroy it */
		release_sock(newsk);
		kfree_skb(skb, FREE_READ);
		tcp_statistics.TcpAttemptFails++;
		return;
	}

	buff->sk = newsk;
	buff->localroute = newsk->localroute;

	/*
	 *	Put in the IP header and routing stuff.
	 */

	tmp = sk->prot->build_header(buff, newsk->saddr, newsk->daddr, &ndev,
			       IPPROTO_TCP, NULL, MAX_SYN_SIZE, sk->ip_tos, sk->ip_ttl, &newsk->ip_route_cache);

	/*
	 *	Something went wrong.
	 */

	if (tmp < 0)
	{
		sk->err = tmp;
		buff->free = 1;
		kfree_skb(buff, FREE_WRITE);
		newsk->dead = 1;
		newsk->state = TCP_CLOSE;
		release_sock(newsk);
		kfree_skb(skb, FREE_READ);
		tcp_statistics.TcpAttemptFails++;
		return;
	}

	t1 = (struct tcphdr *)skb_put(buff, sizeof(struct tcphdr));

	memcpy(t1, skb->h.th, sizeof(*t1));
	buff->seq = newsk->write_seq++;
	buff->end_seq = newsk->write_seq;

	/*
	 *	Swap the send and the receive.
	 */

	t1->dest = skb->h.th->source;
	t1->source = newsk->dummy_th.source;
	t1->seq = ntohl(buff->seq);
	newsk->sent_seq = newsk->write_seq;
	t1->window = ntohs(tcp_select_window(newsk));
	t1->syn = 1;
	t1->ack = 1;
	t1->ack_seq = htonl(newsk->acked_seq);
	t1->doff = sizeof(*t1)/4 + 1;
	ptr = skb_put(buff, 4);
	ptr[0] = 2;
	ptr[1] = 4;
	ptr[2] = ((newsk->mtu) >> 8) & 0xff;
	ptr[3] = (newsk->mtu) & 0xff;
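	/*
	 *	Illustrative note (not in the original source): the four bytes
	 *	just appended form the TCP MSS option: kind 2, length 4, then
	 *	the 16-bit maximum segment size in network byte order. This is
	 *	also why t1->doff above is one 32-bit word larger than the
	 *	bare header.
	 */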
	buff->csum = csum_partial(ptr, 4, 0);
	tcp_send_check(t1, newsk->saddr, newsk->daddr, sizeof(*t1) + 4, buff);
	newsk->prot->queue_xmit(newsk, ndev, buff, 0);
	tcp_reset_xmit_timer(newsk, TIME_WRITE, TCP_TIMEOUT_INIT);
	skb->sk = newsk;

	/*
	 *	Charge the sock_buff to newsk.
	 */

	sk->rmem_alloc -= skb->truesize;
	newsk->rmem_alloc += skb->truesize;

	skb_queue_tail(&sk->receive_queue, skb);
	sk->ack_backlog++;
	release_sock(newsk);
	tcp_statistics.TcpOutSegs++;
}

/*
 *	This routine sends an ack and also updates the window.
 */
void tcp_send_ack(u32 sequence, u32 ack,
	     struct sock *sk,
	     struct tcphdr *th, u32 daddr)
{
	struct sk_buff *buff;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	if (sk->zapped)
		return;	/* We have been reset, we may not send again */

	/*
	 * We need to grab some memory, and put together an ack,
	 * and then put it into the queue to be sent.
	 */

	buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
	if (buff == NULL)
	{
		/*
		 *	Force it to send an ack. We don't have to do this
		 *	(ACK is unreliable) but it's much better use of
		 *	bandwidth on slow links to send a spare ack than
		 *	resend packets.
		 */

		sk->ack_backlog++;
		if (sk->ip_xmit_timeout != TIME_WRITE && tcp_connected(sk->state))
		{
			tcp_reset_xmit_timer(sk, TIME_WRITE, HZ);
		}
		return;
	}

	/*
	 *	Assemble a suitable TCP frame
	 */

	buff->sk = sk;
	buff->localroute = sk->localroute;

	/*
	 *	Put in the IP header and routing stuff.
	 */

	tmp = sk->prot->build_header(buff, sk->saddr, daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE, sk->ip_tos, sk->ip_ttl, &sk->ip_route_cache);
	if (tmp < 0)
	{
		buff->free = 1;
		sock_wfree(sk, buff);
		return;
	}
	t1 = (struct tcphdr *)skb_put(buff, sizeof(struct tcphdr));

	memcpy(t1, &sk->dummy_th, sizeof(*t1));

	/*
	 *	Swap the send and the receive.
	 */

	t1->dest = th->source;
	t1->source = th->dest;
	t1->seq = ntohl(sequence);
	sk->window = tcp_select_window(sk);
	t1->window = ntohs(sk->window);

	/*
	 *	If we have nothing queued for transmit and the transmit timer
	 *	is on we are just doing an ACK timeout and need to switch
	 *	to a keepalive.
	 */

	if (ack == sk->acked_seq) {
		sk->ack_backlog = 0;
		sk->bytes_rcv = 0;
		sk->ack_timed = 0;

		if (sk->send_head == NULL && skb_peek(&sk->write_queue) == NULL
			&& sk->ip_xmit_timeout == TIME_WRITE)
		{
			tcp_reset_xmit_timer(sk, TIME_KEEPOPEN, TCP_TIMEOUT_LEN);
		}
	}

	/*
	 *	Fill in the packet and send it
	 */

	t1->ack_seq = htonl(ack);
	tcp_send_check(t1, sk->saddr, daddr, sizeof(*t1), buff);
	if (sk->debug)
		printk("\rtcp_ack: seq %x ack %x\n", sequence, ack);
	sk->prot->queue_xmit(sk, dev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}
/*
 *	This routine sends a packet with an out of date sequence
 *	number. It assumes the other end will try to ack it.
 */

void tcp_write_wakeup(struct sock *sk)
{
	struct sk_buff *buff, *skb;
	struct tcphdr *t1;
	struct device *dev = NULL;
	int tmp;

	if (sk->zapped)
		return;	/* After a valid reset we can send no more */

	/*
	 *	Write data can still be transmitted/retransmitted in the
	 *	following states. If any other state is encountered, return.
	 *	[listen/close will never occur here anyway]
	 */

	if (sk->state != TCP_ESTABLISHED &&
	    sk->state != TCP_CLOSE_WAIT &&
	    sk->state != TCP_FIN_WAIT1 &&
	    sk->state != TCP_LAST_ACK &&
	    sk->state != TCP_CLOSING)
	{
		return;
	}

	if (before(sk->sent_seq, sk->window_seq) &&
	    (skb = skb_peek(&sk->write_queue)))
	{
		/*
		 *	We are probing the opening of a window
		 *	but the window size is != 0
		 *	must have been a result of SWS avoidance ( sender )
		 */

		struct iphdr *iph;
		struct tcphdr *th;
		struct tcphdr *nth;
		unsigned long win_size;
#if 0
		unsigned long ow_size;
#endif

		/*
		 *	How many bytes can we send ?
		 */

		win_size = sk->window_seq - sk->sent_seq;
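		/*
		 *	Illustrative example (not in the original source): if
		 *	500 bytes sit in the write queue but the peer has only
		 *	opened 100 bytes of window beyond sent_seq, win_size is
		 *	100 and the code below copies just those 100 bytes into
		 *	a temporary frame instead of overrunning the window.
		 */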
		/*
		 *	Recover the buffer pointers
		 */

		iph = (struct iphdr *)skb->ip_hdr;
		th = (struct tcphdr *)(((char *)iph) + (iph->ihl << 2));

		/*
		 *	Grab the data for a temporary frame
		 */

		buff = sock_wmalloc(sk, win_size + th->doff * 4 +
				     (iph->ihl << 2) +
				     sk->prot->max_header + 15,
				     1, GFP_ATOMIC);
		if (buff == NULL)
			return;

		/*
		 *	If we strip the packet on the write queue we must
		 *	be ready to retransmit this one
		 */

		buff->free = 0;

		buff->sk = sk;
		buff->localroute = sk->localroute;

		/*
		 *	Put headers on the new packet
		 */

		tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
					IPPROTO_TCP, sk->opt, buff->truesize,
					sk->ip_tos, sk->ip_ttl, &sk->ip_route_cache);
		if (tmp < 0)
		{
			sock_wfree(sk, buff);
			return;
		}

		/*
		 *	Move the TCP header over
		 */

		buff->dev = dev;

		nth = (struct tcphdr *)skb_put(buff, sizeof(*th));

		memcpy(nth, th, sizeof(*th));

		/*
		 *	Correct the new header
		 */

		nth->ack = 1;
		nth->ack_seq = htonl(sk->acked_seq);
		nth->window = htons(tcp_select_window(sk));
		nth->check = 0;

		/*
		 *	Copy TCP options and data start to our new buffer
		 */

		buff->csum = csum_partial_copy((void *)(th + 1), skb_put(buff, win_size),
				win_size + th->doff * 4 - sizeof(*th), 0);

		/*
		 *	Remember our right edge sequence number.
		 */

		buff->end_seq = sk->sent_seq + win_size;
		sk->sent_seq = buff->end_seq;		/* Hack */
		if (th->urg && ntohs(th->urg_ptr) < win_size)
			nth->urg = 0;

		/*
		 *	Checksum the split buffer
		 */

		tcp_send_check(nth, sk->saddr, sk->daddr,
			       nth->doff * 4 + win_size, buff);
	}
	else
	{
		buff = sock_wmalloc(sk, MAX_ACK_SIZE, 1, GFP_ATOMIC);
		if (buff == NULL)
			return;

		buff->free = 1;
		buff->sk = sk;
		buff->localroute = sk->localroute;

		/*
		 *	Put in the IP header and routing stuff.
		 */

		tmp = sk->prot->build_header(buff, sk->saddr, sk->daddr, &dev,
				IPPROTO_TCP, sk->opt, MAX_ACK_SIZE, sk->ip_tos, sk->ip_ttl, &sk->ip_route_cache);
		if (tmp < 0)
		{
			sock_wfree(sk, buff);
			return;
		}

		t1 = (struct tcphdr *)skb_put(buff, sizeof(struct tcphdr));
		memcpy(t1, (void *) &sk->dummy_th, sizeof(*t1));

		/*
		 *	Use a previous sequence.
		 *	This should cause the other end to send an ack.
		 */

		t1->seq = htonl(sk->sent_seq - 1);
/*		t1->fin = 0;	-- We are sending a 'previous' sequence, and 0 bytes of data - thus no FIN bit */
		t1->ack_seq = htonl(sk->acked_seq);
		t1->window = htons(tcp_select_window(sk));
		tcp_send_check(t1, sk->saddr, sk->daddr, sizeof(*t1), buff);
	}

	/*
	 *	Send it.
	 */

	sk->prot->queue_xmit(sk, dev, buff, 1);
	tcp_statistics.TcpOutSegs++;
}

/*
 *	A window probe timeout has occurred.
 */
void tcp_send_probe0(struct sock *sk)
{
	if (sk->zapped)
		return;		/* After a valid reset we can send no more */

	tcp_write_wakeup(sk);

	sk->backoff++;
	sk->rto = min(sk->rto << 1, 120*HZ);
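	/*
	 *	Illustrative note (not in the original source): with HZ = 100
	 *	and an initial rto of one second, successive unanswered probes
	 *	go out after 1, 2, 4, 8, ... seconds, with the interval capped
	 *	at 120 seconds by the min() above.
	 */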
	sk->retransmits++;
	sk->prot->retransmits++;
	tcp_reset_xmit_timer(sk, TIME_PROBE0, sk->rto);
}