2 * INET An implementation of the TCP/IP protocol suite for the LINUX 3 * operating system. INET is implemented using the BSD Socket 4 * interface as the means of communication with the user level. 6 * Definitions for the TCP module. 8 * Version: @(#)tcp.h 1.0.5 05/23/93 10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu> 11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG> 13 * This program is free software; you can redistribute it and/or 14 * modify it under the terms of the GNU General Public License 15 * as published by the Free Software Foundation; either version 16 * 2 of the License, or (at your option) any later version. 22 #define FASTRETRANS_DEBUG 1 24 /* Be paranoid about data immediately beyond right edge of window. */ 25 #undef TCP_FORMAL_WINDOW 27 /* Cancel timers, when they are not required. */ 28 #undef TCP_CLEAR_TIMERS 30 #include <linux/config.h> 31 #include <linux/tcp.h> 32 #include <linux/slab.h> 33 #include <net/checksum.h> 36 /* This is for all connections with a full identity, no wildcards. 37 * New scheme, half the table is for TIME_WAIT, the other half is 38 * for the rest. I'll experiment with dynamic table growth later. 40 struct tcp_ehash_bucket
{ 43 }__attribute__((__aligned__(8))); 45 /* This is for listening sockets, thus all sockets which possess wildcards. */ 46 #define TCP_LHTABLE_SIZE 32/* Yes, really, this is all you need. */ 48 /* There are a few simple rules, which allow for local port reuse by 49 * an application. In essence: 51 * 1) Sockets bound to different interfaces may share a local port. 52 * Failing that, goto test 2. 53 * 2) If all sockets have sk->reuse set, and none of them are in 54 * TCP_LISTEN state, the port may be shared. 55 * Failing that, goto test 3. 56 * 3) If all sockets are bound to a specific sk->rcv_saddr local 57 * address, and none of them are the same, the port may be 59 * Failing this, the port cannot be shared. 61 * The interesting point, is test #2. This is what an FTP server does 62 * all day. To optimize this case we use a specific flag bit defined 63 * below. As we add sockets to a bind bucket list, we perform a 64 * check of: (newsk->reuse && (newsk->state != TCP_LISTEN)) 65 * As long as all sockets added to a bind bucket pass this test, 66 * the flag bit will be set. 67 * The resulting situation is that tcp_v[46]_verify_bind() can just check 68 * for this flag bit, if it is set and the socket trying to bind has 69 * sk->reuse set, we don't even have to walk the owners list at all, 70 * we return that it is ok to bind this socket to the requested local port. 72 * Sounds like a lot of work, but it is worth it. In a more naive 73 * implementation (ie. current FreeBSD etc.) the entire list of ports 74 * must be walked for each data port opened by an ftp server. Needless 75 * to say, this does not scale at all. With a couple thousand FTP 76 * users logged onto your box, isn't it nice to know that new data 77 * ports are created in O(1) time? I thought so. ;-) -DaveM 79 struct tcp_bind_bucket
{ 81 unsigned short fastreuse
; 82 struct tcp_bind_bucket
*next
; 84 struct tcp_bind_bucket
**pprev
; 87 struct tcp_bind_hashbucket
{ 89 struct tcp_bind_bucket
*chain
; 92 externstruct tcp_hashinfo
{ 93 /* This is for sockets with full identity only. Sockets here will 94 * always be without wildcards and will have the following invariant: 96 * TCP_ESTABLISHED <= sk->state < TCP_CLOSE 98 * First half of the table is for sockets not in TIME_WAIT, second half 99 * is for TIME_WAIT sockets only. 101 struct tcp_ehash_bucket
*__tcp_ehash
; 103 /* Ok, let's try this, I give up, we do need a local binding 104 * TCP hash as well as the others for fast bind/connect. 106 struct tcp_bind_hashbucket
*__tcp_bhash
; 108 int __tcp_bhash_size
; 109 int __tcp_ehash_size
; 111 /* All sockets in TCP_LISTEN state will be in here. This is the only 112 * table where wildcard'd TCP sockets can exist. Hash function here 113 * is just local port number. 115 struct sock
*__tcp_listening_hash
[TCP_LHTABLE_SIZE
]; 117 /* All the above members are written once at bootup and 118 * never written again _or_ are predominantly read-access. 120 * Now align to a new cache line as all the following members 123 rwlock_t __tcp_lhash_lock
124 __attribute__((__aligned__(SMP_CACHE_BYTES
))); 125 atomic_t __tcp_lhash_users
; 126 wait_queue_head_t __tcp_lhash_wait
; 127 spinlock_t __tcp_portalloc_lock
; 130 #define tcp_ehash (tcp_hashinfo.__tcp_ehash) 131 #define tcp_bhash (tcp_hashinfo.__tcp_bhash) 132 #define tcp_ehash_size (tcp_hashinfo.__tcp_ehash_size) 133 #define tcp_bhash_size (tcp_hashinfo.__tcp_bhash_size) 134 #define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash) 135 #define tcp_lhash_lock (tcp_hashinfo.__tcp_lhash_lock) 136 #define tcp_lhash_users (tcp_hashinfo.__tcp_lhash_users) 137 #define tcp_lhash_wait (tcp_hashinfo.__tcp_lhash_wait) 138 #define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock) 140 extern kmem_cache_t
*tcp_bucket_cachep
; 141 externstruct tcp_bind_bucket
*tcp_bucket_create(struct tcp_bind_hashbucket
*head
, 142 unsigned short snum
); 143 externvoidtcp_bucket_unlock(struct sock
*sk
); 144 externint tcp_port_rover
; 145 externstruct sock
*tcp_v4_lookup_listener(u32 addr
,unsigned short hnum
,int dif
); 147 /* These are AF independent. */ 148 static __inline__
inttcp_bhashfn(__u16 lport
) 150 return(lport
& (tcp_bhash_size
-1)); 153 /* This is a TIME_WAIT bucket. It works around the memory consumption 154 * problems of sockets in such a state on heavily loaded servers, but 155 * without violating the protocol specification. 157 struct tcp_tw_bucket
{ 158 /* These _must_ match the beginning of struct sock precisely. 159 * XXX Yes I know this is gross, but I'd have to edit every single 160 * XXX networking file if I created a "struct sock_header". -DaveM 169 struct sock
*bind_next
; 170 struct sock
**bind_pprev
; 172 substate
;/* "zapped" is replaced with "substate" */ 174 unsigned short family
; 176 rcv_wscale
;/* It is also TW bucket specific */ 179 /* And these are ours. */ 187 long ts_recent_stamp
; 189 struct tcp_bind_bucket
*tb
; 190 struct tcp_tw_bucket
*next_death
; 191 struct tcp_tw_bucket
**pprev_death
; 193 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 194 struct in6_addr v6_daddr
; 195 struct in6_addr v6_rcv_saddr
; 199 extern kmem_cache_t
*tcp_timewait_cachep
; 201 staticinlinevoidtcp_tw_put(struct tcp_tw_bucket
*tw
) 203 if(atomic_dec_and_test(&tw
->refcnt
)) { 204 #ifdef INET_REFCNT_DEBUG 205 printk(KERN_DEBUG
"tw_bucket %p released\n", tw
); 207 kmem_cache_free(tcp_timewait_cachep
, tw
); 211 extern atomic_t tcp_orphan_count
; 212 externint tcp_tw_count
; 213 externvoidtcp_time_wait(struct sock
*sk
,int state
,int timeo
); 214 externvoidtcp_timewait_kill(struct tcp_tw_bucket
*tw
); 215 externvoidtcp_tw_schedule(struct tcp_tw_bucket
*tw
,int timeo
); 216 externvoidtcp_tw_deschedule(struct tcp_tw_bucket
*tw
); 219 /* Socket demux engine toys. */ 221 #define TCP_COMBINED_PORTS(__sport, __dport) \ 222 (((__u32)(__sport)<<16) | (__u32)(__dport)) 223 #else/* __LITTLE_ENDIAN */ 224 #define TCP_COMBINED_PORTS(__sport, __dport) \ 225 (((__u32)(__dport)<<16) | (__u32)(__sport)) 228 #if (BITS_PER_LONG == 64) 230 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \ 231 __u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr)); 232 #else/* __LITTLE_ENDIAN */ 233 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \ 234 __u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr)); 235 #endif/* __BIG_ENDIAN */ 236 #define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\ 237 (((*((__u64 *)&((__sk)->daddr)))== (__cookie)) && \ 238 ((*((__u32 *)&((__sk)->dport)))== (__ports)) && \ 239 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif)))) 240 #else/* 32-bit arch */ 241 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) 242 #define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\ 243 (((__sk)->daddr == (__saddr)) && \ 244 ((__sk)->rcv_saddr == (__daddr)) && \ 245 ((*((__u32 *)&((__sk)->dport)))== (__ports)) && \ 246 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif)))) 247 #endif/* 64-bit arch */ 249 #define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \ 250 (((*((__u32 *)&((__sk)->dport)))== (__ports)) && \ 251 ((__sk)->family == AF_INET6) && \ 252 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.daddr, (__saddr)) && \ 253 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.rcv_saddr, (__daddr)) && \ 254 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif)))) 256 /* These can have wildcards, don't try too hard. */ 257 static __inline__
inttcp_lhashfn(unsigned short num
) 259 return num
& (TCP_LHTABLE_SIZE
-1); 262 static __inline__
inttcp_sk_listen_hashfn(struct sock
*sk
) 264 returntcp_lhashfn(sk
->num
); 267 #define MAX_TCP_HEADER (128 + MAX_HEADER) 270 * Never offer a window over 32767 without using window scaling. Some 271 * poor stacks do signed 16bit maths! 273 #define MAX_TCP_WINDOW 32767 275 /* Minimal accepted MSS. It is (60+60+8) - (20+20). */ 276 #define TCP_MIN_MSS 88 278 /* Minimal RCV_MSS. */ 279 #define TCP_MIN_RCVMSS 536 281 /* After receiving this amount of duplicate ACKs fast retransmit starts. */ 282 #define TCP_FASTRETRANS_THRESH 3 284 /* Maximal reordering. */ 285 #define TCP_MAX_REORDERING 127 287 /* Maximal number of ACKs sent quickly to accelerate slow-start. */ 288 #define TCP_MAX_QUICKACKS 16 290 /* urg_data states */ 291 #define TCP_URG_VALID 0x0100 292 #define TCP_URG_NOTYET 0x0200 293 #define TCP_URG_READ 0x0400 295 #define TCP_RETR1 3/* 296 * This is how many retries it does before it 297 * tries to figure out if the gateway is 298 * down. Minimal RFC value is 3; it corresponds 299 * to ~3sec-8min depending on RTO. 302 #define TCP_RETR2 15/* 303 * This should take at least 304 * 90 minutes to time out. 305 * RFC1122 says that the limit is 100 sec. 306 * 15 is ~13-30min depending on RTO. 309 #define TCP_SYN_RETRIES 5/* number of times to retry active opening a 310 * connection: ~180sec is RFC minumum */ 312 #define TCP_SYNACK_RETRIES 5/* number of times to retry passive opening a 313 * connection: ~180sec is RFC minumum */ 316 #define TCP_ORPHAN_RETRIES 7/* number of times to retry on an orphaned 317 * socket. 7 is ~50sec-16min. 321 #define TCP_TIMEWAIT_LEN (60*HZ)/* how long to wait to destroy TIME-WAIT 322 * state, about 60 seconds */ 323 #define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN 324 /* BSD style FIN_WAIT2 deadlock breaker. 
325 * It used to be 3min, new value is 60sec, 326 * to combine FIN-WAIT-2 timeout with 330 #define TCP_DELACK_MAX (HZ/5)/* maximal time to delay before sending an ACK */ 332 #define TCP_DELACK_MIN (HZ/25)/* minimal time to delay before sending an ACK */ 333 #define TCP_ATO_MIN (HZ/25) 335 #define TCP_DELACK_MIN 4 336 #define TCP_ATO_MIN 4 338 #define TCP_RTO_MAX (120*HZ) 339 #define TCP_RTO_MIN (HZ/5) 340 #define TCP_TIMEOUT_INIT (3*HZ)/* RFC 1122 initial RTO value */ 342 #define TCP_RESOURCE_PROBE_INTERVAL (HZ/2)/* Maximal interval between probes 343 * for local resources. 346 #define TCP_KEEPALIVE_TIME (120*60*HZ)/* two hours */ 347 #define TCP_KEEPALIVE_PROBES 9/* Max of 9 keepalive probes */ 348 #define TCP_KEEPALIVE_INTVL (75*HZ) 350 #define MAX_TCP_KEEPIDLE 32767 351 #define MAX_TCP_KEEPINTVL 32767 352 #define MAX_TCP_KEEPCNT 127 353 #define MAX_TCP_SYNCNT 127 355 /* TIME_WAIT reaping mechanism. */ 356 #define TCP_TWKILL_SLOTS 8/* Please keep this a power of 2. */ 357 #define TCP_TWKILL_PERIOD (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS) 359 #define TCP_SYNQ_INTERVAL (HZ/5)/* Period of SYNACK timer */ 360 #define TCP_SYNQ_HSIZE 64/* Size of SYNACK hash table */ 362 #define TCP_PAWS_24DAYS (60 * 60 * 24 * 24) 363 #define TCP_PAWS_MSL 60/* Per-host timestamps are invalidated 364 * after this time. It should be equal 365 * (or greater than) TCP_TIMEWAIT_LEN 366 * to provide reliability equal to one 367 * provided by timewait state. 369 #define TCP_PAWS_WINDOW 1/* Replay window for per-host 370 * timestamps. It must be less than 371 * minimal timewait lifetime. 374 #define TCP_TW_RECYCLE_SLOTS_LOG 5 375 #define TCP_TW_RECYCLE_SLOTS (1<<TCP_TW_RECYCLE_SLOTS_LOG) 377 /* If time > 4sec, it is "slow" path, no recycling is required, 378 so that we select tick to get range about 4 seconds. 
381 #if HZ <= 16 || HZ > 4096 382 # error Unsupported: HZ <= 16 or HZ > 4096 384 # define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG) 386 # define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG) 388 # define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG) 390 # define TCP_TW_RECYCLE_TICK (8+2-TCP_TW_RECYCLE_SLOTS_LOG) 392 # define TCP_TW_RECYCLE_TICK (9+2-TCP_TW_RECYCLE_SLOTS_LOG) 394 # define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG) 396 # define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG) 398 # define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG) 405 #define TCPOPT_NOP 1/* Padding */ 406 #define TCPOPT_EOL 0/* End of options */ 407 #define TCPOPT_MSS 2/* Segment size negotiating */ 408 #define TCPOPT_WINDOW 3/* Window scaling */ 409 #define TCPOPT_SACK_PERM 4/* SACK Permitted */ 410 #define TCPOPT_SACK 5/* SACK Block */ 411 #define TCPOPT_TIMESTAMP 8/* Better RTT estimations/PAWS */ 417 #define TCPOLEN_MSS 4 418 #define TCPOLEN_WINDOW 3 419 #define TCPOLEN_SACK_PERM 2 420 #define TCPOLEN_TIMESTAMP 10 422 /* But this is what stacks really send out. */ 423 #define TCPOLEN_TSTAMP_ALIGNED 12 424 #define TCPOLEN_WSCALE_ALIGNED 4 425 #define TCPOLEN_SACKPERM_ALIGNED 4 426 #define TCPOLEN_SACK_BASE 2 427 #define TCPOLEN_SACK_BASE_ALIGNED 4 428 #define TCPOLEN_SACK_PERBLOCK 8 430 #define TCP_TIME_RETRANS 1/* Retransmit timer */ 431 #define TCP_TIME_DACK 2/* Delayed ack timer */ 432 #define TCP_TIME_PROBE0 3/* Zero window probe timer */ 433 #define TCP_TIME_KEEPOPEN 4/* Keepalive timer */ 435 /* sysctl variables for tcp */ 436 externint sysctl_max_syn_backlog
; 437 externint sysctl_tcp_timestamps
; 438 externint sysctl_tcp_window_scaling
; 439 externint sysctl_tcp_sack
; 440 externint sysctl_tcp_fin_timeout
; 441 externint sysctl_tcp_tw_recycle
; 442 externint sysctl_tcp_keepalive_time
; 443 externint sysctl_tcp_keepalive_probes
; 444 externint sysctl_tcp_keepalive_intvl
; 445 externint sysctl_tcp_syn_retries
; 446 externint sysctl_tcp_synack_retries
; 447 externint sysctl_tcp_retries1
; 448 externint sysctl_tcp_retries2
; 449 externint sysctl_tcp_orphan_retries
; 450 externint sysctl_tcp_syncookies
; 451 externint sysctl_tcp_retrans_collapse
; 452 externint sysctl_tcp_stdurg
; 453 externint sysctl_tcp_rfc1337
; 454 externint sysctl_tcp_tw_recycle
; 455 externint sysctl_tcp_abort_on_overflow
; 456 externint sysctl_tcp_max_orphans
; 457 externint sysctl_tcp_max_tw_buckets
; 458 externint sysctl_tcp_fack
; 459 externint sysctl_tcp_reordering
; 460 externint sysctl_tcp_ecn
; 461 externint sysctl_tcp_dsack
; 462 externint sysctl_tcp_mem
[3]; 463 externint sysctl_tcp_wmem
[3]; 464 externint sysctl_tcp_rmem
[3]; 465 externint sysctl_tcp_app_win
; 466 externint sysctl_tcp_adv_win_scale
; 468 extern atomic_t tcp_memory_allocated
; 469 extern atomic_t tcp_sockets_allocated
; 470 externint tcp_memory_pressure
; 474 struct or_calltable
{ 476 int(*rtx_syn_ack
) (struct sock
*sk
,struct open_request
*req
,struct dst_entry
*); 477 void(*send_ack
) (struct sk_buff
*skb
,struct open_request
*req
); 478 void(*destructor
) (struct open_request
*req
); 479 void(*send_reset
) (struct sk_buff
*skb
); 482 struct tcp_v4_open_req
{ 485 struct ip_options
*opt
; 488 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 489 struct tcp_v6_open_req
{ 490 struct in6_addr loc_addr
; 491 struct in6_addr rmt_addr
; 492 struct sk_buff
*pktopts
; 497 /* this structure is too big */ 498 struct open_request
{ 499 struct open_request
*dl_next
;/* Must be first member! */ 513 /* The following two fields can be easily recomputed I think -AK */ 514 __u32 window_clamp
;/* window clamp at creation time */ 515 __u32 rcv_wnd
;/* rcv_wnd offered first time */ 517 unsigned long expires
; 518 struct or_calltable
*class; 521 struct tcp_v4_open_req v4_req
; 522 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 523 struct tcp_v6_open_req v6_req
; 528 /* SLAB cache for open requests. */ 529 extern kmem_cache_t
*tcp_openreq_cachep
; 531 #define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC) 532 #define tcp_openreq_fastfree(req) kmem_cache_free(tcp_openreq_cachep, req) 534 staticinlinevoidtcp_openreq_free(struct open_request
*req
) 536 req
->class->destructor(req
); 537 tcp_openreq_fastfree(req
); 540 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 541 #define TCP_INET_FAMILY(fam) ((fam) == AF_INET) 543 #define TCP_INET_FAMILY(fam) 1 547 * Pointers to address related TCP functions 548 * (i.e. things that depend on the address family) 550 * BUGGG_FUTURE: all the idea behind this struct is wrong. 551 * It mixes socket frontend with transport function. 552 * With port sharing between IPv6/v4 it gives the only advantage, 553 * only poor IPv6 needs to permanently recheck, that it 554 * is still IPv6 8)8) It must be cleaned up as soon as possible. 559 int(*queue_xmit
) (struct sk_buff
*skb
); 561 void(*send_check
) (struct sock
*sk
, 564 struct sk_buff
*skb
); 566 int(*rebuild_header
) (struct sock
*sk
); 568 int(*conn_request
) (struct sock
*sk
, 569 struct sk_buff
*skb
); 571 struct sock
* (*syn_recv_sock
) (struct sock
*sk
, 573 struct open_request
*req
, 574 struct dst_entry
*dst
); 576 int(*hash_connecting
) (struct sock
*sk
); 578 int(*remember_stamp
) (struct sock
*sk
); 580 __u16 net_header_len
; 582 int(*setsockopt
) (struct sock
*sk
, 588 int(*getsockopt
) (struct sock
*sk
, 595 void(*addr2sockaddr
) (struct sock
*sk
, 602 * The next routines deal with comparing 32 bit unsigned ints 603 * and worry about wraparound (automatic with unsigned arithmetic). 606 extern __inline
intbefore(__u32 seq1
, __u32 seq2
) 608 return(__s32
)(seq1
-seq2
) <0; 611 extern __inline
intafter(__u32 seq1
, __u32 seq2
) 613 return(__s32
)(seq2
-seq1
) <0; 617 /* is s2<=s1<=s3 ? */ 618 extern __inline
intbetween(__u32 seq1
, __u32 seq2
, __u32 seq3
) 620 return seq3
- seq2
>= seq1
- seq2
; 624 externstruct proto tcp_prot
; 626 externstruct tcp_mib tcp_statistics
[NR_CPUS
*2]; 627 #define TCP_INC_STATS(field) SNMP_INC_STATS(tcp_statistics, field) 628 #define TCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(tcp_statistics, field) 629 #define TCP_INC_STATS_USER(field) SNMP_INC_STATS_USER(tcp_statistics, field) 631 externvoidtcp_put_port(struct sock
*sk
); 632 externvoid__tcp_put_port(struct sock
*sk
); 633 externvoidtcp_inherit_port(struct sock
*sk
,struct sock
*child
); 635 externvoidtcp_v4_err(struct sk_buff
*skb
, 638 externvoidtcp_shutdown(struct sock
*sk
,int how
); 640 externinttcp_v4_rcv(struct sk_buff
*skb
, 643 externinttcp_v4_remember_stamp(struct sock
*sk
); 645 externinttcp_v4_tw_remember_stamp(struct tcp_tw_bucket
*tw
); 647 externinttcp_sendmsg(struct sock
*sk
,struct msghdr
*msg
,int size
); 649 externinttcp_ioctl(struct sock
*sk
, 653 externinttcp_rcv_state_process(struct sock
*sk
, 658 externinttcp_rcv_established(struct sock
*sk
, 670 staticinlinevoidtcp_schedule_ack(struct tcp_opt
*tp
) 672 tp
->ack
.pending
|= TCP_ACK_SCHED
; 675 staticinlineinttcp_ack_scheduled(struct tcp_opt
*tp
) 677 return tp
->ack
.pending
&TCP_ACK_SCHED
; 680 static __inline__
voidtcp_dec_quickack_mode(struct tcp_opt
*tp
) 682 if(tp
->ack
.quick
&& --tp
->ack
.quick
==0) { 683 /* Leaving quickack mode we deflate ATO. */ 684 tp
->ack
.ato
= TCP_ATO_MIN
; 688 externvoidtcp_enter_quickack_mode(struct tcp_opt
*tp
); 690 static __inline__
voidtcp_delack_init(struct tcp_opt
*tp
) 692 memset(&tp
->ack
,0,sizeof(tp
->ack
)); 695 staticinlinevoidtcp_clear_options(struct tcp_opt
*tp
) 697 tp
->tstamp_ok
= tp
->sack_ok
= tp
->wscale_ok
= tp
->snd_wscale
=0; 709 externenum tcp_tw_status
tcp_timewait_state_process(struct tcp_tw_bucket
*tw
, 714 externstruct sock
*tcp_check_req(struct sock
*sk
,struct sk_buff
*skb
, 715 struct open_request
*req
, 716 struct open_request
**prev
); 717 externinttcp_child_process(struct sock
*parent
, 719 struct sk_buff
*skb
); 720 externvoidtcp_enter_loss(struct sock
*sk
,int how
); 721 externvoidtcp_clear_retrans(struct tcp_opt
*tp
); 722 externvoidtcp_update_metrics(struct sock
*sk
); 724 externvoidtcp_close(struct sock
*sk
, 726 externstruct sock
*tcp_accept(struct sock
*sk
,int flags
,int*err
); 727 externunsigned inttcp_poll(struct file
* file
,struct socket
*sock
,struct poll_table_struct
*wait
); 728 externvoidtcp_write_space(struct sock
*sk
); 730 externinttcp_getsockopt(struct sock
*sk
,int level
, 731 int optname
,char*optval
, 733 externinttcp_setsockopt(struct sock
*sk
,int level
, 734 int optname
,char*optval
, 736 externvoidtcp_set_keepalive(struct sock
*sk
,int val
); 737 externinttcp_recvmsg(struct sock
*sk
, 739 int len
,int nonblock
, 740 int flags
,int*addr_len
); 742 externinttcp_listen_start(struct sock
*sk
); 744 externvoidtcp_parse_options(struct sk_buff
*skb
, 749 * TCP v4 functions exported for the inet6 API 752 externinttcp_v4_rebuild_header(struct sock
*sk
); 754 externinttcp_v4_build_header(struct sock
*sk
, 755 struct sk_buff
*skb
); 757 externvoidtcp_v4_send_check(struct sock
*sk
, 758 struct tcphdr
*th
,int len
, 759 struct sk_buff
*skb
); 761 externinttcp_v4_conn_request(struct sock
*sk
, 762 struct sk_buff
*skb
); 764 externstruct sock
*tcp_create_openreq_child(struct sock
*sk
, 765 struct open_request
*req
, 766 struct sk_buff
*skb
); 768 externstruct sock
*tcp_v4_syn_recv_sock(struct sock
*sk
, 770 struct open_request
*req
, 771 struct dst_entry
*dst
); 773 externinttcp_v4_do_rcv(struct sock
*sk
, 774 struct sk_buff
*skb
); 776 externinttcp_v4_connect(struct sock
*sk
, 777 struct sockaddr
*uaddr
, 780 externinttcp_connect(struct sock
*sk
, 781 struct sk_buff
*skb
); 783 externstruct sk_buff
*tcp_make_synack(struct sock
*sk
, 784 struct dst_entry
*dst
, 785 struct open_request
*req
); 787 externinttcp_disconnect(struct sock
*sk
,int flags
); 789 externvoidtcp_unhash(struct sock
*sk
); 791 externinttcp_v4_hash_connecting(struct sock
*sk
); 794 /* From syncookies.c */ 795 externstruct sock
*cookie_v4_check(struct sock
*sk
,struct sk_buff
*skb
, 796 struct ip_options
*opt
); 797 extern __u32
cookie_v4_init_sequence(struct sock
*sk
,struct sk_buff
*skb
, 802 externinttcp_write_xmit(struct sock
*); 803 externinttcp_retransmit_skb(struct sock
*,struct sk_buff
*); 804 externvoidtcp_xmit_retransmit_queue(struct sock
*); 805 externvoidtcp_simple_retransmit(struct sock
*); 807 externvoidtcp_send_probe0(struct sock
*); 808 externvoidtcp_send_partial(struct sock
*); 809 externinttcp_write_wakeup(struct sock
*); 810 externvoidtcp_send_fin(struct sock
*sk
); 811 externvoidtcp_send_active_reset(struct sock
*sk
,int priority
); 812 externinttcp_send_synack(struct sock
*); 813 externinttcp_transmit_skb(struct sock
*,struct sk_buff
*); 814 externvoidtcp_send_skb(struct sock
*,struct sk_buff
*,int force_queue
,unsigned mss_now
); 815 externvoidtcp_send_ack(struct sock
*sk
); 816 externvoidtcp_send_delayed_ack(struct sock
*sk
); 819 externvoidtcp_init_xmit_timers(struct sock
*); 820 externvoidtcp_clear_xmit_timers(struct sock
*); 822 externvoidtcp_delete_keepalive_timer(struct sock
*); 823 externvoidtcp_reset_keepalive_timer(struct sock
*,unsigned long); 824 externinttcp_sync_mss(struct sock
*sk
, u32 pmtu
); 826 externconst char timer_bug_msg
[]; 829 staticinlinevoidtcp_clear_xmit_timer(struct sock
*sk
,int what
) 831 struct tcp_opt
*tp
= &sk
->tp_pinfo
.af_tcp
; 834 case TCP_TIME_RETRANS
: 835 case TCP_TIME_PROBE0
: 838 #ifdef TCP_CLEAR_TIMERS 839 if(timer_pending(&tp
->retransmit_timer
) && 840 del_timer(&tp
->retransmit_timer
)) 848 #ifdef TCP_CLEAR_TIMERS 849 if(timer_pending(&tp
->delack_timer
) && 850 del_timer(&tp
->delack_timer
)) 855 printk(timer_bug_msg
); 862 * Reset the retransmission timer 864 staticinlinevoidtcp_reset_xmit_timer(struct sock
*sk
,int what
,unsigned long when
) 866 struct tcp_opt
*tp
= &sk
->tp_pinfo
.af_tcp
; 868 if(when
> TCP_RTO_MAX
) { 870 printk(KERN_DEBUG
"reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk
, what
, when
,current_text_addr()); 876 case TCP_TIME_RETRANS
: 877 case TCP_TIME_PROBE0
: 879 tp
->timeout
= jiffies
+when
; 880 if(!mod_timer(&tp
->retransmit_timer
, tp
->timeout
)) 885 tp
->ack
.pending
|= TCP_ACK_TIMER
; 886 tp
->ack
.timeout
= jiffies
+when
; 887 if(!mod_timer(&tp
->delack_timer
, tp
->ack
.timeout
)) 892 printk(KERN_DEBUG
"bug: unknown timer value\n"); 896 /* Compute the current effective MSS, taking SACKs and IP options, 897 * and even PMTU discovery events into account. 900 static __inline__
unsigned inttcp_current_mss(struct sock
*sk
) 902 struct tcp_opt
*tp
= &sk
->tp_pinfo
.af_tcp
; 903 struct dst_entry
*dst
=__sk_dst_get(sk
); 904 int mss_now
= tp
->mss_cache
; 906 if(dst
&& dst
->pmtu
!= tp
->pmtu_cookie
) 907 mss_now
=tcp_sync_mss(sk
, dst
->pmtu
); 910 mss_now
-= (TCPOLEN_SACK_BASE_ALIGNED
+ 911 (tp
->eff_sacks
* TCPOLEN_SACK_PERBLOCK
)); 915 /* Initialize RCV_MSS value. 916 * RCV_MSS is an our guess about MSS used by the peer. 917 * We haven't any direct information about the MSS. 918 * It's better to underestimate the RCV_MSS rather than overestimate. 919 * Overestimations make us ACKing less frequently than needed. 920 * Underestimations are more easy to detect and fix by tcp_measure_rcv_mss(). 923 staticinlinevoidtcp_initialize_rcv_mss(struct sock
*sk
) 925 struct tcp_opt
*tp
= &sk
->tp_pinfo
.af_tcp
; 926 int hint
=min(tp
->advmss
, tp
->mss_cache
); 928 hint
=min(hint
, tp
->rcv_wnd
/2); 930 tp
->ack
.rcv_mss
=max(min(hint
, TCP_MIN_RCVMSS
), TCP_MIN_MSS
); 933 static __inline__
void__tcp_fast_path_on(struct tcp_opt
*tp
, u32 snd_wnd
) 935 tp
->pred_flags
=htonl((tp
->tcp_header_len
<<26) | 936 ntohl(TCP_FLAG_ACK
) | 940 static __inline__
voidtcp_fast_path_on(struct tcp_opt
*tp
) 942 __tcp_fast_path_on(tp
, tp
->snd_wnd
>>tp
->snd_wscale
); 945 /* Compute the actual receive window we are currently advertising. 946 * Rcv_nxt can be after the window if our peer push more data 947 * than the offered window. 949 static __inline__ u32
tcp_receive_window(struct tcp_opt
*tp
) 951 s32 win
= tp
->rcv_wup
+ tp
->rcv_wnd
- tp
->rcv_nxt
; 958 /* Choose a new window, without checks for shrinking, and without 959 * scaling applied to the result. The caller does these things 960 * if necessary. This is a "raw" window selection. 962 extern u32
__tcp_select_window(struct sock
*sk
); 964 /* TCP timestamps are only 32-bits, this causes a slight 965 * complication on 64-bit systems since we store a snapshot 966 * of jiffies in the buffer control blocks below. We decidely 967 * only use of the low 32-bits of jiffies and hide the ugly 968 * casts with the following macro. 970 #define tcp_time_stamp ((__u32)(jiffies)) 972 /* This is what the send packet queueing engine uses to pass 973 * TCP per-packet control information to the transmission 974 * code. We also store the host-order sequence numbers in 975 * here too. This is 36 bytes on 32-bit architectures, 976 * 40 bytes on 64-bit machines, if this grows please adjust 977 * skbuff.h:skbuff->cb[xxx] size appropriately. 981 struct inet_skb_parm h4
; 982 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE) 983 struct inet6_skb_parm h6
; 985 } header
;/* For incoming frames */ 986 __u32 seq
;/* Starting sequence number */ 987 __u32 end_seq
;/* SEQ + FIN + SYN + datalen */ 988 __u32 when
;/* used to compute rtt's */ 989 __u8 flags
;/* TCP header flags. */ 991 /* NOTE: These must match up to the flags byte in a 994 #define TCPCB_FLAG_FIN 0x01 995 #define TCPCB_FLAG_SYN 0x02 996 #define TCPCB_FLAG_RST 0x04 997 #define TCPCB_FLAG_PSH 0x08 998 #define TCPCB_FLAG_ACK 0x10 999 #define TCPCB_FLAG_URG 0x20 1000 #define TCPCB_FLAG_ECE 0x40 1001 #define TCPCB_FLAG_CWR 0x80 1003 __u8 sacked
;/* State flags for SACK/FACK. */ 1004 #define TCPCB_SACKED_ACKED 0x01/* SKB ACK'd by a SACK block */ 1005 #define TCPCB_SACKED_RETRANS 0x02/* SKB retransmitted */ 1006 #define TCPCB_LOST 0x04/* SKB is lost */ 1007 #define TCPCB_TAGBITS 0x07/* All tag bits */ 1009 #define TCPCB_EVER_RETRANS 0x80/* Ever retransmitted frame */ 1010 #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS) 1012 #define TCPCB_URG 0x20/* Urgent pointer advenced here */ 1014 #define TCPCB_AT_TAIL (TCPCB_URG) 1016 __u16 urg_ptr
;/* Valid w/URG flags is set. */ 1017 __u32 ack_seq
;/* Sequence number ACK'd */ 1020 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0])) 1022 #define for_retrans_queue(skb, sk, tp) \ 1023 for (skb = (sk)->write_queue.next; \ 1024 (skb != (tp)->send_head) && \ 1025 (skb != (struct sk_buff *)&(sk)->write_queue); \ 1029 #include <net/tcp_ecn.h> 1033 * Compute minimal free write space needed to queue new packets. 1035 staticinlineinttcp_min_write_space(struct sock
*sk
) 1037 return sk
->wmem_queued
/2; 1040 staticinlineinttcp_wspace(struct sock
*sk
) 1042 return sk
->sndbuf
- sk
->wmem_queued
; 1046 /* This determines how many packets are "in the network" to the best 1047 * of our knowledge. In many cases it is conservative, but where 1048 * detailed information is available from the receiver (via SACK 1049 * blocks etc.) we can make more aggressive calculations. 1051 * Use this for decisions involving congestion control, use just 1052 * tp->packets_out to determine if the send queue is empty or not. 1054 * Read this equation as: 1056 * "Packets sent once on transmission queue" MINUS 1057 * "Packets left network, but not honestly ACKed yet" PLUS 1058 * "Packets fast retransmitted" 1060 static __inline__
inttcp_packets_in_flight(struct tcp_opt
*tp
) 1062 return tp
->packets_out
- tp
->left_out
+ tp
->retrans_out
; 1065 /* Recalculate snd_ssthresh, we want to set it to: 1067 * one half the current congestion window, but no 1068 * less than two segments 1070 staticinline __u32
tcp_recalc_ssthresh(struct tcp_opt
*tp
) 1072 returnmax(tp
->snd_cwnd
>>1,2); 1075 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd. 1076 * The exception is rate halving phase, when cwnd is decreasing towards 1079 staticinline __u32
tcp_current_ssthresh(struct tcp_opt
*tp
) 1081 if((1<<tp
->ca_state
)&(TCPF_CA_CWR
|TCPF_CA_Recovery
)) 1082 return tp
->snd_ssthresh
; 1084 returnmax(tp
->snd_ssthresh
, (tp
->snd_cwnd
>>1)+(tp
->snd_cwnd
>>2)); 1087 externvoidtcp_cwnd_application_limited(struct sock
*sk
); 1089 /* Congestion window validation. (RFC2861) */ 1091 staticinlinevoidtcp_cwnd_validate(struct sock
*sk
,struct tcp_opt
*tp
) 1093 if(tp
->packets_out
>= tp
->snd_cwnd
) { 1094 /* Network is feed fully. */ 1095 tp
->snd_cwnd_used
=0; 1096 tp
->snd_cwnd_stamp
= tcp_time_stamp
; 1098 /* Network starves. */ 1099 if(tp
->packets_out
> tp
->snd_cwnd_used
) 1100 tp
->snd_cwnd_used
= tp
->packets_out
; 1102 if((s32
)(tcp_time_stamp
- tp
->snd_cwnd_stamp
) >= tp
->rto
) 1103 tcp_cwnd_application_limited(sk
); 1107 /* Set slow start threshould and cwnd not falling to slow start */ 1108 staticinlinevoid__tcp_enter_cwr(struct tcp_opt
*tp
) 1111 tp
->snd_ssthresh
=tcp_recalc_ssthresh(tp
); 1112 tp
->snd_cwnd
=min(tp
->snd_cwnd
,tcp_packets_in_flight(tp
)+1); 1113 tp
->snd_cwnd_cnt
=0; 1114 tp
->high_seq
= tp
->snd_nxt
; 1115 tp
->snd_cwnd_stamp
= tcp_time_stamp
; 1116 TCP_ECN_queue_cwr(tp
); 1119 staticinlinevoidtcp_enter_cwr(struct tcp_opt
*tp
) 1121 tp
->prior_ssthresh
=0; 1122 if(tp
->ca_state
< TCP_CA_CWR
) { 1123 __tcp_enter_cwr(tp
); 1124 tp
->ca_state
= TCP_CA_CWR
; 1128 extern __u32
tcp_init_cwnd(struct tcp_opt
*tp
); 1130 /* Slow start with delack produces 3 packets of burst, so that 1131 * it is safe "de facto". 1133 static __inline__ __u32
tcp_max_burst(struct tcp_opt
*tp
) 1138 static __inline__
inttcp_minshall_check(struct tcp_opt
*tp
) 1140 returnafter(tp
->snd_sml
,tp
->snd_una
) && 1141 !after(tp
->snd_sml
, tp
->snd_nxt
); 1144 static __inline__
voidtcp_minshall_update(struct tcp_opt
*tp
,int mss
,struct sk_buff
*skb
) 1147 tp
->snd_sml
=TCP_SKB_CB(skb
)->end_seq
; 1150 /* Return 0, if packet can be sent now without violation Nagle's rules: 1151 1. It is full sized. 1152 2. Or it contains FIN. 1153 3. Or TCP_NODELAY was set. 1154 4. Or TCP_CORK is not set, and all sent packets are ACKed. 1155 With Minshall's modification: all sent small packets are ACKed. 1158 static __inline__
int 1159 tcp_nagle_check(struct tcp_opt
*tp
,struct sk_buff
*skb
,unsigned mss_now
,int nonagle
) 1161 return(skb
->len
< mss_now
&& 1162 !(TCP_SKB_CB(skb
)->flags
& TCPCB_FLAG_FIN
) && 1166 tcp_minshall_check(tp
)))); 1169 /* This checks if the data bearing packet SKB (usually tp->send_head) 1170 * should be put on the wire right now. 1172 static __inline__
inttcp_snd_test(struct tcp_opt
*tp
,struct sk_buff
*skb
, 1173 unsigned cur_mss
,int nonagle
) 1175 /* RFC 1122 - section 4.2.3.4 1179 * a) The right edge of this frame exceeds the window 1180 * b) There are packets in flight and we have a small segment 1181 * [SWS avoidance and Nagle algorithm] 1182 * (part of SWS is done on packetization) 1183 * Minshall version sounds: there are no _small_ 1184 * segments in flight. (tcp_nagle_check) 1185 * c) We have too many packets 'in flight' 1187 * Don't use the nagle rule for urgent data (or 1188 * for the final FIN -DaveM). 1190 * Also, Nagle rule does not apply to frames, which 1191 * sit in the middle of queue (they have no chances 1192 * to get new data) and if room at tail of skb is 1193 * not enough to save something seriously (<32 for now). 1196 /* Don't be strict about the congestion window for the 1197 * final FIN frame. -DaveM 1199 return((nonagle
==1|| tp
->urg_mode
1200 || !tcp_nagle_check(tp
, skb
, cur_mss
, nonagle
)) && 1201 ((tcp_packets_in_flight(tp
) < tp
->snd_cwnd
) || 1202 (TCP_SKB_CB(skb
)->flags
& TCPCB_FLAG_FIN
)) && 1203 !after(TCP_SKB_CB(skb
)->end_seq
, tp
->snd_una
+ tp
->snd_wnd
)); 1206 static __inline__
voidtcp_check_probe_timer(struct sock
*sk
,struct tcp_opt
*tp
) 1208 if(!tp
->packets_out
&& !tp
->pending
) 1209 tcp_reset_xmit_timer(sk
, TCP_TIME_PROBE0
, tp
->rto
); 1212 static __inline__
inttcp_skb_is_last(struct sock
*sk
,struct sk_buff
*skb
) 1214 return(skb
->next
== (struct sk_buff
*)&sk
->write_queue
); 1217 /* Push out any pending frames which were held back due to 1218 * TCP_CORK or attempt at coalescing tiny packets. 1219 * The socket must be locked by the caller. 1221 static __inline__
void__tcp_push_pending_frames(struct sock
*sk
, 1226 struct sk_buff
*skb
= tp
->send_head
; 1229 if(!tcp_skb_is_last(sk
, skb
)) 1231 if(!tcp_snd_test(tp
, skb
, cur_mss
, nonagle
) || 1233 tcp_check_probe_timer(sk
, tp
); 1235 tcp_cwnd_validate(sk
, tp
); 1238 static __inline__
voidtcp_push_pending_frames(struct sock
*sk
, 1241 __tcp_push_pending_frames(sk
, tp
,tcp_current_mss(sk
), tp
->nonagle
); 1244 static __inline__
inttcp_may_send_now(struct sock
*sk
,struct tcp_opt
*tp
) 1246 struct sk_buff
*skb
= tp
->send_head
; 1249 tcp_snd_test(tp
, skb
,tcp_current_mss(sk
), 1250 tcp_skb_is_last(sk
, skb
) ?1: tp
->nonagle
)); 1253 static __inline__
/* Initialise the window-update bookkeeping for a new connection.
 * NOTE(review): body lost in extraction; upstream records seq in
 * snd_wl1 and ack in snd_wl2 — verify.
 */
static __inline__ void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
{
        tp->snd_wl1 = seq;
        tp->snd_wl2 = ack;
}
/* Remember the segment that last updated the send window.
 * NOTE(review): body lost in extraction; upstream records seq in
 * snd_wl1 and ack in snd_wl2 — verify.
 */
static __inline__ void tcp_update_wl(struct tcp_opt *tp, u32 ack, u32 seq)
{
        tp->snd_wl1 = seq;
        tp->snd_wl2 = ack;
}
*sk
); 1267 * Calculate(/check) TCP checksum 1269 static __inline__ u16
tcp_v4_check(struct tcphdr
*th
,int len
, 1270 unsigned long saddr
,unsigned long daddr
, 1273 returncsum_tcpudp_magic(saddr
,daddr
,len
,IPPROTO_TCP
,base
); 1276 static __inline__
int__tcp_checksum_complete(struct sk_buff
*skb
) 1278 return(unsigned short)csum_fold(csum_partial(skb
->h
.raw
, skb
->len
, skb
->csum
)); 1281 static __inline__
inttcp_checksum_complete(struct sk_buff
*skb
) 1283 return skb
->ip_summed
!= CHECKSUM_UNNECESSARY
&& 1284 __tcp_checksum_complete(skb
); 1287 /* Prequeue for VJ style copy to user, combined with checksumming. */ 1289 static __inline__
voidtcp_prequeue_init(struct tcp_opt
*tp
) 1291 tp
->ucopy
.task
= NULL
; 1293 tp
->ucopy
.memory
=0; 1294 skb_queue_head_init(&tp
->ucopy
.prequeue
); 1297 /* Packet is added to VJ-style prequeue for processing in process 1298 * context, if a reader task is waiting. Apparently, this exciting 1299 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93) 1300 * failed somewhere. Latency? Burstiness? Well, at least now we will 1301 * see, why it failed. 8)8) --ANK 1303 static __inline__
inttcp_prequeue(struct sock
*sk
,struct sk_buff
*skb
) 1305 struct tcp_opt
*tp
= &sk
->tp_pinfo
.af_tcp
; 1307 if(tp
->ucopy
.task
) { 1308 if((tp
->ucopy
.memory
+= skb
->truesize
) <= (sk
->rcvbuf
<<1)) { 1309 __skb_queue_tail(&tp
->ucopy
.prequeue
, skb
); 1310 if(skb_queue_len(&tp
->ucopy
.prequeue
) ==1) { 1311 wake_up_interruptible(sk
->sleep
); 1312 if(!tcp_ack_scheduled(tp
)) 1313 tcp_reset_xmit_timer(sk
, TCP_TIME_DACK
, (3*TCP_RTO_MIN
)/4); 1316 NET_INC_STATS_BH(TCPPrequeueDropped
); 1317 tp
->ucopy
.memory
-= skb
->truesize
; 1329 static char*statename
/* Human-readable names for the TCP states, used only by the debug
 * output in tcp_set_state().
 * NOTE(review): the STATE_TRACE guard appears to have been lost in
 * extraction and is restored here — verify against upstream.
 */
#ifdef STATE_TRACE
static char *statename[] = {
        "Unused","Established","Syn Sent","Syn Recv",
        "Fin Wait 1","Fin Wait 2","Time Wait","Close",
        "Close Wait","Last ACK","Listen","Closing"
};
#endif
voidtcp_set_state(struct sock
*sk
,int state
) 1338 int oldstate
= sk
->state
; 1341 case TCP_ESTABLISHED
: 1342 if(oldstate
!= TCP_ESTABLISHED
) 1343 TCP_INC_STATS(TcpCurrEstab
); 1347 sk
->prot
->unhash(sk
); 1348 if(sk
->prev
&& !(sk
->userlocks
&SOCK_BINDPORT_LOCK
)) 1352 if(oldstate
==TCP_ESTABLISHED
) 1353 tcp_statistics
[smp_processor_id()*2+!in_softirq()].TcpCurrEstab
--; 1356 /* Change state AFTER socket is unhashed to avoid closed 1357 * socket sitting in hash tables. 1362 SOCK_DEBUG(sk
,"TCP sk=%p, State %s -> %s\n",sk
, statename
[oldstate
],statename
[state
]); 1366 static __inline__
voidtcp_done(struct sock
*sk
) 1368 tcp_set_state(sk
, TCP_CLOSE
); 1369 tcp_clear_xmit_timers(sk
); 1371 sk
->shutdown
= SHUTDOWN_MASK
; 1374 sk
->state_change(sk
); 1376 tcp_destroy_sock(sk
); 1379 static __inline__
voidtcp_sack_reset(struct tcp_opt
*tp
) 1386 static __inline__
/* Write the TCP options (timestamp and any pending SACK blocks) for an
 * established-state segment into the option words at 'ptr'.
 */
static __inline__ void tcp_build_and_update_options(__u32 *ptr,
                                                    struct tcp_opt *tp,
                                                    __u32 tstamp)
{
        /* NOTE(review): the option-kind guards and some constant words
         * were lost in extraction and restored from the upstream 2.4
         * stack — verify.
         */
        if (tp->tstamp_ok) {
                *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
                                          (TCPOPT_NOP << 16) |
                                          (TCPOPT_TIMESTAMP << 8) |
                                          TCPOLEN_TIMESTAMP);
                *ptr++ = htonl(tstamp);
                *ptr++ = htonl(tp->ts_recent);
        }
        if (tp->eff_sacks) {
                struct tcp_sack_block *sp = tp->dsack ?
                        tp->duplicate_sack : tp->selective_acks;
                int this_sack;

                *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
                                          (TCPOPT_NOP << 16) |
                                          (TCPOPT_SACK << 8) |
                                          (TCPOLEN_SACK_BASE +
                                           (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)));
                for (this_sack = 0; this_sack < tp->eff_sacks; this_sack++) {
                        *ptr++ = htonl(sp[this_sack].start_seq);
                        *ptr++ = htonl(sp[this_sack].end_seq);
                }
                if (tp->dsack) {
                        tp->dsack = 0;
                        tp->eff_sacks--;
                }
        }
}
*ptr
,int mss
,int ts
,int sack
, 1422 int offer_wscale
,int wscale
, __u32 tstamp
, __u32 ts_recent
) 1424 /* We always get an MSS option. 1425 * The option bytes which will be seen in normal data 1426 * packets should timestamps be used, must be in the MSS 1427 * advertised. But we subtract them from tp->mss_cache so 1428 * that calculations in tcp_sendmsg are simpler etc. 1429 * So account for this fact here if necessary. If we 1430 * don't do this correctly, as a receiver we won't 1431 * recognize data packets as being full sized when we 1432 * should, and thus we won't abide by the delayed ACK 1434 * SACKs don't matter, we never delay an ACK when we 1435 * have any of those going out. 1437 *ptr
++ =htonl((TCPOPT_MSS
<<24) | (TCPOLEN_MSS
<<16) | mss
); 1440 *ptr
++ =__constant_htonl((TCPOPT_SACK_PERM
<<24) | (TCPOLEN_SACK_PERM
<<16) | 1441 (TCPOPT_TIMESTAMP
<<8) | TCPOLEN_TIMESTAMP
); 1443 *ptr
++ =__constant_htonl((TCPOPT_NOP
<<24) | (TCPOPT_NOP
<<16) | 1444 (TCPOPT_TIMESTAMP
<<8) | TCPOLEN_TIMESTAMP
); 1445 *ptr
++ =htonl(tstamp
);/* TSVAL */ 1446 *ptr
++ =htonl(ts_recent
);/* TSECR */ 1448 *ptr
++ =__constant_htonl((TCPOPT_NOP
<<24) | (TCPOPT_NOP
<<16) | 1449 (TCPOPT_SACK_PERM
<<8) | TCPOLEN_SACK_PERM
); 1451 *ptr
++ =htonl((TCPOPT_NOP
<<24) | (TCPOPT_WINDOW
<<16) | (TCPOLEN_WINDOW
<<8) | (wscale
)); 1454 /* Determine a window scaling and initial window to offer. 1455 * Based on the assumption that the given amount of space 1456 * will be offered. Store the results in the tp structure. 1457 * NOTE: for smooth operation initial space offering should 1458 * be a multiple of mss if possible. We assume here that mss >= 1. 1459 * This MUST be enforced by all callers. 1461 staticinlinevoidtcp_select_initial_window(int space
, __u32 mss
, 1463 __u32
*window_clamp
, 1467 /* If no clamp set the clamp to the max possible scaled window */ 1468 if(*window_clamp
==0) 1469 (*window_clamp
) = (65535<<14); 1470 space
=min(*window_clamp
,space
); 1472 /* Quantize space offering to a multiple of mss if possible. */ 1474 space
= (space
/mss
)*mss
; 1476 /* NOTE: offering an initial window larger than 32767 1477 * will break some buggy TCP stacks. We try to be nice. 1478 * If we are not window scaling, then this truncates 1479 * our initial window offering to 32k. There should also 1480 * be a sysctl option to stop being nice. 1482 (*rcv_wnd
) =min(space
, MAX_TCP_WINDOW
); 1485 /* See RFC1323 for an explanation of the limit to 14 */ 1486 while(space
>65535&& (*rcv_wscale
) <14) { 1490 if(*rcv_wscale
&& sysctl_tcp_app_win
&& space
>=mss
&& 1491 space
-max((space
>>sysctl_tcp_app_win
), mss
>>*rcv_wscale
) <65536/2) 1495 /* Set initial window to value enough for senders, 1496 * following RFC1414. Senders, not following this RFC, 1497 * will be satisfied with 2. 1499 if(mss
> (1<<*rcv_wscale
)) { 1505 if(*rcv_wnd
> init_cwnd
*mss
) 1506 *rcv_wnd
= init_cwnd
*mss
; 1508 /* Set the clamp no higher than max representable value */ 1509 (*window_clamp
) =min(65535<<(*rcv_wscale
),*window_clamp
); 1512 staticinlineinttcp_win_from_space(int space
) 1514 return sysctl_tcp_adv_win_scale
<=0? 1515 (space
>>(-sysctl_tcp_adv_win_scale
)) : 1516 space
- (space
>>sysctl_tcp_adv_win_scale
); 1519 /* Note: caller must be prepared to deal with negative returns */ 1520 staticinlineinttcp_space(struct sock
*sk
) 1522 returntcp_win_from_space(sk
->rcvbuf
-atomic_read(&sk
->rmem_alloc
)); 1525 staticinlineinttcp_full_space(struct sock
*sk
) 1527 returntcp_win_from_space(sk
->rcvbuf
); 1530 staticinlinevoidtcp_acceptq_removed(struct sock
*sk
) 1535 staticinlinevoidtcp_acceptq_added(struct sock
*sk
) 1540 staticinlineinttcp_acceptq_is_full(struct sock
*sk
) 1542 return sk
->ack_backlog
> sk
->max_ack_backlog
; 1545 staticinlinevoidtcp_acceptq_queue(struct sock
*sk
,struct open_request
*req
, 1548 struct tcp_opt
*tp
= &sk
->tp_pinfo
.af_tcp
; 1551 tcp_acceptq_added(sk
); 1553 if(!tp
->accept_queue_tail
) { 1554 tp
->accept_queue
= req
; 1556 tp
->accept_queue_tail
->dl_next
= req
; 1558 tp
->accept_queue_tail
= req
; 1559 req
->dl_next
= NULL
; 1562 struct tcp_listen_opt
1564 u8 max_qlen_log
;/* log_2 of maximal queued SYNs */ 1568 struct open_request
*syn_table
[TCP_SYNQ_HSIZE
]; 1572 tcp_synq_removed(struct sock
*sk
,struct open_request
*req
) 1574 struct tcp_listen_opt
*lopt
= sk
->tp_pinfo
.af_tcp
.listen_opt
; 1576 if(--lopt
->qlen
==0) 1577 tcp_delete_keepalive_timer(sk
); 1578 if(req
->retrans
==0) 1582 staticinlinevoidtcp_synq_added(struct sock
*sk
) 1584 struct tcp_listen_opt
*lopt
= sk
->tp_pinfo
.af_tcp
.listen_opt
; 1586 if(lopt
->qlen
++ ==0) 1587 tcp_reset_keepalive_timer(sk
, TCP_TIMEOUT_INIT
); 1591 staticinlineinttcp_synq_len(struct sock
*sk
) 1593 return sk
->tp_pinfo
.af_tcp
.listen_opt
->qlen
; 1596 staticinlineinttcp_synq_young(struct sock
*sk
) 1598 return sk
->tp_pinfo
.af_tcp
.listen_opt
->qlen_young
; 1601 staticinlineinttcp_synq_is_full(struct sock
*sk
) 1603 returntcp_synq_len(sk
)>>sk
->tp_pinfo
.af_tcp
.listen_opt
->max_qlen_log
; 1606 staticinlinevoidtcp_synq_unlink(struct tcp_opt
*tp
,struct open_request
*req
, 1607 struct open_request
**prev
) 1609 write_lock(&tp
->syn_wait_lock
); 1610 *prev
= req
->dl_next
; 1611 write_unlock(&tp
->syn_wait_lock
); 1614 staticinlinevoidtcp_synq_drop(struct sock
*sk
,struct open_request
*req
, 1615 struct open_request
**prev
) 1617 tcp_synq_unlink(&sk
->tp_pinfo
.af_tcp
, req
, prev
); 1618 tcp_synq_removed(sk
, req
); 1619 tcp_openreq_free(req
); 1622 static __inline__
voidtcp_openreq_init(struct open_request
*req
, 1624 struct sk_buff
*skb
) 1626 req
->rcv_wnd
=0;/* So that tcp_send_synack() knows! */ 1627 req
->rcv_isn
=TCP_SKB_CB(skb
)->seq
; 1628 req
->mss
= tp
->mss_clamp
; 1629 req
->ts_recent
= tp
->saw_tstamp
? tp
->rcv_tsval
:0; 1630 req
->tstamp_ok
= tp
->tstamp_ok
; 1631 req
->sack_ok
= tp
->sack_ok
; 1632 req
->snd_wscale
= tp
->snd_wscale
; 1633 req
->wscale_ok
= tp
->wscale_ok
; 1636 req
->rmt_port
= skb
->h
.th
->source
; 1639 #define TCP_MEM_QUANTUM ((int)PAGE_SIZE) 1641 staticinlinevoidtcp_free_skb(struct sock
*sk
,struct sk_buff
*skb
) 1643 sk
->tp_pinfo
.af_tcp
.queue_shrunk
=1; 1644 sk
->wmem_queued
-= skb
->truesize
; 1645 sk
->forward_alloc
+= skb
->truesize
; 1649 staticinlinevoidtcp_charge_skb(struct sock
*sk
,struct sk_buff
*skb
) 1651 sk
->wmem_queued
+= skb
->truesize
; 1652 sk
->forward_alloc
-= skb
->truesize
; 1655 externvoid__tcp_mem_reclaim(struct sock
*sk
); 1656 externinttcp_mem_schedule(struct sock
*sk
,int size
,int kind
); 1658 staticinlinevoidtcp_mem_reclaim(struct sock
*sk
) 1660 if(sk
->forward_alloc
>= TCP_MEM_QUANTUM
) 1661 __tcp_mem_reclaim(sk
); 1664 staticinlinevoidtcp_enter_memory_pressure(void) 1666 if(!tcp_memory_pressure
) { 1667 NET_INC_STATS(TCPMemoryPressures
); 1668 tcp_memory_pressure
=1; 1672 staticinlinevoidtcp_moderate_sndbuf(struct sock
*sk
) 1674 if(!(sk
->userlocks
&SOCK_SNDBUF_LOCK
)) { 1675 sk
->sndbuf
=min(sk
->sndbuf
, sk
->wmem_queued
/2); 1676 sk
->sndbuf
=max(sk
->sndbuf
, SOCK_MIN_SNDBUF
); 1680 staticinlinestruct sk_buff
*tcp_alloc_skb(struct sock
*sk
,int size
,int gfp
) 1682 struct sk_buff
*skb
=alloc_skb(size
, gfp
); 1685 if(sk
->forward_alloc
>= (int)skb
->truesize
|| 1686 tcp_mem_schedule(sk
, skb
->truesize
,0)) 1690 tcp_enter_memory_pressure(); 1691 tcp_moderate_sndbuf(sk
); 1696 staticinlinevoidtcp_writequeue_purge(struct sock
*sk
) 1698 struct sk_buff
*skb
; 1700 while((skb
=__skb_dequeue(&sk
->write_queue
)) != NULL
) 1701 tcp_free_skb(sk
, skb
); 1702 tcp_mem_reclaim(sk
); 1705 externvoidtcp_rfree(struct sk_buff
*skb
); 1707 staticinlinevoidtcp_set_owner_r(struct sk_buff
*skb
,struct sock
*sk
) 1710 skb
->destructor
= tcp_rfree
; 1711 atomic_add(skb
->truesize
, &sk
->rmem_alloc
); 1712 sk
->forward_alloc
-= skb
->truesize
; 1715 externvoidtcp_listen_wlock(void); 1717 /* - We may sleep inside this lock. 1718 * - If sleeping is not required (or called from BH), 1719 * use plain read_(un)lock(&tcp_lhash_lock). 1722 staticinlinevoidtcp_listen_lock(void) 1724 /* read_lock synchronizes to candidates to writers */ 1725 read_lock(&tcp_lhash_lock
); 1726 atomic_inc(&tcp_lhash_users
); 1727 read_unlock(&tcp_lhash_lock
); 1730 staticinlinevoidtcp_listen_unlock(void) 1732 if(atomic_dec_and_test(&tcp_lhash_users
)) 1733 wake_up(&tcp_lhash_wait
); 1736 staticinlineintkeepalive_intvl_when(struct tcp_opt
*tp
) 1738 return tp
->keepalive_intvl
? : sysctl_tcp_keepalive_intvl
; 1741 staticinlineintkeepalive_time_when(struct tcp_opt
*tp
) 1743 return tp
->keepalive_time
? : sysctl_tcp_keepalive_time
; 1746 staticinlineinttcp_fin_time(struct tcp_opt
*tp
) 1748 int fin_timeout
= tp
->linger2
? : sysctl_tcp_fin_timeout
; 1750 if(fin_timeout
< (tp
->rto
<<2) - (tp
->rto
>>1)) 1751 fin_timeout
= (tp
->rto
<<2) - (tp
->rto
>>1); 1756 staticinlineinttcp_paws_check(struct tcp_opt
*tp
,int rst
) 1758 if((s32
)(tp
->rcv_tsval
- tp
->ts_recent
) >=0) 1760 if(xtime
.tv_sec
>= tp
->ts_recent_stamp
+ TCP_PAWS_24DAYS
) 1763 /* RST segments are not recommended to carry timestamp, 1764 and, if they do, it is recommended to ignore PAWS because 1765 "their cleanup function should take precedence over timestamps." 1766 Certainly, it is mistake. It is necessary to understand the reasons 1767 of this constraint to relax it: if peer reboots, clock may go 1768 out-of-sync and half-open connections will not be reset. 1769 Actually, the problem would be not existing if all 1770 the implementations followed draft about maintaining clock 1771 via reboots. Linux-2.2 DOES NOT! 1773 However, we can relax time bounds for RST segments to MSL. 1775 if(rst
&& xtime
.tv_sec
>= tp
->ts_recent_stamp
+ TCP_PAWS_MSL
) 1780 #define TCP_CHECK_TIMER(sk) do { } while (0);