patch-2.3.41 linux/include/net/tcp.h
- Lines: 1088
- Date: Fri Jan 28 08:02:41 2000
- Orig file: v2.3.40/linux/include/net/tcp.h
- Orig date: Tue Jan 11 22:31:45 2000
diff -u --recursive --new-file v2.3.40/linux/include/net/tcp.h linux/include/net/tcp.h
@@ -19,6 +19,7 @@
#define _TCP_H
#define TCP_DEBUG 1
+#undef TCP_FORMAL_WINDOW
#include <linux/config.h>
#include <linux/tcp.h>
@@ -130,27 +131,27 @@
struct sock *bind_next;
struct sock **bind_pprev;
unsigned char state,
- zapped;
+ substate; /* "zapped" is replaced with "substate" */
__u16 sport;
unsigned short family;
unsigned char reuse,
- nonagle;
+ rcv_wscale; /* It is also TW bucket specific */
atomic_t refcnt;
/* And these are ours. */
int hashent;
+ int timeout;
__u32 rcv_nxt;
__u32 snd_nxt;
+ __u32 rcv_wnd;
+ __u32 syn_seq;
__u32 ts_recent;
long ts_recent_stamp;
+ unsigned long ttd;
struct tcp_bind_bucket *tb;
struct tcp_tw_bucket *next_death;
struct tcp_tw_bucket **pprev_death;
- int death_slot;
-#ifdef CONFIG_TCP_TW_RECYCLE
- unsigned long ttd;
- int rto;
-#endif
+
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
struct in6_addr v6_daddr;
struct in6_addr v6_rcv_saddr;
@@ -169,10 +170,11 @@
}
}
-extern int tcp_tw_death_row_slot;
+extern atomic_t tcp_orphan_count;
+extern int tcp_tw_count;
+extern void tcp_time_wait(struct sock *sk, int state, int timeo);
extern void tcp_timewait_kill(struct tcp_tw_bucket *tw);
-extern void tcp_tw_schedule(struct tcp_tw_bucket *tw);
-extern void tcp_tw_reschedule(struct tcp_tw_bucket *tw);
+extern void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
@@ -224,67 +226,81 @@
return tcp_lhashfn(sk->num);
}
-/* Note, that it is > than ipv6 header */
-#define NETHDR_SIZE (sizeof(struct iphdr) + 40)
-
-/*
- * 40 is maximal IP options size
- * 20 is the maximum TCP options size we can currently construct on a SYN.
- * 40 is the maximum possible TCP options size.
- */
-
-#define MAX_SYN_SIZE (NETHDR_SIZE + sizeof(struct tcphdr) + 20 + MAX_HEADER + 15)
-#define MAX_FIN_SIZE (NETHDR_SIZE + sizeof(struct tcphdr) + MAX_HEADER + 15)
-#define BASE_ACK_SIZE (NETHDR_SIZE + MAX_HEADER + 15)
-#define MAX_ACK_SIZE (NETHDR_SIZE + sizeof(struct tcphdr) + MAX_HEADER + 15)
-#define MAX_RESET_SIZE (NETHDR_SIZE + sizeof(struct tcphdr) + MAX_HEADER + 15)
-#define MAX_TCPHEADER_SIZE (NETHDR_SIZE + sizeof(struct tcphdr) + 20 + MAX_HEADER + 15)
+#define MAX_TCP_HEADER (128 + MAX_HEADER)
/*
* Never offer a window over 32767 without using window scaling. Some
* poor stacks do signed 16bit maths!
*/
-#define MAX_WINDOW 32767
-#define MAX_DELAY_ACK 2
+#define MAX_TCP_WINDOW 32767
+
+/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
+#define TCP_MIN_MSS 88
+
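The 88 above is plain arithmetic; a minimal sketch of the derivation (the
breakdown of the individual terms is our reading of the comment, not
spelled out in the patch):

    /* TCP_MIN_MSS = (60+60+8) - (20+20) = 88, where (assumed):
     *   60    = maximal IPv4 header (20 fixed + 40 options)
     *   60    = maximal TCP header  (20 fixed + 40 options)
     *    8    = minimal fragment payload a host must accept (RFC 791)
     *   20+20 = the bare IP and TCP headers that an MSS already excludes
     */
    enum { tcp_min_mss_check = (60 + 60 + 8) - (20 + 20) }; /* == 88 */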
+/* Minimal RCV_MSS. */
+#define TCP_MIN_RCVMSS 536
/*
* How much of the receive buffer do we advertize
* (the rest is reserved for headers and driver packet overhead)
* Use a power of 2.
*/
-#define WINDOW_ADVERTISE_DIVISOR 2
+#define TCP_WINDOW_ADVERTISE_DIVISOR 2
/* urg_data states */
-#define URG_VALID 0x0100
-#define URG_NOTYET 0x0200
-#define URG_READ 0x0400
+#define TCP_URG_VALID 0x0100
+#define TCP_URG_NOTYET 0x0200
+#define TCP_URG_READ 0x0400
-#define TCP_RETR1 7 /*
+#define TCP_RETR1 3 /*
* This is how many retries it does before it
* tries to figure out if the gateway is
- * down.
+ * down. Minimal RFC value is 3; it corresponds
+ * to ~3sec-8min depending on RTO.
*/
#define TCP_RETR2 15 /*
* This should take at least
* 90 minutes to time out.
+					 * RFC1122 says the limit must be at least 100 sec.
+ * 15 is ~13-30min depending on RTO.
*/
-#define TCP_TIMEOUT_LEN (15*60*HZ) /* should be about 15 mins */
-#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to successfully
- * close the socket, about 60 seconds */
-#define TCP_FIN_TIMEOUT (3*60*HZ) /* BSD style FIN_WAIT2 deadlock breaker */
-
-#define TCP_ACK_TIME (3*HZ) /* time to delay before sending an ACK */
-#define TCP_WRITE_TIME (30*HZ) /* initial time to wait for an ACK,
- * after last transmit */
-#define TCP_TIMEOUT_INIT (3*HZ) /* RFC 1122 initial timeout value */
-#define TCP_SYN_RETRIES 10 /* number of times to retry opening a
- * connection (TCP_RETR2-....) */
-#define TCP_PROBEWAIT_LEN (1*HZ)/* time to wait between probes when
- * I've got something to write and
- * there is no window */
-#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
+#define TCP_SYN_RETRIES 5 /* number of times to retry active opening a
+				 * connection: ~180sec is RFC minimum	*/
+
+#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a
+				 * connection: ~180sec is RFC minimum	*/
+
+
+#define TCP_ORPHAN_RETRIES 7 /* number of times to retry on an orphaned
+ * socket. 7 is ~50sec-16min.
+ */
+
+
+#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
+ * state, about 60 seconds */
+#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
+ /* BSD style FIN_WAIT2 deadlock breaker.
+ * It used to be 3min, new value is 60sec,
+ * to combine FIN-WAIT-2 timeout with
+ * TIME-WAIT timer.
+ */
+
+#define TCP_DELACK_MAX (HZ/2) /* maximal time to delay before sending an ACK */
+#define TCP_DELACK_MIN (2) /* minimal time to delay before sending an ACK,
+ * 2 scheduler ticks, not depending on HZ */
+#define TCP_ATO_MAX ((TCP_DELACK_MAX*4)/5) /* ATO producing TCP_DELACK_MAX */
+#define TCP_ATO_MIN 2
+#define TCP_RTO_MAX (120*HZ)
+#define TCP_RTO_MIN (HZ/5)
+#define TCP_TIMEOUT_INIT (3*HZ) /* RFC 1122 initial RTO value */
+
+#define TCP_RESOURCE_PROBE_INTERVAL (HZ/2) /* Maximal interval between probes
+ * for local resources.
+ */
+
+#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
#define TCP_KEEPALIVE_INTVL (75*HZ)
@@ -293,14 +309,39 @@
#define MAX_TCP_KEEPCNT 127
#define MAX_TCP_SYNCNT 127
-#define TCP_SYNACK_PERIOD (HZ/2) /* How often to run the synack slow timer */
-#define TCP_QUICK_TRIES 8 /* How often we try to retransmit, until
- * we tell the link layer that it is something
- * wrong (e.g. that it can expire redirects) */
-
/* TIME_WAIT reaping mechanism. */
#define TCP_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
-#define TCP_TWKILL_PERIOD ((HZ*60)/TCP_TWKILL_SLOTS)
+#define TCP_TWKILL_PERIOD (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
+
+#define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */
+#define TCP_SYNQ_HSIZE 64 /* Size of SYNACK hash table */
+
+#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
+#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
+					 * after this time. It should be equal to
+					 * (or greater than) TCP_TIMEWAIT_LEN
+					 * to provide reliability equal to that
+					 * provided by the timewait state.
+					 */
+#define TCP_PAWS_WINDOW 1 /* Replay window for per-host
+ * timestamps. It must be less than
+ * minimal timewait lifetime.
+ */
+
+#define TCP_TW_RECYCLE_SLOTS_LOG 5
+#define TCP_TW_RECYCLE_SLOTS (1<<TCP_TW_RECYCLE_SLOTS_LOG)
+
+/* If the time is > 4sec, it is the "slow" path and no recycling is
+   required, so we select the tick so that the slot range spans about
+   4 seconds.
+ */
+
+#if HZ == 100
+#define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#elif HZ == 1024
+#define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
+#else
+#error HZ != 100 && HZ != 1024.
+#endif
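To see the "about 4 seconds" in numbers: the recycle range covers
TCP_TW_RECYCLE_SLOTS slots of 2^TCP_TW_RECYCLE_TICK jiffies each.

    /* Worked numbers, pure arithmetic from the definitions above:
     *   HZ == 100:  tick = 7+2-5  = 4, range = 32 << 4 = 512 jiffies  ~= 5.1 sec
     *   HZ == 1024: tick = 10+2-5 = 7, range = 32 << 7 = 4096 jiffies  = 4.0 sec
     */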
/*
* TCP option
@@ -331,23 +372,40 @@
#define TCPOLEN_SACK_BASE_ALIGNED 4
#define TCPOLEN_SACK_PERBLOCK 8
-#define TIME_WRITE 1 /* Not yet used */
-#define TIME_RETRANS 2 /* Retransmit timer */
-#define TIME_DACK 3 /* Delayed ack timer */
-#define TIME_PROBE0 4
-#define TIME_KEEPOPEN 5
+#define TCP_TIME_RETRANS 1 /* Retransmit timer */
+#define TCP_TIME_DACK 2 /* Delayed ack timer */
+#define TCP_TIME_PROBE0 3 /* Zero window probe timer */
+#define TCP_TIME_KEEPOPEN 4 /* Keepalive timer */
/* sysctl variables for tcp */
+extern int sysctl_max_syn_backlog;
+extern int sysctl_tcp_timestamps;
+extern int sysctl_tcp_window_scaling;
+extern int sysctl_tcp_sack;
+extern int sysctl_tcp_fin_timeout;
+extern int sysctl_tcp_tw_recycle;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
+extern int sysctl_tcp_synack_retries;
+extern int sysctl_tcp_retries1;
+extern int sysctl_tcp_retries2;
+extern int sysctl_tcp_orphan_retries;
+extern int sysctl_tcp_syncookies;
+extern int sysctl_tcp_retrans_collapse;
+extern int sysctl_tcp_stdurg;
+extern int sysctl_tcp_rfc1337;
+extern int sysctl_tcp_abort_on_overflow;
+extern int sysctl_tcp_max_orphans;
+extern int sysctl_tcp_max_tw_buckets;
struct open_request;
struct or_calltable {
int family;
- void (*rtx_syn_ack) (struct sock *sk, struct open_request *req);
+ int (*rtx_syn_ack) (struct sock *sk, struct open_request *req, struct dst_entry*);
void (*send_ack) (struct sk_buff *skb, struct open_request *req);
void (*destructor) (struct open_request *req);
void (*send_reset) (struct sk_buff *skb);
@@ -376,12 +434,14 @@
__u16 rmt_port;
__u16 mss;
__u8 retrans;
- __u8 __pad;
- unsigned snd_wscale : 4,
+ __u8 index;
+ __u16 snd_wscale : 4,
rcv_wscale : 4,
tstamp_ok : 1,
sack_ok : 1,
- wscale_ok : 1;
+ wscale_ok : 1,
+ ecn_ok : 1,
+ acked : 1;
/* The following two fields can be easily recomputed I think -AK */
__u32 window_clamp; /* window clamp at creation time */
__u32 rcv_wnd; /* rcv_wnd offered first time */
@@ -400,8 +460,14 @@
/* SLAB cache for open requests. */
extern kmem_cache_t *tcp_openreq_cachep;
-#define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
-#define tcp_openreq_free(req) kmem_cache_free(tcp_openreq_cachep, req)
+#define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
+#define tcp_openreq_fastfree(req) kmem_cache_free(tcp_openreq_cachep, req)
+
+extern __inline__ void tcp_openreq_free(struct open_request *req)
+{
+ req->class->destructor(req);
+ tcp_openreq_fastfree(req);
+}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
@@ -441,9 +507,9 @@
int (*hash_connecting) (struct sock *sk);
- __u16 net_header_len;
-
+ int (*remember_stamp) (struct sock *sk);
+ __u16 net_header_len;
int (*setsockopt) (struct sock *sk,
int level,
@@ -506,7 +572,11 @@
extern int tcp_v4_rcv(struct sk_buff *skb,
unsigned short len);
-extern int tcp_do_sendmsg(struct sock *sk, struct msghdr *msg);
+extern int tcp_v4_remember_stamp(struct sock *sk);
+
+extern int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw);
+
+extern int tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size);
extern int tcp_ioctl(struct sock *sk,
int cmd,
@@ -522,6 +592,23 @@
struct tcphdr *th,
unsigned len);
+static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
+{
+ if (tp->ack.quick && --tp->ack.quick == 0 && !tp->ack.pingpong) {
+ /* Leaving quickack mode we deflate ATO to give peer
+ * a time to adapt to new worse(!) RTO. It is not required
+ * in pingpong mode, when ACKs were delayed in any case.
+ */
+ tp->ack.ato = TCP_ATO_MIN;
+ }
+}
+
+static __inline__ void tcp_delack_init(struct tcp_opt *tp)
+{
+ memset(&tp->ack, 0, sizeof(tp->ack));
+}
+
+
enum tcp_tw_status
{
TCP_TW_SUCCESS = 0,
@@ -530,6 +617,7 @@
TCP_TW_SYN = 3
};
+
extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,
struct sk_buff *skb,
struct tcphdr *th,
@@ -537,7 +625,10 @@
extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
struct open_request *req,
- struct open_request *prev);
+ struct open_request **prev);
+extern int tcp_child_process(struct sock *parent,
+ struct sock *child,
+ struct sk_buff *skb);
extern void tcp_close(struct sock *sk,
long timeout);
@@ -557,6 +648,8 @@
int len, int nonblock,
int flags, int *addr_len);
+extern int tcp_listen_start(struct sock *sk);
+
extern void tcp_parse_options(struct sock *sk, struct tcphdr *th,
struct tcp_opt *tp, int no_fancy);
@@ -614,9 +707,7 @@
/* tcp_output.c */
-extern void tcp_read_wakeup(struct sock *);
-extern void tcp_write_xmit(struct sock *);
-extern void tcp_time_wait(struct sock *);
+extern int tcp_write_xmit(struct sock *);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_fack_retransmit(struct sock *);
extern void tcp_xmit_retransmit_queue(struct sock *);
@@ -624,46 +715,22 @@
extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
-extern void tcp_write_wakeup(struct sock *);
+extern int tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, int priority);
extern int tcp_send_synack(struct sock *);
-extern void tcp_transmit_skb(struct sock *, struct sk_buff *);
-extern void tcp_send_skb(struct sock *, struct sk_buff *, int force_queue);
+extern int tcp_transmit_skb(struct sock *, struct sk_buff *);
+extern void tcp_send_skb(struct sock *, struct sk_buff *, int force_queue, unsigned mss_now);
extern void tcp_send_ack(struct sock *sk);
-extern void tcp_send_delayed_ack(struct sock *sk, int max_timeout);
+extern void tcp_send_delayed_ack(struct sock *sk);
/* tcp_timer.c */
extern void tcp_reset_xmit_timer(struct sock *, int, unsigned long);
extern void tcp_init_xmit_timers(struct sock *);
extern void tcp_clear_xmit_timers(struct sock *);
-extern void tcp_retransmit_timer(unsigned long);
-extern void tcp_delack_timer(unsigned long);
-extern void tcp_probe_timer(unsigned long);
-
extern void tcp_delete_keepalive_timer (struct sock *);
extern void tcp_reset_keepalive_timer (struct sock *, unsigned long);
-extern void tcp_keepalive_timer (unsigned long);
-
-/*
- * TCP slow timer
- */
-extern struct timer_list tcp_slow_timer;
-
-struct tcp_sl_timer {
- atomic_t count;
- unsigned long period;
- unsigned long last;
- void (*handler) (unsigned long);
-};
-
-#define TCP_SLT_SYNACK 0
-#define TCP_SLT_TWKILL 1
-#define TCP_SLT_MAX 2
-
-extern struct tcp_sl_timer tcp_slt_array[TCP_SLT_MAX];
-
extern int tcp_sync_mss(struct sock *sk, u32 pmtu);
/* Compute the current effective MSS, taking SACKs and IP options,
@@ -673,7 +740,7 @@
static __inline__ unsigned int tcp_current_mss(struct sock *sk)
{
struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
- struct dst_entry *dst = sk->dst_cache;
+ struct dst_entry *dst = __sk_dst_get(sk);
int mss_now = tp->mss_cache;
if (dst && dst->pmtu != tp->pmtu_cookie)
@@ -682,7 +749,7 @@
if(tp->sack_ok && tp->num_sacks)
mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
(tp->num_sacks * TCPOLEN_SACK_PERBLOCK));
- return mss_now > 8 ? mss_now : 8;
+ return mss_now;
}
/* Initialize RCV_MSS value.
@@ -704,9 +771,24 @@
else
mss = tp->mss_cache;
- tp->rcv_mss = max(min(mss, 536), 8);
+ tp->ack.rcv_mss = max(min(mss, TCP_MIN_RCVMSS), TCP_MIN_MSS);
+}
+
+static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
+{
+ tp->pred_flags = htonl((tp->tcp_header_len << 26) |
+ ntohl(TCP_FLAG_ACK) |
+ snd_wnd);
+}
+
+static __inline__ void tcp_fast_path_on(struct tcp_opt *tp)
+{
+ __tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
}
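For context, a sketch of the consumer side of pred_flags: VJ header
prediction as done in tcp_input.c. The helper below is hypothetical
(this patch does not contain the receive path, and the real mask also
ignores the reserved bits), but it shows the idea: an in-order segment
hits the fast path when the fourth 32-bit word of its TCP header
(data offset, flags, window) matches the precomputed value, modulo
the PSH bit.

    /* Hypothetical helper, not part of the patch. */
    static __inline__ int tcp_header_predicted(struct tcphdr *th, __u32 pred_flags)
    {
    	/* TCP header as 32-bit words: [0] ports, [1] seq, [2] ack_seq,
    	 * [3] doff/flags/window, which is the word pred_flags mirrors. */
    	return (((__u32 *)th)[3] & ~TCP_FLAG_PSH) == pred_flags;
    }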
+
+
+
/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
* than the offered window.
@@ -751,23 +833,26 @@
}
/* RFC1323 scaling applied */
- return new_win >> tp->rcv_wscale;
-}
+ new_win >>= tp->rcv_wscale;
-/* See if we can advertise non-zero, and if so how much we
- * can increase our advertisement. If it becomes more than
- * twice what we are talking about right now, return true.
- */
-extern __inline__ int tcp_raise_window(struct sock *sk)
-{
- struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
- u32 cur_win = tcp_receive_window(tp);
- u32 new_win = __tcp_select_window(sk);
+#ifdef TCP_FORMAL_WINDOW
+ if (new_win == 0) {
+ /* If we advertise zero window, disable fast path. */
+ tp->pred_flags = 0;
+ } else if (cur_win == 0 && tp->pred_flags == 0 &&
+ skb_queue_len(&tp->out_of_order_queue) == 0 &&
+ !tp->urg_data) {
+		/* If we open a previously zero window, re-enable the fast path.
+		   Without this it would be enabled only by the first data packet,
+		   which is too late to merge checksumming into the copy.
+		 */
+ tcp_fast_path_on(tp);
+ }
+#endif
- return (new_win && (new_win > (cur_win << 1)));
+ return new_win;
}
-
/* TCP timestamps are only 32 bits; this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below. We decidedly
@@ -804,6 +889,8 @@
#define TCPCB_FLAG_PSH 0x08
#define TCPCB_FLAG_ACK 0x10
#define TCPCB_FLAG_URG 0x20
+#define TCPCB_FLAG_ECE 0x40
+#define TCPCB_FLAG_CWR 0x80
__u8 sacked; /* State flags for SACK/FACK. */
#define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
@@ -860,13 +947,91 @@
return max(min(FlightSize, tp->snd_cwnd) >> 1, 2);
}
+/* Set the slow start threshold and cwnd, without falling back into slow start */
+extern __inline__ void __tcp_enter_cong_avoid(struct tcp_opt *tp)
+{
+ tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
+ if (tp->snd_ssthresh > tp->snd_cwnd_clamp)
+ tp->snd_ssthresh = tp->snd_cwnd_clamp;
+ tp->snd_cwnd = tp->snd_ssthresh;
+ tp->snd_cwnd_cnt = 0;
+ tp->high_seq = tp->snd_nxt;
+}
+
+extern __inline__ void tcp_enter_cong_avoid(struct tcp_opt *tp)
+{
+ if (!tp->high_seq || after(tp->snd_nxt, tp->high_seq))
+ __tcp_enter_cong_avoid(tp);
+}
+
+
+/* Increase the initial CWND conservatively, i.e. only if the estimated
+   RTT is low enough. It is not quite correct; we should use
+   POWER, i.e. RTT*BANDWIDTH, but we still cannot estimate this.
+
+   Numbers are taken from RFC2414.
+ */
+static __inline__ __u32 tcp_init_cwnd(struct tcp_opt *tp)
+{
+ __u32 cwnd;
+
+ if (!tp->srtt || tp->srtt > ((HZ/50)<<3) || tp->mss_cache > 1460)
+ cwnd = 2;
+ else if (tp->mss_cache > 1095)
+ cwnd = 3;
+ else
+ cwnd = 4;
+
+ return min(cwnd, tp->snd_cwnd_clamp);
+}
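Worked examples of the mapping, assuming snd_cwnd_clamp does not bite
(the srtt test reads "smoothed RTT below ~20 ms", since srtt is stored
left-shifted by 3):

    /*   mss_cache <= 1095            -> cwnd = 4  (e.g. 4*536  = 2144 bytes)
     *   1095 < mss_cache <= 1460     -> cwnd = 3  (3*1460 = 4380 bytes)
     *   mss_cache > 1460, or srtt
     *   unknown / above threshold    -> cwnd = 2
     * i.e. the RFC2414 schedule min(4*MSS, max(2*MSS, 4380 bytes)).
     */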
+
+
+static __inline__ int tcp_minshall_check(struct tcp_opt *tp)
+{
+ return after(tp->snd_sml,tp->snd_una) &&
+ !after(tp->snd_sml, tp->snd_nxt);
+}
+
+static __inline__ void tcp_minshall_update(struct tcp_opt *tp, int mss, int len)
+{
+ if (len < mss)
+ tp->snd_sml = tp->snd_nxt;
+}
+
+/* Return 0 if the packet can be sent now without violating Nagle's rules:
+ 1. It is full sized.
+ 2. Or it contains FIN or URG.
+ 3. Or TCP_NODELAY was set.
+ 4. Or TCP_CORK is not set, and all sent packets are ACKed.
+ With Minshall's modification: all sent small packets are ACKed.
+ */
+
+static __inline__ int tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now)
+{
+ return (skb->len < mss_now &&
+ !(TCP_SKB_CB(skb)->flags & (TCPCB_FLAG_URG|TCPCB_FLAG_FIN)) &&
+ (tp->nonagle == 2 ||
+ (!tp->nonagle &&
+ tp->packets_out &&
+ tcp_minshall_check(tp))));
+}
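Note the inverted sense: tcp_nagle_check() returns nonzero when the
segment must be held back. A quick truth table, derived from the
expression above:

    /*   full-sized segment (len >= mss_now)        -> 0, send
     *   FIN or URG set                              -> 0, send
     *   TCP_NODELAY (nonagle == 1)                  -> 0, send
     *   TCP_CORK (nonagle == 2), small segment      -> 1, hold
     *   Nagle on, small unacked segment in flight   -> 1, hold (Minshall)
     */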
+
/* This checks if the data bearing packet SKB (usually tp->send_head)
* should be put on the wire right now.
*/
-static __inline__ int tcp_snd_test(struct sock *sk, struct sk_buff *skb)
+static __inline__ int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
+ unsigned cur_mss, int tail)
{
- struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
- int nagle_check = 1;
+	/*
+	 * Reset CWND after an idle period longer than RTO to the "restart
+	 * window". It is a "side" effect of this function, which is _not_
+	 * good from the viewpoint of clarity. But we have to do it before
+	 * checking the congestion window below; the alternative is to
+	 * prepend all the calls with this test.
+	 */
+ if (tp->packets_out==0 &&
+ (s32)(tcp_time_stamp - tp->lsndtime) > tp->rto)
+ tp->snd_cwnd = min(tp->snd_cwnd, tcp_init_cwnd(tp));
/* RFC 1122 - section 4.2.3.4
*
@@ -876,97 +1041,126 @@
* b) There are packets in flight and we have a small segment
* [SWS avoidance and Nagle algorithm]
* (part of SWS is done on packetization)
+	 *    Minshall's version reads: there are no _small_
+	 *    segments in flight. (tcp_nagle_check)
	 * c) We are retransmitting [Nagle]
* d) We have too many packets 'in flight'
*
* Don't use the nagle rule for urgent data (or
* for the final FIN -DaveM).
+ *
+	 * Also, the Nagle rule does not apply to frames which
+	 * sit in the middle of the queue (they have no chance
+	 * to get new data), or when the room at the tail of the
+	 * skb is not enough to save anything significant (<32 for now).
*/
- if ((sk->nonagle == 2 && (skb->len < tp->mss_cache)) ||
- (!sk->nonagle &&
- skb->len < (tp->mss_cache >> 1) &&
- tp->packets_out &&
- !(TCP_SKB_CB(skb)->flags & (TCPCB_FLAG_URG|TCPCB_FLAG_FIN))))
- nagle_check = 0;
-
- /*
- * Reset CWND after idle period longer rto. Actually, it would
- * be better to save last send time, but VJ in SIGCOMM'88 proposes
- * to use keepalive timestamp. Well, it is not good, certainly,
- * because SMTP is still broken, but it is better than nothing yet.
- */
- if (tp->packets_out==0 && (s32)(tcp_time_stamp - tp->rcv_tstamp) > tp->rto)
- tp->snd_cwnd = min(tp->snd_cwnd, 2);
/* Don't be strict about the congestion window for the
* final FIN frame. -DaveM
*/
- return (nagle_check &&
+ return ((!tail || !tcp_nagle_check(tp, skb, cur_mss) ||
+ skb_tailroom(skb) < 32) &&
((tcp_packets_in_flight(tp) < tp->snd_cwnd) ||
(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd) &&
tp->retransmits == 0);
}
+static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
+{
+ if (!tp->packets_out && !tp->probe_timer.prev)
+ tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
+}
+
+static __inline__ int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
+{
+ return (skb->next == (struct sk_buff*)&sk->write_queue);
+}
+
/* Push out any pending frames which were held back due to
* TCP_CORK or attempt at coalescing tiny packets.
* The socket must be locked by the caller.
*/
-static __inline__ void tcp_push_pending_frames(struct sock *sk, struct tcp_opt *tp)
-{
- if(tp->send_head) {
- if(tcp_snd_test(sk, tp->send_head))
- tcp_write_xmit(sk);
- else if(tp->packets_out == 0 && !tp->pending) {
- /* We held off on this in tcp_send_skb() */
- tp->pending = TIME_PROBE0;
- tcp_reset_xmit_timer(sk, TIME_PROBE0, tp->rto);
- }
+static __inline__ void __tcp_push_pending_frames(struct sock *sk,
+ struct tcp_opt *tp,
+ unsigned cur_mss)
+{
+ struct sk_buff *skb = tp->send_head;
+
+ if (skb) {
+ if (!tcp_snd_test(tp, skb, cur_mss, tcp_skb_is_last(sk, skb)) ||
+ tcp_write_xmit(sk))
+ tcp_check_probe_timer(sk, tp);
}
}
-/* This tells the input processing path that an ACK should go out
- * right now.
- */
-#define tcp_enter_quickack_mode(__tp) ((__tp)->ato |= (1<<31))
-#define tcp_exit_quickack_mode(__tp) ((__tp)->ato &= ~(1<<31))
-#define tcp_in_quickack_mode(__tp) (((__tp)->ato & (1 << 31)) != 0)
+static __inline__ void tcp_push_pending_frames(struct sock *sk,
+ struct tcp_opt *tp)
+{
+ __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk));
+}
+
+extern void tcp_destroy_sock(struct sock *sk);
+
/*
- * List all states of a TCP socket that can be viewed as a "connected"
- * state. This now includes TCP_SYN_RECV, although I am not yet fully
- * convinced that this is the solution for the 'getpeername(2)'
- * problem. Thanks to Stephen A. Wood <saw@cebaf.gov> -FvK
+ * Calculate(/check) TCP checksum
*/
+static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
+ unsigned long saddr, unsigned long daddr,
+ unsigned long base)
+{
+ return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
+}
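The base argument carries the partial sum of everything the pseudo-header
does not cover. A sketch of roughly how the IPv4 send path uses it,
modeled on tcp_v4_send_check() in tcp_ipv4.c (not part of this patch;
details may differ):

    static void example_send_check(struct sock *sk, struct tcphdr *th,
    			       int len, struct sk_buff *skb)
    {
    	/* skb->csum already holds the sum of the payload; fold in the
    	 * TCP header, then the pseudo-header, and store the result. */
    	th->check = 0;
    	th->check = tcp_v4_check(th, len, sk->saddr, sk->daddr,
    				 csum_partial((char *)th, th->doff << 2,
    					      skb->csum));
    }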
-extern __inline const int tcp_connected(const int state)
+static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
{
- return ((1 << state) &
- (TCPF_ESTABLISHED|TCPF_CLOSE_WAIT|TCPF_FIN_WAIT1|
- TCPF_FIN_WAIT2|TCPF_SYN_RECV));
+ return (unsigned short)csum_fold(csum_partial(skb->h.raw, skb->len, skb->csum));
}
-extern __inline const int tcp_established(const int state)
+static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
{
- return ((1 << state) &
- (TCPF_ESTABLISHED|TCPF_CLOSE_WAIT|TCPF_FIN_WAIT1|
- TCPF_FIN_WAIT2));
+ return skb->ip_summed != CHECKSUM_UNNECESSARY &&
+ __tcp_checksum_complete(skb);
}
-extern void tcp_destroy_sock(struct sock *sk);
+/* Prequeue for VJ style copy to user, combined with checksumming. */
+static __inline__ void tcp_prequeue_init(struct tcp_opt *tp)
+{
+ tp->ucopy.task = NULL;
+ tp->ucopy.len = 0;
+ tp->ucopy.memory = 0;
+ skb_queue_head_init(&tp->ucopy.prequeue);
+}
-/*
- * Calculate(/check) TCP checksum
+/* Packet is added to VJ-style prequeue for processing in process
+ * context, if a reader task is waiting. Apparently, this exciting
+ * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
+ * failed somewhere. Latency? Burstiness? Well, at least now we will
+ * see why it failed. 8)8)			   --ANK
*/
-static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
- unsigned long saddr, unsigned long daddr,
- unsigned long base)
+static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
- return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
+ struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
+
+ if (tp->ucopy.task) {
+ if ((tp->ucopy.memory += skb->truesize) <= (sk->rcvbuf<<1)) {
+ __skb_queue_tail(&tp->ucopy.prequeue, skb);
+ if (skb_queue_len(&tp->ucopy.prequeue) == 1)
+ wake_up_interruptible(sk->sleep);
+ } else {
+ NET_INC_STATS_BH(TCPPrequeueDropped);
+ tp->ucopy.memory -= skb->truesize;
+ kfree_skb(skb);
+ }
+ return 1;
+ }
+ return 0;
}
+
#undef STATE_TRACE
#ifdef STATE_TRACE
@@ -1007,9 +1201,12 @@
static __inline__ void tcp_done(struct sock *sk)
{
+ tcp_set_state(sk, TCP_CLOSE);
+ tcp_clear_xmit_timers(sk);
+
sk->shutdown = SHUTDOWN_MASK;
- if (!sk->dead)
+ if (!sk->dead)
sk->state_change(sk);
else
tcp_destroy_sock(sk);
@@ -1106,7 +1303,7 @@
* our initial window offering to 32k. There should also
* be a sysctl option to stop being nice.
*/
- (*rcv_wnd) = min(space, MAX_WINDOW);
+ (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
(*rcv_wscale) = 0;
if (wscale_ok) {
/* See RFC1323 for an explanation of the limit to 14 */
@@ -1123,52 +1320,127 @@
extern __inline__ int tcp_space(struct sock *sk)
{
return (sk->rcvbuf - atomic_read(&sk->rmem_alloc)) /
- WINDOW_ADVERTISE_DIVISOR;
+ TCP_WINDOW_ADVERTISE_DIVISOR;
}
extern __inline__ int tcp_full_space( struct sock *sk)
{
- return sk->rcvbuf / WINDOW_ADVERTISE_DIVISOR;
+ return sk->rcvbuf / TCP_WINDOW_ADVERTISE_DIVISOR;
+}
+
+extern __inline__ void tcp_init_buffer_space(struct sock *sk)
+{
+ struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);
+ int rcvbuf = tp->advmss+MAX_TCP_HEADER+16+sizeof(struct sk_buff);
+ int sndbuf = tp->mss_clamp+MAX_TCP_HEADER+16+sizeof(struct sk_buff);
+
+ if (sk->rcvbuf < 3*rcvbuf)
+ sk->rcvbuf = min (3*rcvbuf, sysctl_rmem_max);
+ if (sk->sndbuf < 3*sndbuf)
+ sk->sndbuf = min (3*sndbuf, sysctl_wmem_max);
}
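In words, the sizing above: each queued segment costs roughly one MSS of
data plus header room, 16 bytes of slack and the sk_buff itself, and the
socket buffers are grown to hold three such segments, capped by the
global limits.

    /* Per-segment cost (our reading of the expressions above):
     *   data (advmss / mss_clamp) + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff)
     * rcvbuf/sndbuf >= 3 * cost, capped by sysctl_rmem_max / sysctl_wmem_max.
     */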
-extern __inline__ void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req, struct open_request *prev)
+extern __inline__ void tcp_acceptq_removed(struct sock *sk)
{
- if(!req->dl_next)
- tp->syn_wait_last = (struct open_request **)prev;
- prev->dl_next = req->dl_next;
+ sk->ack_backlog--;
}
-extern __inline__ void tcp_synq_queue(struct tcp_opt *tp, struct open_request *req)
-{
- req->dl_next = NULL;
- *tp->syn_wait_last = req;
- tp->syn_wait_last = &req->dl_next;
+extern __inline__ void tcp_acceptq_added(struct sock *sk)
+{
+ sk->ack_backlog++;
}
-extern __inline__ void tcp_synq_init(struct tcp_opt *tp)
+extern __inline__ int tcp_acceptq_is_full(struct sock *sk)
{
- tp->syn_wait_queue = NULL;
- tp->syn_wait_last = &tp->syn_wait_queue;
+ return sk->ack_backlog > sk->max_ack_backlog;
}
-extern void __tcp_inc_slow_timer(struct tcp_sl_timer *slt);
-extern __inline__ void tcp_inc_slow_timer(int timer)
+extern __inline__ void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
+ struct sock *child)
{
- struct tcp_sl_timer *slt = &tcp_slt_array[timer];
-
- if (atomic_read(&slt->count) == 0)
- {
- __tcp_inc_slow_timer(slt);
- }
+ struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
- atomic_inc(&slt->count);
+ req->sk = child;
+ tcp_acceptq_added(sk);
+
+ req->dl_next = tp->accept_queue;
+ tp->accept_queue = req;
}
-extern __inline__ void tcp_dec_slow_timer(int timer)
+struct tcp_listen_opt
+{
+ u8 max_qlen_log; /* log_2 of maximal queued SYNs */
+ int qlen;
+ int qlen_young;
+ int clock_hand;
+ struct open_request *syn_table[TCP_SYNQ_HSIZE];
+};
+
+extern __inline__ void
+tcp_synq_removed(struct sock *sk, struct open_request *req)
{
- struct tcp_sl_timer *slt = &tcp_slt_array[timer];
+ struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;
+
+ if (--lopt->qlen == 0)
+ tcp_delete_keepalive_timer(sk);
+ if (req->retrans == 0)
+ lopt->qlen_young--;
+}
- atomic_dec(&slt->count);
+extern __inline__ void tcp_synq_added(struct sock *sk)
+{
+ struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;
+
+ if (lopt->qlen++ == 0)
+ tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
+ lopt->qlen_young++;
+}
+
+extern __inline__ int tcp_synq_len(struct sock *sk)
+{
+ return sk->tp_pinfo.af_tcp.listen_opt->qlen;
+}
+
+extern __inline__ int tcp_synq_young(struct sock *sk)
+{
+ return sk->tp_pinfo.af_tcp.listen_opt->qlen_young;
+}
+
+extern __inline__ int tcp_synq_is_full(struct sock *sk)
+{
+ return tcp_synq_len(sk)>>sk->tp_pinfo.af_tcp.listen_opt->max_qlen_log;
+}
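The shift is a branch-free capacity test: for non-negative qlen,
qlen >> max_qlen_log is nonzero exactly when qlen >= 2^max_qlen_log.
An equivalent form, for illustration only:

    static __inline__ int tcp_synq_is_full_equiv(struct sock *sk)
    {
    	struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;

    	return lopt->qlen >= (1 << lopt->max_qlen_log);
    }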
+
+extern __inline__ void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req,
+ struct open_request **prev)
+{
+ write_lock(&tp->syn_wait_lock);
+ *prev = req->dl_next;
+ write_unlock(&tp->syn_wait_lock);
+}
+
+extern __inline__ void tcp_synq_drop(struct sock *sk, struct open_request *req,
+ struct open_request **prev)
+{
+ tcp_synq_unlink(&sk->tp_pinfo.af_tcp, req, prev);
+ tcp_synq_removed(sk, req);
+ tcp_openreq_free(req);
+}
+
+static __inline__ void tcp_openreq_init(struct open_request *req,
+ struct tcp_opt *tp,
+ struct sk_buff *skb)
+{
+ req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
+ req->rcv_isn = TCP_SKB_CB(skb)->seq;
+ req->mss = tp->mss_clamp;
+ req->ts_recent = tp->saw_tstamp ? tp->rcv_tsval : 0;
+ req->tstamp_ok = tp->tstamp_ok;
+ req->sack_ok = tp->sack_ok;
+ req->snd_wscale = tp->snd_wscale;
+ req->wscale_ok = tp->wscale_ok;
+ req->acked = 0;
+ req->rmt_port = skb->h.th->source;
}
extern const char timer_bug_msg[];
@@ -1179,13 +1451,14 @@
struct timer_list *timer;
switch (what) {
- case TIME_RETRANS:
+ case TCP_TIME_RETRANS:
timer = &tp->retransmit_timer;
break;
- case TIME_DACK:
+ case TCP_TIME_DACK:
+ tp->ack.blocked = 0;
timer = &tp->delack_timer;
break;
- case TIME_PROBE0:
+ case TCP_TIME_PROBE0:
timer = &tp->probe_timer;
break;
default:
@@ -1199,7 +1472,7 @@
spin_unlock_bh(&sk->timer_lock);
}
-/* This function does not return reliable answer. You is only as advice.
+/* This function does not return reliable answer. Use it only as advice.
*/
static inline int tcp_timer_is_set(struct sock *sk, int what)
@@ -1208,13 +1481,13 @@
int ret;
switch (what) {
- case TIME_RETRANS:
+ case TCP_TIME_RETRANS:
ret = tp->retransmit_timer.prev != NULL;
break;
- case TIME_DACK:
+ case TCP_TIME_DACK:
ret = tp->delack_timer.prev != NULL;
break;
- case TIME_PROBE0:
+ case TCP_TIME_PROBE0:
ret = tp->probe_timer.prev != NULL;
break;
default:
@@ -1248,18 +1521,46 @@
static inline int keepalive_intvl_when(struct tcp_opt *tp)
{
- if (tp->keepalive_intvl)
- return tp->keepalive_intvl;
- else
- return sysctl_tcp_keepalive_intvl;
+ return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}
static inline int keepalive_time_when(struct tcp_opt *tp)
{
- if (tp->keepalive_time)
- return tp->keepalive_time;
- else
- return sysctl_tcp_keepalive_time;
+ return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}
+
+static inline int tcp_fin_time(struct tcp_opt *tp)
+{
+ int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;
+
+ if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
+ fin_timeout = (tp->rto<<2) - (tp->rto>>1);
+
+ return fin_timeout;
+}
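The bit-twiddled lower bound is 3.5*RTO: (rto << 2) - (rto >> 1)
is 4*RTO - RTO/2.

    /* Example: with tp->rto == 3*HZ (the initial RTO), the floor is
     * 12*HZ - 1.5*HZ = 10.5*HZ jiffies, so FIN-WAIT-2 lives at least
     * long enough for a few retransmitted FINs to be ACKed. */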
+
+#if 0 /* TCP_DEBUG */
+#define TCP_CHECK_TIMER(sk) \
+do { struct tcp_opt *__tp = &sk->tp_pinfo.af_tcp; \
+ if (sk->state != TCP_CLOSE) { \
+ if (__tp->packets_out) { \
+ if (!tcp_timer_is_set(sk, TCP_TIME_RETRANS) && !timer_is_running(&__tp->retransmit_timer) && net_ratelimit()) \
+ printk(KERN_DEBUG "sk=%p RETRANS" __FUNCTION__ "(%d) %d\n", sk, __LINE__, sk->state); \
+ } else if (__tp->send_head) { \
+ if (!tcp_timer_is_set(sk, TCP_TIME_PROBE0) && !timer_is_running(&__tp->probe_timer) && net_ratelimit()) \
+ printk(KERN_DEBUG "sk=%p PROBE0" __FUNCTION__ "(%d) %d\n", sk, __LINE__, sk->state); \
+ } \
+ if (__tp->ack.pending) { \
+ if (!tcp_timer_is_set(sk, TCP_TIME_DACK) && !timer_is_running(&__tp->delack_timer) && net_ratelimit()) \
+ printk(KERN_DEBUG "sk=%p DACK" __FUNCTION__ "(%d) %d\n", sk, __LINE__, sk->state); \
+ } \
+ if (__tp->packets_out > skb_queue_len(&sk->write_queue) || \
+ (__tp->send_head && skb_queue_len(&sk->write_queue) == 0)) { \
+ printk(KERN_DEBUG "sk=%p QUEUE" __FUNCTION__ "(%d) %d %d %d %p\n", sk, __LINE__, sk->state, __tp->packets_out, skb_queue_len(&sk->write_queue), __tp->send_head); \
+ } \
+ } } while (0)
+#else
+#define TCP_CHECK_TIMER(sk) do { } while (0);
+#endif
#endif /* _TCP_H */