return 0;
}
- hctx = ccid2_hc_tx_sk(sk);
+ hctx = ccid2_hc_tx_sk(sk);
ccid2_pr_debug("pipe=%d cwnd=%d\n", hctx->ccid2hctx_pipe,
hctx->ccid2hctx_cwnd);
static int ccid2_ackvector(struct sock *sk, struct sk_buff *skb, int offset,
unsigned char **vec, unsigned char *veclen)
{
- const struct dccp_hdr *dh = dccp_hdr(skb);
- unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
- unsigned char *opt_ptr;
- const unsigned char *opt_end = (unsigned char *)dh +
- (dh->dccph_doff * 4);
- unsigned char opt, len;
- unsigned char *value;
+ const struct dccp_hdr *dh = dccp_hdr(skb);
+ unsigned char *options = (unsigned char *)dh + dccp_hdr_len(skb);
+ unsigned char *opt_ptr;
+ const unsigned char *opt_end = (unsigned char *)dh +
+ (dh->dccph_doff * 4);
+ unsigned char opt, len;
+ unsigned char *value;
BUG_ON(offset < 0);
options += offset;
return -1;
while (opt_ptr != opt_end) {
- opt = *opt_ptr++;
- len = 0;
- value = NULL;
-
- /* Check if this isn't a single byte option */
- if (opt > DCCPO_MAX_RESERVED) {
- if (opt_ptr == opt_end)
- goto out_invalid_option;
-
- len = *opt_ptr++;
- if (len < 3)
- goto out_invalid_option;
- /*
- * Remove the type and len fields, leaving
- * just the value size
- */
- len -= 2;
- value = opt_ptr;
- opt_ptr += len;
-
- if (opt_ptr > opt_end)
- goto out_invalid_option;
- }
+ opt = *opt_ptr++;
+ len = 0;
+ value = NULL;
+
+ /* Check if this isn't a single byte option */
+ if (opt > DCCPO_MAX_RESERVED) {
+ if (opt_ptr == opt_end)
+ goto out_invalid_option;
+
+ len = *opt_ptr++;
+ if (len < 3)
+ goto out_invalid_option;
+ /*
+ * Remove the type and len fields, leaving
+ * just the value size
+ */
+ len -= 2;
+ value = opt_ptr;
+ opt_ptr += len;
+
+ if (opt_ptr > opt_end)
+ goto out_invalid_option;
+ }
switch (opt) {
case DCCPO_ACK_VECTOR_0:
}
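The hunk above walks the DCCP option area as a type-length-value list: option types at or below DCCPO_MAX_RESERVED are single-byte, while larger types carry a length byte that counts the type and length octets themselves, which is why len < 3 is rejected and len -= 2 leaves just the value size. Below is a minimal user-space sketch of the same walk; parse_options and MAX_RESERVED are illustrative names, not kernel API, with 31 assumed as the value of DCCPO_MAX_RESERVED:

	#include <stdio.h>

	#define MAX_RESERVED 31	/* assumed value of DCCPO_MAX_RESERVED */

	static int parse_options(const unsigned char *p, const unsigned char *end)
	{
		while (p != end) {
			unsigned char opt = *p++, len = 0;
			const unsigned char *value = NULL;

			if (opt > MAX_RESERVED) {	/* multi-byte option */
				if (p == end)
					return -1;	/* length byte missing */
				len = *p++;
				if (len < 3)		/* len counts type + len + value */
					return -1;
				len -= 2;		/* value size only */
				value = p;
				p += len;
				if (p > end)		/* value overruns option area */
					return -1;
			}
			printf("option %u, value length %u\n", opt, len);
			(void)value;
		}
		return 0;
	}

	int main(void)
	{
		/* single-byte option 0, then a fake option 38 with a 2-byte value */
		unsigned char opts[] = { 0, 38, 4, 0xde, 0xad };

		return parse_options(opts, opts + sizeof(opts));
	}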
static inline void ccid2_new_ack(struct sock *sk,
- struct ccid2_seq *seqp,
+ struct ccid2_seq *seqp,
unsigned int *maxincr)
{
struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
{
- struct ccid2_hc_tx_sock *hctx = ccid_priv(ccid);
+ struct ccid2_hc_tx_sock *hctx = ccid_priv(ccid);
ccid2_change_cwnd(hctx, 1);
/* Initialize ssthresh to infinity. This means that we will exit the
static void ccid2_hc_tx_exit(struct sock *sk)
{
- struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
+ struct ccid2_hc_tx_sock *hctx = ccid2_hc_tx_sk(sk);
int i;
ccid2_hc_tx_kill_rto_timer(sk);
restart_timer:
sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
- jiffies + usecs_to_jiffies(t_nfb));
+ jiffies + usecs_to_jiffies(t_nfb));
out:
bh_unlock_sock(sk);
sock_put(sk);
case TFRC_SSTATE_NO_SENT:
sk_reset_timer(sk, &hctx->ccid3hctx_no_feedback_timer,
(jiffies +
- usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
+ usecs_to_jiffies(TFRC_INITIAL_TIMEOUT)));
hctx->ccid3hctx_last_win_count = 0;
hctx->ccid3hctx_t_last_win_count = now;
ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK);
ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK);
} else {
hctx->ccid3hctx_rtt = (9 * hctx->ccid3hctx_rtt +
- (u32)r_sample) / 10;
+ (u32)r_sample) / 10;
/* Update sending rate (step 4 of [RFC 3448, 4.3]) */
if (hctx->ccid3hctx_p > 0)
}
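The RTT update in this hunk is the exponentially weighted moving average of [RFC 3448, 4.3] with weight q = 0.9, so a new sample shifts the estimate by only a tenth of the difference. A standalone sketch of the same integer arithmetic (rtt_ewma is an illustrative name, not a kernel helper):

	#include <stdio.h>

	/* EWMA with q = 0.9: new = q*old + (1-q)*sample, in integer arithmetic */
	static unsigned int rtt_ewma(unsigned int rtt, unsigned int r_sample)
	{
		return (9 * rtt + r_sample) / 10;
	}

	int main(void)
	{
		unsigned int rtt = 100000;	/* 100 ms, in microseconds */

		/* a single 200 ms sample moves the estimate only to 110 ms */
		printf("%u\n", rtt_ewma(rtt, 200000));
		return 0;
	}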
static int ccid3_hc_rx_detect_loss(struct sock *sk,
- struct dccp_rx_hist_entry *packet)
+ struct dccp_rx_hist_entry *packet)
{
struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
struct dccp_rx_hist_entry *rx_hist =
dccp_role(sk), sk, dccp_state_name(sk->sk_state));
p_prev = hcrx->ccid3hcrx_p;
-
+
/* Calculate loss event rate */
if (!list_empty(&hcrx->ccid3hcrx_li_hist)) {
u32 i_mean = dccp_li_hist_calc_i_mean(&hcrx->ccid3hcrx_li_hist);
{
const struct ccid3_hc_rx_sock *hcrx = ccid3_hc_rx_sk(sk);
const void *val;
-
+
/* Listening sockets don't have a private CCID block */
if (sk->sk_state == DCCP_LISTEN)
return -EINVAL;
{
const struct ccid3_hc_tx_sock *hctx = ccid3_hc_tx_sk(sk);
const void *val;
-
+
/* Listening sockets don't have a private CCID block */
if (sk->sk_state == DCCP_LISTEN)
return -EINVAL;
static inline struct dccp_rx_hist_entry *
dccp_rx_hist_entry_new(struct dccp_rx_hist *hist,
const struct sock *sk,
- const u32 ndp,
+ const u32 ndp,
const struct sk_buff *skb,
const gfp_t prio)
{
}
extern int dccp_rx_hist_find_entry(const struct list_head *list, const u64 seq,
- u8 *ccval);
+ u8 *ccval);
extern struct dccp_rx_hist_entry *
dccp_rx_hist_find_data_packet(const struct list_head *list);
The following two-column lookup table implements a part of the TCP throughput
equation from [RFC 3448, sec. 3.1]:
- s
+ s
X_calc = --------------------------------------------------------------
R * sqrt(2*b*p/3) + (3 * t_RTO * sqrt(3*b*p/8) * (p + 32*p^3))
s is the packet size in bytes
R is the round trip time in seconds
p is the loss event rate, between 0 and 1.0, of the number of loss
- events as a fraction of the number of packets transmitted
+ events as a fraction of the number of packets transmitted
t_RTO is the TCP retransmission timeout value in seconds
b is the number of packets acknowledged by a single TCP ACK
We can assume that b = 1 and t_RTO = 4 * R, which lets us break the equation down into:
- s
+ s
X_calc = ---------
- R * f(p)
+ R * f(p)
where f(p) is given for 0 < p <= 1 by:

	f(p) = sqrt(2*p/3) + 12 * sqrt(3*p/8) * (p + 32*p^3)
The lookup table therefore actually tabulates the following function g(q):
- g(q) = 1000000 * f(q/1000000)
+ g(q) = 1000000 * f(q/1000000)
Hence, when p <= 1, q must be less than or equal to 1000000. To achieve finer
granularity for the practically more relevant case of small values of p (up to
5%), the fine-grained second column of the table covers 0.0001 <= p <= 0.05,
while the coarse-grained first column covers the full range up to p = 1.
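As a concrete check of the scaling, here is a floating-point user-space sketch of f(p) and of g(q) = 1000000 * f(q/1000000); the kernel itself avoids floating point and ships the table precomputed, so this is purely illustrative (f, g, and the example values below are not kernel code):

	#include <math.h>
	#include <stdio.h>

	/* f(p) = sqrt(2*p/3) + 12 * sqrt(3*p/8) * (p + 32*p^3), 0 < p <= 1 */
	static double f(double p)
	{
		return sqrt(2.0 * p / 3.0) +
		       12.0 * sqrt(3.0 * p / 8.0) * (p + 32.0 * p * p * p);
	}

	/* g(q) = 1000000 * f(q/1000000): the scaled value a table entry stores */
	static unsigned int g(unsigned int q)
	{
		return (unsigned int)(1000000.0 * f(q / 1000000.0));
	}

	int main(void)
	{
		double s = 1460.0, R = 0.1, p = 0.01;	/* bytes, seconds, 1% loss */

		printf("g(10000) = %u\n", g(10000));	/* scaled entry for p = 1% */
		printf("X_calc   = %.0f bytes/sec\n", s / (R * f(p)));
		return 0;
	}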
if (R == 0) { /* possible divide by zero */
DCCP_CRIT("WARNING: RTT is 0, returning maximum X_calc.");
return ~0U;
- }
+ }
if (p <= TFRC_CALC_X_SPLIT) { /* 0.0000 < p <= 0.05 */
if (p < TFRC_SMALLEST_P) { /* 0.0000 < p < 0.0001 */
} else /* 0.0001 <= p <= 0.05 */
index = p/TFRC_SMALLEST_P - 1;
- f = tfrc_calc_x_lookup[index][1];
+ f = tfrc_calc_x_lookup[index][1];
} else { /* 0.05 < p <= 1.00 */
index = p/(1000000/TFRC_CALC_X_ARRSIZE) - 1;
if (fvalue <= tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][1]) {
index = tfrc_binsearch(fvalue, 1);
return (index + 1) * TFRC_CALC_X_SPLIT / TFRC_CALC_X_ARRSIZE;
- }
-
+ }
+
/* else ... it must be in the coarse-grained column */
index = tfrc_binsearch(fvalue, 0);
return (index + 1) * 1000000 / TFRC_CALC_X_ARRSIZE;
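The reverse lookup above maps an f-value back to p: search the fine-grained column first and, failing that, the coarse one, then convert the found index back into a loss rate. A simplified stand-in for the search step, assuming each column is sorted in ascending order (binsearch and the sample values here are illustrative, not the kernel's tfrc_binsearch or its table):

	#include <stdio.h>

	/* smallest index whose entry is >= fval (column sorted ascending) */
	static int binsearch(const unsigned int *col, int n, unsigned int fval)
	{
		int lo = 0, hi = n - 1;

		while (lo < hi) {
			int mid = (lo + hi) / 2;

			if (col[mid] < fval)
				lo = mid + 1;
			else
				hi = mid;
		}
		return lo;
	}

	int main(void)
	{
		unsigned int col[] = { 8172, 11567, 14180, 16388, 18339 };
		int idx = binsearch(col, 5, 12000);

		/* p is then recovered as (idx + 1) * SPLIT / ARRSIZE, as above */
		printf("index = %d\n", idx);
		return 0;
	}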
* DCCP-specific warning and debugging macros.
*/
#define DCCP_WARN(fmt, a...) LIMIT_NETDEBUG(KERN_WARNING "%s: " fmt, \
- __FUNCTION__, ##a)
+ __FUNCTION__, ##a)
#define DCCP_CRIT(fmt, a...) printk(KERN_CRIT fmt " at %s:%d/%s()\n", ##a, \
__FILE__, __LINE__, __FUNCTION__)
#define DCCP_BUG(a...) do { DCCP_CRIT("BUG: " a); dump_stack(); } while(0)
#ifdef MODULE
#define DCCP_PRINTK(enable, fmt, args...) do { if (enable) \
printk(fmt, ##args); \
- } while(0)
+ } while(0)
#else
#define DCCP_PRINTK(enable, fmt, args...) printk(fmt, ##args)
#endif
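As the #ifdef shows, DCCP_PRINTK gates its output on the enable flag only when DCCP is built as a module; built in, it prints unconditionally. The guard itself is the usual do { ... } while (0) macro pattern, sketched here in user space using the same GNU named-variadic style as the kernel macro (MY_PRINTK and my_debug are made-up names):

	#include <stdbool.h>
	#include <stdio.h>

	/* same shape as the MODULE variant: test the flag, then print */
	#define MY_PRINTK(enable, fmt, args...) do { \
			if (enable) \
				printf(fmt, ##args); \
		} while (0)

	int main(void)
	{
		bool my_debug = true;

		MY_PRINTK(my_debug, "pipe=%d cwnd=%d\n", 3, 10);
		return 0;
	}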
DCCP_MIB_ACTIVEOPENS, /* ActiveOpens */
DCCP_MIB_ESTABRESETS, /* EstabResets */
DCCP_MIB_CURRESTAB, /* CurrEstab */
- DCCP_MIB_OUTSEGS, /* OutSegs */
+ DCCP_MIB_OUTSEGS, /* OutSegs */
DCCP_MIB_OUTRSTS,
DCCP_MIB_ABORTONTIMEOUT,
DCCP_MIB_TIMEOUTS,
(dp->dccps_gss -
dccp_msk(sk)->dccpms_sequence_window + 1));
}
-
+
static inline int dccp_ack_pending(const struct sock *sk)
{
const struct dccp_sock *dp = dccp_sk(sk);
list_for_each_entry_safe(opt, next, &dmsk->dccpms_pending,
dccpop_node) {
- BUG_ON(opt->dccpop_val == NULL);
- kfree(opt->dccpop_val);
+ BUG_ON(opt->dccpop_val == NULL);
+ kfree(opt->dccpop_val);
if (opt->dccpop_sc != NULL) {
BUG_ON(opt->dccpop_sc->dccpoc_val == NULL);
kfree(opt->dccpop_sc);
}
- kfree(opt);
- }
+ kfree(opt);
+ }
INIT_LIST_HEAD(&dmsk->dccpms_pending);
list_for_each_entry_safe(opt, next, &dmsk->dccpms_conf, dccpop_node) {
{
return (feat > DCCPF_DATA_CHECKSUM &&
feat < DCCPF_MIN_CCID_SPECIFIC) ||
- feat == DCCPF_RESERVED;
+ feat == DCCPF_RESERVED;
}
/* feature negotiation knows only these four option types (RFC 4340, sec. 6) */
else
return -1;
}
-
+
/*
* Step 6: Check sequence numbers
* Let LSWL = S.SWL and LAWL = S.AWL
(unsigned long long) DCCP_SKB_CB(skb)->dccpd_seq,
(unsigned long long) dp->dccps_swh,
(DCCP_SKB_CB(skb)->dccpd_ack_seq ==
- DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists",
+ DCCP_PKT_WITHOUT_ACK_SEQ) ? "doesn't exist" : "exists",
(unsigned long long) lawl,
(unsigned long long) DCCP_SKB_CB(skb)->dccpd_ack_seq,
(unsigned long long) dp->dccps_awh);
if (dccp_parse_options(sk, skb))
goto out_invalid_packet;
- if (dccp_msk(sk)->dccpms_send_ack_vector &&
- dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
- DCCP_SKB_CB(skb)->dccpd_seq,
- DCCP_ACKVEC_STATE_RECEIVED))
- goto out_invalid_packet; /* FIXME: change error code */
+ if (dccp_msk(sk)->dccpms_send_ack_vector &&
+ dccp_ackvec_add(dp->dccps_hc_rx_ackvec, sk,
+ DCCP_SKB_CB(skb)->dccpd_seq,
+ DCCP_ACKVEC_STATE_RECEIVED))
+ goto out_invalid_packet; /* FIXME: change error code */
dp->dccps_isr = DCCP_SKB_CB(skb)->dccpd_seq;
dccp_update_gsr(sk, dp->dccps_isr);
goto failure;
err = ip_route_newports(&rt, IPPROTO_DCCP, inet->sport, inet->dport,
- sk);
+ sk);
if (err != 0)
goto failure;
/* We don't check in the dst entry if PMTU discovery is forbidden
* on this route. We just assume that no packet-too-big packets
* are sent back when PMTU discovery is not active.
- * There is a small race when the user changes this flag in the
+ * There is a small race when the user changes this flag in the
* route, but I think that's acceptable.
*/
if ((dst = __sk_dst_check(sk, 0)) == NULL)
struct sk_buff *skb;
/* First, grab a route. */
-
+
if (dst == NULL && (dst = inet_csk_route_req(sk, req)) == NULL)
goto out;
looks not very well thought. For now we latch
options, received in the last packet, enqueued
by tcp. Feel free to propose better solution.
- --ANK (980728)
+ --ANK (980728)
*/
if (np->rxopt.all)
/*
DCCP_CRIT("DCCP(%p): option %d(len=%d) not "
"implemented, ignoring", sk, opt, len);
break;
- }
+ }
if (opt != DCCPO_MANDATORY)
mandatory = 0;
}
static int dccp_insert_feat_opt(struct sk_buff *skb, u8 type, u8 feat,
- u8 *val, u8 len)
+ u8 *val, u8 len)
{
u8 *to;
kfree_skb(skb);
return -EPROTO;
}
-
+
/* Build DCCP header and checksum it. */
dh = dccp_zeroed_hdr(skb, dccp_header_size);
sk->sk_err = 0;
sock_reset_flag(sk, SOCK_DONE);
-
+
dccp_sync_mss(sk, dst_mtu(dst));
/*
err = -EINVAL;
else
err = dccp_setsockopt_change(sk, DCCPO_CHANGE_L,
- (struct dccp_so_feat __user *)
+ (struct dccp_so_feat __user *)
optval);
break;
case DCCP_SOCKOPT_CHANGE_R:
be far nicer to have all of the black holes fixed rather than fixing
all of the TCP implementations."
- Golden words :-).
+ Golden words :-).
*/
dst_negative_advice(&sk->sk_dst_cache);