			    initval);
}
-static inline struct request_sock *
-inet_reqsk_alloc(const struct request_sock_ops *ops, struct sock *sk_listener)
-{
-	struct request_sock *req = reqsk_alloc(ops);
-
-	if (req) {
-		struct inet_request_sock *ireq = inet_rsk(req);
-
-		kmemcheck_annotate_bitfield(ireq, flags);
-		ireq->opt = NULL;
-		atomic64_set(&ireq->ir_cookie, 0);
-		ireq->ireq_state = TCP_NEW_SYN_RECV;
-		write_pnet(&ireq->ireq_net, sock_net(sk_listener));
-
-		/* Following is temporary. It is coupled with debugging
-		 * helpers in reqsk_put() & reqsk_free()
-		 */
-		atomic_set(&ireq->ireq_refcnt, 0);
-	}
-
-	return req;
-}
+struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+				       struct sock *sk_listener);
static inline __u8 inet_sk_flowi_flags(const struct sock *sk)
{
	ireq->ir_mark = inet_request_mark(sk, skb);
}
+struct request_sock *inet_reqsk_alloc(const struct request_sock_ops *ops,
+				       struct sock *sk_listener)
+{
+	struct request_sock *req = reqsk_alloc(ops);
+
+	if (req) {
+		struct inet_request_sock *ireq = inet_rsk(req);
+
+		kmemcheck_annotate_bitfield(ireq, flags);
+		ireq->opt = NULL;
+		atomic64_set(&ireq->ir_cookie, 0);
+		ireq->ireq_state = TCP_NEW_SYN_RECV;
+		write_pnet(&ireq->ireq_net, sock_net(sk_listener));
+
+		/* Following is temporary. It is coupled with debugging
+		 * helpers in reqsk_put() & reqsk_free()
+		 */
+		atomic_set(&ireq->ireq_refcnt, 0);
+	}
+
+	return req;
+}
+EXPORT_SYMBOL(inet_reqsk_alloc);
+
int tcp_conn_request(struct request_sock_ops *rsk_ops,
		     const struct tcp_request_sock_ops *af_ops,
		     struct sock *sk, struct sk_buff *skb)
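
For context, tcp_conn_request() in net/ipv4/tcp_input.c is the caller of the
now out-of-line helper, and the EXPORT_SYMBOL() makes it reachable from modular
users such as DCCP as well. Below is a minimal sketch of the call pattern,
assuming the usual tcp_conn_request() context (rsk_ops, af_ops, sk from the
signature above); the drop label and error handling are illustrative, not
copied from the patch:

	struct request_sock *req;

	/* Allocate the request sock; the helper pre-initialises it,
	 * recording the listener's netns and the TCP_NEW_SYN_RECV state.
	 */
	req = inet_reqsk_alloc(rsk_ops, sk);
	if (!req)
		goto drop;	/* hypothetical label: listener drops the SYN */

	tcp_rsk(req)->af_specific = af_ops;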