* Section: Macros, externs, and inlines
*/
-/* sock lock wrappers. */
-#define sctp_bh_lock_sock(sk) bh_lock_sock(sk)
-#define sctp_bh_unlock_sock(sk) bh_unlock_sock(sk)
-
/* SCTP SNMP MIB stats handlers */
#define SCTP_INC_STATS(net, field) SNMP_INC_STATS((net)->sctp.sctp_statistics, field)
#define SCTP_INC_STATS_BH(net, field) SNMP_INC_STATS_BH((net)->sctp.sctp_statistics, field)
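These macros are thin wrappers around the generic per-net SNMP counter helpers; the _BH variants use the cheaper softirq-context increment. A minimal sketch of how a receive-path helper might bump a counter follows; the function name is hypothetical, and it assumes the SNMP_INC_STATS_BH naming of this patch's era (later kernels folded it into __SNMP_INC_STATS).

#include <net/sctp/sctp.h>

/* Hypothetical helper: count an out-of-the-blue packet from softirq
 * context, where the _BH (non-preempt-safe) increment is sufficient.
 */
static void sctp_example_count_ootb(struct net *net)
{
	SCTP_INC_STATS_BH(net, SCTP_MIB_OUTOFBLUES);
}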
* bottom halves on this lock, but a user may be in the lock too,
* so check if it is busy.
*/
- sctp_bh_lock_sock(sk);
+ bh_lock_sock(sk);
if (sk != rcvr->sk) {
/* Our cached sk is different from the rcvr->sk. This is
 * because migrate()/accept() may be doing something with the
 * new socket. Switch our view of the current sk.
*/
- sctp_bh_unlock_sock(sk);
+ bh_unlock_sock(sk);
sk = rcvr->sk;
- sctp_bh_lock_sock(sk);
+ bh_lock_sock(sk);
}
if (sock_owned_by_user(sk)) {
if (sctp_add_backlog(sk, skb)) {
- sctp_bh_unlock_sock(sk);
+ bh_unlock_sock(sk);
sctp_chunk_free(chunk);
skb = NULL; /* sctp_chunk_free already freed the skb */
goto discard_release;
sctp_inq_push(&chunk->rcvr->inqueue, chunk);
}
- sctp_bh_unlock_sock(sk);
+ bh_unlock_sock(sk);
/* Release the asoc/ep ref we took in the lookup calls. */
if (asoc)
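The hunk above is the standard softirq-side sock lock dance: take the BH spinlock, re-check that the cached sk was not switched underneath us by migrate()/accept(), and divert to the backlog when a process-context user owns the sock. A condensed, hypothetical sketch of that shape (not the real sctp_rcv(); example_process() is a placeholder):

#include <net/sock.h>

static int example_process(struct sock *sk, struct sk_buff *skb)
{
	consume_skb(skb);	/* placeholder for the real fast path */
	return 0;
}

static int example_bh_deliver(struct sock *sk, struct sk_buff *skb)
{
	int rc;

	bh_lock_sock(sk);	/* spinlock half of the two-part sock lock */
	if (sock_owned_by_user(sk))
		/* A user context holds lock_sock(); queue the skb so
		 * release_sock() replays it via sk_backlog_rcv().
		 */
		rc = sk_add_backlog(sk, skb, sk->sk_rcvbuf);
	else
		rc = example_process(sk, skb);
	bh_unlock_sock(sk);

	return rc;
}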
*/
sk = rcvr->sk;
- sctp_bh_lock_sock(sk);
+ bh_lock_sock(sk);
if (sock_owned_by_user(sk)) {
if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
} else
sctp_inq_push(inqueue, chunk);
- sctp_bh_unlock_sock(sk);
+ bh_unlock_sock(sk);
/* If the chunk was backloged again, don't drop refs */
if (backloged)
goto out;
}
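The backloged flag above encodes the ref rule for replayed chunks: the endpoint/association refs taken at lookup time travel with the queued skb, so a chunk that gets backlogged a second time must keep them. A hypothetical sketch of just that rule, where example_consume() and example_put_refs() stand in for the real processing and ref drops:

static void example_consume(struct sock *sk, struct sk_buff *skb);	/* hypothetical */
static void example_put_refs(struct sock *sk);				/* hypothetical */

static int example_backlog_rcv(struct sock *sk, struct sk_buff *skb)
{
	int backloged = 0;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		if (sk_add_backlog(sk, skb, sk->sk_rcvbuf))
			kfree_skb(skb);		/* queue full: drop */
		else
			backloged = 1;		/* refs stay with the skb */
	} else {
		example_consume(sk, skb);
	}
	bh_unlock_sock(sk);

	if (backloged)
		return 0;	/* skb still in flight: keep lookup refs */

	example_put_refs(sk);	/* finally consumed: drop asoc/ep refs */
	return 0;
}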
- sctp_bh_lock_sock(sk);
+ bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
* servers this needs to be solved differently.
/* Common cleanup code for icmp/icmpv6 error handler. */
void sctp_err_finish(struct sock *sk, struct sctp_association *asoc)
{
- sctp_bh_unlock_sock(sk);
+ bh_unlock_sock(sk);
sctp_association_put(asoc);
}
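sctp_err_finish() pairs with the error-path lookup: the lookup returns with bh_lock_sock() held and an association ref taken, and this helper is the single place both are undone. A sketch of that caller contract, where example_err_lookup() is a hypothetical stand-in for the real lookup helper:

static void example_icmp_err(struct net *net, struct sk_buff *skb)
{
	struct sctp_association *asoc;
	struct sock *sk;

	/* Hypothetical: returns a bh-locked sk plus an asoc ref, or NULL. */
	sk = example_err_lookup(net, skb, &asoc);
	if (!sk)
		return;

	/* ... map the ICMP error onto the association here ... */

	sctp_err_finish(sk, asoc);	/* bh_unlock_sock() + sctp_association_put() */
}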
/* ignore bound-specific endpoints */
if (!sctp_is_ep_boundall(sk))
continue;
- sctp_bh_lock_sock(sk);
+ bh_lock_sock(sk);
if (sctp_asconf_mgmt(sp, addrw) < 0)
pr_debug("%s: sctp_asconf_mgmt failed\n", __func__);
- sctp_bh_unlock_sock(sk);
+ bh_unlock_sock(sk);
}
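The loop body above runs from the address worklist timer, i.e. softirq context, which is why the plain BH spinlock is the right lock here and why bound-specific endpoints are skipped before it is taken. A condensed sketch of one iteration, assuming the same helpers the handler itself uses:

static void example_asconf_one(struct sctp_sock *sp,
			       struct sctp_sockaddr_entry *addrw)
{
	struct sock *sk = sctp_opt2sk(sp);

	if (!sctp_is_ep_boundall(sk))	/* only wildcard-bound endpoints */
		return;

	bh_lock_sock(sk);		/* timer context: softirq-safe lock */
	if (sctp_asconf_mgmt(sp, addrw) < 0)
		pr_debug("%s: sctp_asconf_mgmt failed\n", __func__);
	bh_unlock_sock(sk);
}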
#if IS_ENABLED(CONFIG_IPV6)
free_next:
/* Check whether a task is in the sock. */
- sctp_bh_lock_sock(asoc->base.sk);
+ bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
pr_debug("%s: sock is busy\n", __func__);
asoc->base.sk->sk_err = -error;
out_unlock:
- sctp_bh_unlock_sock(asoc->base.sk);
+ bh_unlock_sock(asoc->base.sk);
sctp_transport_put(transport);
}
struct net *net = sock_net(asoc->base.sk);
int error = 0;
- sctp_bh_lock_sock(asoc->base.sk);
+ bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
pr_debug("%s: sock is busy: timer %d\n", __func__,
timeout_type);
asoc->base.sk->sk_err = -error;
out_unlock:
- sctp_bh_unlock_sock(asoc->base.sk);
+ bh_unlock_sock(asoc->base.sk);
sctp_association_put(asoc);
}
struct sctp_association *asoc = transport->asoc;
struct net *net = sock_net(asoc->base.sk);
- sctp_bh_lock_sock(asoc->base.sk);
+ bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
pr_debug("%s: sock is busy\n", __func__);
asoc->base.sk->sk_err = -error;
out_unlock:
- sctp_bh_unlock_sock(asoc->base.sk);
+ bh_unlock_sock(asoc->base.sk);
sctp_transport_put(transport);
}
struct sctp_association *asoc = transport->asoc;
struct net *net = sock_net(asoc->base.sk);
- sctp_bh_lock_sock(asoc->base.sk);
+ bh_lock_sock(asoc->base.sk);
if (sock_owned_by_user(asoc->base.sk)) {
pr_debug("%s: sock is busy\n", __func__);
asoc->state, asoc->ep, asoc, transport, GFP_ATOMIC);
out_unlock:
- sctp_bh_unlock_sock(asoc->base.sk);
+ bh_unlock_sock(asoc->base.sk);
sctp_association_put(asoc);
}
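All four timer handlers above share one shape: if a process-context user owns the sock, the state machine cannot run now, so the handler re-arms itself a short while out instead of busy-waiting, and a hold/put pair keeps the target object alive across the pending expiry. A generic sketch, using the unsigned-long timer callback convention of this patch's era and illustrative names throughout:

#include <linux/timer.h>
#include <net/sock.h>

struct example_obj {			/* stand-in for transport/asoc */
	struct sock		*sk;
	struct timer_list	timer;
};

static void example_obj_hold(struct example_obj *obj) { /* grab a ref */ }
static void example_obj_put(struct example_obj *obj)  { /* drop a ref */ }

static void example_timer_event(unsigned long data)
{
	struct example_obj *obj = (struct example_obj *)data;
	struct sock *sk = obj->sk;

	bh_lock_sock(sk);
	if (sock_owned_by_user(sk)) {
		pr_debug("%s: sock is busy\n", __func__);

		/* Try again later. mod_timer() returning 0 means the
		 * timer was not pending, so take a ref for the newly
		 * scheduled expiry.
		 */
		if (!mod_timer(&obj->timer, jiffies + (HZ / 20)))
			example_obj_hold(obj);
		goto out_unlock;
	}

	/* ... run the protocol event here ... */

out_unlock:
	bh_unlock_sock(sk);
	example_obj_put(obj);		/* ref held for this expiry */
}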
* the net layers still may.
*/
local_bh_disable();
- sctp_bh_lock_sock(sk);
+ bh_lock_sock(sk);
/* Hold the sock, since sk_common_release() will call sock_put(),
 * and we have just a little more cleanup.
sock_hold(sk);
sk_common_release(sk);
- sctp_bh_unlock_sock(sk);
+ bh_unlock_sock(sk);
local_bh_enable();
sock_put(sk);
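The teardown above runs in process context, so it must pair local_bh_disable() with bh_lock_sock(): without it, a softirq on the same CPU could fire while the spinlock is held and deadlock trying to take it. The extra sock_hold()/sock_put() keeps sk alive because sk_common_release() drops its own reference while cleanup is still in flight. A minimal sketch of that pairing:

static void example_teardown(struct sock *sk)
{
	local_bh_disable();	/* process context: fence off softirqs */
	bh_lock_sock(sk);

	sock_hold(sk);		/* sk_common_release() drops a ref */
	sk_common_release(sk);

	bh_unlock_sock(sk);
	local_bh_enable();

	sock_put(sk);		/* drop our temporary reference */
}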