read_lock_bh(&call->state_lock);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events)) {
+ !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
rxrpc_get_call(call);
rxrpc_queue_call(call);
}
rb_insert_color(&call->sock_node, &rx->calls);
if (test_and_set_bit(RXRPC_CALL_HAS_USERID, &call->flags))
BUG();
- if (test_and_set_bit(RXRPC_CALL_ACCEPTED, &call->events))
+ if (test_and_set_bit(RXRPC_CALL_EV_ACCEPTED, &call->events))
BUG();
rxrpc_queue_call(call);
out_release:
_debug("release %p", call);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
rxrpc_queue_call(call);
out_discard:
write_unlock_bh(&call->state_lock);
switch (call->state) {
case RXRPC_CALL_SERVER_ACCEPTING:
call->state = RXRPC_CALL_SERVER_BUSY;
- if (test_and_set_bit(RXRPC_CALL_REJECT_BUSY, &call->events))
+ if (test_and_set_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events))
rxrpc_queue_call(call);
ret = 0;
goto out_release;
out_release:
_debug("release %p", call);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
rxrpc_queue_call(call);
out_discard:
write_unlock_bh(&call->state_lock);
try_to_del_timer_sync(&call->ack_timer);
read_lock_bh(&call->state_lock);
if (call->state <= RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
rxrpc_queue_call(call);
read_unlock_bh(&call->state_lock);
}
if (resend & 1) {
_debug("SET RESEND");
- set_bit(RXRPC_CALL_RESEND, &call->events);
+ set_bit(RXRPC_CALL_EV_RESEND, &call->events);
}
if (resend & 2) {
} else {
_debug("KILL RESEND TIMER");
del_timer_sync(&call->resend_timer);
- clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
+ clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
}
read_unlock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE &&
call->rx_data_post == call->rx_first_oos) {
_debug("drain rx oos now");
- set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
+ set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
}
read_unlock(&call->state_lock);
del_timer_sync(&call->resend_timer);
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
+ clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
if (call->acks_window)
rxrpc_zap_tx_window(call);
iov[0].iov_len = sizeof(hdr);
/* deal with events of a final nature */
- if (test_bit(RXRPC_CALL_RELEASE, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
rxrpc_release_call(call);
- clear_bit(RXRPC_CALL_RELEASE, &call->events);
+ clear_bit(RXRPC_CALL_EV_RELEASE, &call->events);
}
- if (test_bit(RXRPC_CALL_RCVD_ERROR, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events)) {
int error;
- clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
- clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
- clear_bit(RXRPC_CALL_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
+ clear_bit(RXRPC_CALL_EV_ABORT, &call->events);
error = call->conn->trans->peer->net_error;
_debug("post net error %d", error);
if (rxrpc_post_message(call, RXRPC_SKB_MARK_NET_ERROR,
error, true) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
+ clear_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
goto kill_ACKs;
}
- if (test_bit(RXRPC_CALL_CONN_ABORT, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events)) {
ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
- clear_bit(RXRPC_CALL_REJECT_BUSY, &call->events);
- clear_bit(RXRPC_CALL_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events);
+ clear_bit(RXRPC_CALL_EV_ABORT, &call->events);
_debug("post conn abort");
if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
call->conn->error, true) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_CONN_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
goto kill_ACKs;
}
- if (test_bit(RXRPC_CALL_REJECT_BUSY, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events)) {
hdr.type = RXRPC_PACKET_TYPE_BUSY;
- genbit = RXRPC_CALL_REJECT_BUSY;
+ genbit = RXRPC_CALL_EV_REJECT_BUSY;
goto send_message;
}
- if (test_bit(RXRPC_CALL_ABORT, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
ASSERTCMP(call->state, >, RXRPC_CALL_COMPLETE);
if (rxrpc_post_message(call, RXRPC_SKB_MARK_LOCAL_ERROR,
data = htonl(call->abort_code);
iov[1].iov_base = &data;
iov[1].iov_len = sizeof(data);
- genbit = RXRPC_CALL_ABORT;
+ genbit = RXRPC_CALL_EV_ABORT;
goto send_message;
}
- if (test_bit(RXRPC_CALL_ACK_FINAL, &call->events)) {
- genbit = RXRPC_CALL_ACK_FINAL;
+ if (test_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events)) {
+ genbit = RXRPC_CALL_EV_ACK_FINAL;
ack.bufferSpace = htons(8);
ack.maxSkew = 0;
goto send_ACK;
}
- if (call->events & ((1 << RXRPC_CALL_RCVD_BUSY) |
- (1 << RXRPC_CALL_RCVD_ABORT))
+ if (call->events & ((1 << RXRPC_CALL_EV_RCVD_BUSY) |
+ (1 << RXRPC_CALL_EV_RCVD_ABORT))
) {
u32 mark;
- if (test_bit(RXRPC_CALL_RCVD_ABORT, &call->events))
+ if (test_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events))
mark = RXRPC_SKB_MARK_REMOTE_ABORT;
else
mark = RXRPC_SKB_MARK_BUSY;
if (rxrpc_post_message(call, mark, ECONNABORTED, true) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
- clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
+ clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
goto kill_ACKs;
}
- if (test_and_clear_bit(RXRPC_CALL_RCVD_ACKALL, &call->events)) {
+ if (test_and_clear_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events)) {
_debug("do implicit ackall");
rxrpc_clear_tx_window(call);
}
- if (test_bit(RXRPC_CALL_LIFE_TIMER, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events)) {
write_lock_bh(&call->state_lock);
if (call->state <= RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_LOCALLY_ABORTED;
call->abort_code = RX_CALL_TIMEOUT;
- set_bit(RXRPC_CALL_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
}
write_unlock_bh(&call->state_lock);
ETIME, true) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
+ clear_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
goto kill_ACKs;
}
}
/* handle resending */
- if (test_and_clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
+ if (test_and_clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
rxrpc_resend_timer(call);
- if (test_and_clear_bit(RXRPC_CALL_RESEND, &call->events))
+ if (test_and_clear_bit(RXRPC_CALL_EV_RESEND, &call->events))
rxrpc_resend(call);
/* consider sending an ordinary ACK */
- if (test_bit(RXRPC_CALL_ACK, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_ACK, &call->events)) {
_debug("send ACK: window: %d - %d { %lx }",
call->rx_data_eaten, call->ackr_win_top,
call->ackr_window[0]);
if (call->state > RXRPC_CALL_SERVER_ACK_REQUEST &&
call->ackr_reason != RXRPC_ACK_PING_RESPONSE) {
/* ACK by sending reply DATA packet in this state */
- clear_bit(RXRPC_CALL_ACK, &call->events);
+ clear_bit(RXRPC_CALL_EV_ACK, &call->events);
goto maybe_reschedule;
}
- genbit = RXRPC_CALL_ACK;
+ genbit = RXRPC_CALL_EV_ACK;
acks = kzalloc(call->ackr_win_top - call->rx_data_eaten,
GFP_NOFS);
/* handle completion of security negotiations on an incoming
* connection */
- if (test_and_clear_bit(RXRPC_CALL_SECURED, &call->events)) {
+ if (test_and_clear_bit(RXRPC_CALL_EV_SECURED, &call->events)) {
_debug("secured");
spin_lock_bh(&call->lock);
_debug("securing");
write_lock(&call->conn->lock);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_bit(RXRPC_CALL_RELEASE, &call->events)) {
+ !test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
_debug("not released");
call->state = RXRPC_CALL_SERVER_ACCEPTING;
list_move_tail(&call->accept_link,
write_unlock(&call->conn->lock);
read_lock(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE)
- set_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
+ set_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
read_unlock(&call->state_lock);
}
spin_unlock_bh(&call->lock);
- if (!test_bit(RXRPC_CALL_POST_ACCEPT, &call->events))
+ if (!test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events))
goto maybe_reschedule;
}
/* post a notification of an acceptable connection to the app */
- if (test_bit(RXRPC_CALL_POST_ACCEPT, &call->events)) {
+ if (test_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events)) {
_debug("post accept");
if (rxrpc_post_message(call, RXRPC_SKB_MARK_NEW_CALL,
0, false) < 0)
goto no_mem;
- clear_bit(RXRPC_CALL_POST_ACCEPT, &call->events);
+ clear_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
goto maybe_reschedule;
}
/* handle incoming call acceptance */
- if (test_and_clear_bit(RXRPC_CALL_ACCEPTED, &call->events)) {
+ if (test_and_clear_bit(RXRPC_CALL_EV_ACCEPTED, &call->events)) {
_debug("accepted");
ASSERTCMP(call->rx_data_post, ==, 0);
call->rx_data_post = 1;
read_lock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE)
- set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events);
+ set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events);
read_unlock_bh(&call->state_lock);
}
/* drain the out of sequence received packet queue into the packet Rx
* queue */
- if (test_and_clear_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events)) {
+ if (test_and_clear_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events)) {
while (call->rx_data_post == call->rx_first_oos)
if (rxrpc_drain_rx_oos_queue(call) < 0)
break;
}
switch (genbit) {
- case RXRPC_CALL_ABORT:
+ case RXRPC_CALL_EV_ABORT:
clear_bit(genbit, &call->events);
- clear_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+ clear_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
goto kill_ACKs;
- case RXRPC_CALL_ACK_FINAL:
+ case RXRPC_CALL_EV_ACK_FINAL:
write_lock_bh(&call->state_lock);
if (call->state == RXRPC_CALL_CLIENT_FINAL_ACK)
call->state = RXRPC_CALL_COMPLETE;
kill_ACKs:
del_timer_sync(&call->ack_timer);
- if (test_and_clear_bit(RXRPC_CALL_ACK_FINAL, &call->events))
+ if (test_and_clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events))
rxrpc_put_call(call);
- clear_bit(RXRPC_CALL_ACK, &call->events);
+ clear_bit(RXRPC_CALL_EV_ACK, &call->events);
maybe_reschedule:
if (call->events || !skb_queue_empty(&call->rx_queue)) {
read_lock_bh(&call->state_lock);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
rxrpc_queue_call(call);
read_unlock_bh(&call->state_lock);
}
read_lock(&call->state_lock);
switch (call->state) {
case RXRPC_CALL_LOCALLY_ABORTED:
- if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
+ if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
rxrpc_queue_call(call);
case RXRPC_CALL_REMOTELY_ABORTED:
read_unlock(&call->state_lock);
_debug("+++ ABORTING STATE %d +++\n", call->state);
call->state = RXRPC_CALL_LOCALLY_ABORTED;
call->abort_code = RX_CALL_DEAD;
- set_bit(RXRPC_CALL_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
rxrpc_queue_call(call);
}
write_unlock(&call->state_lock);
_debug("abort call %p", call);
call->state = RXRPC_CALL_LOCALLY_ABORTED;
call->abort_code = RX_CALL_DEAD;
- if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events))
+ if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
sched = true;
}
- if (!test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+ if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
sched = true;
if (sched)
rxrpc_queue_call(call);
_enter("{%d}", call->debug_id);
read_lock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE) {
- set_bit(RXRPC_CALL_LIFE_TIMER, &call->events);
+ set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
rxrpc_queue_call(call);
}
read_unlock_bh(&call->state_lock);
return;
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
- if (!test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
+ if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
rxrpc_queue_call(call);
}
read_lock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_ACK, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
rxrpc_queue_call(call);
read_unlock_bh(&call->state_lock);
}
call->state = state;
call->abort_code = abort_code;
if (state == RXRPC_CALL_LOCALLY_ABORTED)
- set_bit(RXRPC_CALL_CONN_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_CONN_ABORT, &call->events);
else
- set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
rxrpc_queue_call(call);
}
write_unlock(&call->state_lock);
if (call) {
read_lock(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_SECURED, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_SECURED, &call->events))
rxrpc_queue_call(call);
read_unlock(&call->state_lock);
}
if (call->state != RXRPC_CALL_COMPLETE &&
call->state < RXRPC_CALL_NETWORK_ERROR) {
call->state = RXRPC_CALL_NETWORK_ERROR;
- set_bit(RXRPC_CALL_RCVD_ERROR, &call->events);
+ set_bit(RXRPC_CALL_EV_RCVD_ERROR, &call->events);
rxrpc_queue_call(call);
}
write_unlock(&call->state_lock);
_debug("drain rx oos now");
read_lock(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_DRAIN_RX_OOS, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_DRAIN_RX_OOS, &call->events))
rxrpc_queue_call(call);
read_unlock(&call->state_lock);
}
call->acks_latest = serial;
_debug("implicit ACKALL %%%u", call->acks_latest);
- set_bit(RXRPC_CALL_RCVD_ACKALL, &call->events);
+ set_bit(RXRPC_CALL_EV_RCVD_ACKALL, &call->events);
write_unlock_bh(&call->state_lock);
if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
- clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
- clear_bit(RXRPC_CALL_RESEND, &call->events);
+ clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
+ clear_bit(RXRPC_CALL_EV_RESEND, &call->events);
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
}
break;
if (call->state < RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_REMOTELY_ABORTED;
call->abort_code = abort_code;
- set_bit(RXRPC_CALL_RCVD_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_RCVD_ABORT, &call->events);
rxrpc_queue_call(call);
}
goto free_packet_unlock;
switch (call->state) {
case RXRPC_CALL_CLIENT_SEND_REQUEST:
call->state = RXRPC_CALL_SERVER_BUSY;
- set_bit(RXRPC_CALL_RCVD_BUSY, &call->events);
+ set_bit(RXRPC_CALL_EV_RCVD_BUSY, &call->events);
rxrpc_queue_call(call);
case RXRPC_CALL_SERVER_BUSY:
goto free_packet_unlock;
if (call->state <= RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_LOCALLY_ABORTED;
call->abort_code = RX_PROTOCOL_ERROR;
- set_bit(RXRPC_CALL_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
rxrpc_queue_call(call);
}
free_packet_unlock:
if (call->state <= RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_LOCALLY_ABORTED;
call->abort_code = RX_PROTOCOL_ERROR;
- set_bit(RXRPC_CALL_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
rxrpc_queue_call(call);
}
write_unlock_bh(&call->state_lock);
read_lock(&call->state_lock);
switch (call->state) {
case RXRPC_CALL_LOCALLY_ABORTED:
- if (!test_and_set_bit(RXRPC_CALL_ABORT, &call->events)) {
+ if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events)) {
rxrpc_queue_call(call);
goto free_unlock;
}
/* resend last packet of a completed call */
_debug("final ack again");
rxrpc_get_call(call);
- set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
+ set_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
rxrpc_queue_call(call);
goto free_unlock;
default:
* Events that can be raised on a call.
*/
enum rxrpc_call_event {
- RXRPC_CALL_RCVD_ACKALL, /* ACKALL or reply received */
- RXRPC_CALL_RCVD_BUSY, /* busy packet received */
- RXRPC_CALL_RCVD_ABORT, /* abort packet received */
- RXRPC_CALL_RCVD_ERROR, /* network error received */
- RXRPC_CALL_ACK_FINAL, /* need to generate final ACK (and release call) */
- RXRPC_CALL_ACK, /* need to generate ACK */
- RXRPC_CALL_REJECT_BUSY, /* need to generate busy message */
- RXRPC_CALL_ABORT, /* need to generate abort */
- RXRPC_CALL_CONN_ABORT, /* local connection abort generated */
- RXRPC_CALL_RESEND_TIMER, /* Tx resend timer expired */
- RXRPC_CALL_RESEND, /* Tx resend required */
- RXRPC_CALL_DRAIN_RX_OOS, /* drain the Rx out of sequence queue */
- RXRPC_CALL_LIFE_TIMER, /* call's lifetimer ran out */
- RXRPC_CALL_ACCEPTED, /* incoming call accepted by userspace app */
- RXRPC_CALL_SECURED, /* incoming call's connection is now secure */
- RXRPC_CALL_POST_ACCEPT, /* need to post an "accept?" message to the app */
- RXRPC_CALL_RELEASE, /* need to release the call's resources */
+ RXRPC_CALL_EV_RCVD_ACKALL, /* ACKALL or reply received */
+ RXRPC_CALL_EV_RCVD_BUSY, /* busy packet received */
+ RXRPC_CALL_EV_RCVD_ABORT, /* abort packet received */
+ RXRPC_CALL_EV_RCVD_ERROR, /* network error received */
+ RXRPC_CALL_EV_ACK_FINAL, /* need to generate final ACK (and release call) */
+ RXRPC_CALL_EV_ACK, /* need to generate ACK */
+ RXRPC_CALL_EV_REJECT_BUSY, /* need to generate busy message */
+ RXRPC_CALL_EV_ABORT, /* need to generate abort */
+ RXRPC_CALL_EV_CONN_ABORT, /* local connection abort generated */
+ RXRPC_CALL_EV_RESEND_TIMER, /* Tx resend timer expired */
+ RXRPC_CALL_EV_RESEND, /* Tx resend required */
+ RXRPC_CALL_EV_DRAIN_RX_OOS, /* drain the Rx out of sequence queue */
+ RXRPC_CALL_EV_LIFE_TIMER, /* call's lifetimer ran out */
+ RXRPC_CALL_EV_ACCEPTED, /* incoming call accepted by userspace app */
+ RXRPC_CALL_EV_SECURED, /* incoming call's connection is now secure */
+ RXRPC_CALL_EV_POST_ACCEPT, /* need to post an "accept?" message to the app */
+ RXRPC_CALL_EV_RELEASE, /* need to release the call's resources */
};
/*
if (call->state < RXRPC_CALL_COMPLETE) {
call->abort_code = abort_code;
call->state = RXRPC_CALL_LOCALLY_ABORTED;
- set_bit(RXRPC_CALL_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
}
write_unlock_bh(&call->state_lock);
}
if (call->state <= RXRPC_CALL_COMPLETE) {
call->state = RXRPC_CALL_LOCALLY_ABORTED;
call->abort_code = abort_code;
- set_bit(RXRPC_CALL_ABORT, &call->events);
+ set_bit(RXRPC_CALL_EV_ABORT, &call->events);
del_timer_sync(&call->resend_timer);
del_timer_sync(&call->ack_timer);
- clear_bit(RXRPC_CALL_RESEND_TIMER, &call->events);
- clear_bit(RXRPC_CALL_ACK, &call->events);
+ clear_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events);
+ clear_bit(RXRPC_CALL_EV_ACK, &call->events);
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
rxrpc_queue_call(call);
}
if (try_to_del_timer_sync(&call->resend_timer) >= 0) {
clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
if (call->state < RXRPC_CALL_COMPLETE &&
- !test_and_set_bit(RXRPC_CALL_RESEND_TIMER, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
rxrpc_queue_call(call);
}
read_unlock_bh(&call->state_lock);
read_lock_bh(&call->state_lock);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
- !test_and_set_bit(RXRPC_CALL_RELEASE, &call->events))
+ !test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
rxrpc_queue_call(call);
read_unlock_bh(&call->state_lock);
}
/* get an extra ref on the call for the final-ACK generator to
* release */
rxrpc_get_call(call);
- set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
+ set_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events);
if (try_to_del_timer_sync(&call->ack_timer) >= 0)
rxrpc_queue_call(call);
break;
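
For reference, the idiom the rename touches throughout the hunks above is always the same: raise a bit in the call's event bitmask (call->events) with set_bit()/test_and_set_bit(), kick the call's worker with rxrpc_queue_call(), and have the worker consume the bit later with test_and_clear_bit(). Below is a minimal, non-atomic userspace sketch of that raise-and-queue pattern; struct mock_call, raise_event(), consume_event() and the MOCK_CALL_EV_* constants are hypothetical stand-ins for illustration only and are not part of the rxrpc API (only the EV_ naming mirrors the patch).

/* Non-atomic userspace mock of the event-bit idiom used by the patch.
 * All names here are illustrative stand-ins, not kernel interfaces. */
#include <stdbool.h>
#include <stdio.h>

enum mock_call_event {			/* mirrors the EV_ naming scheme */
	MOCK_CALL_EV_ACK,
	MOCK_CALL_EV_ABORT,
	MOCK_CALL_EV_RELEASE,
};

struct mock_call {
	unsigned long events;		/* one bit per pending event */
	bool queued;			/* stands in for rxrpc_queue_call() */
};

/* Like test_and_set_bit(), but non-atomic: returns the bit's old value. */
static bool raise_event(struct mock_call *call, int nr)
{
	unsigned long mask = 1UL << nr;
	bool was_set = call->events & mask;

	call->events |= mask;
	return was_set;
}

/* Like test_and_clear_bit() on the worker side, also non-atomic. */
static bool consume_event(struct mock_call *call, int nr)
{
	unsigned long mask = 1UL << nr;
	bool was_set = call->events & mask;

	call->events &= ~mask;
	return was_set;
}

int main(void)
{
	struct mock_call call = { 0 };

	/* Raise-and-queue: only queue the worker if the bit was newly set. */
	if (!raise_event(&call, MOCK_CALL_EV_ACK))
		call.queued = true;

	/* Worker side: consume the pending event and act on it. */
	if (consume_event(&call, MOCK_CALL_EV_ACK))
		printf("ACK handled, remaining events %#lx, queued=%d\n",
		       call.events, call.queued);
	return 0;
}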