rxrpc: Calls shouldn't hold socket refs

rxrpc calls shouldn't hold refs on the sock struct.  This was done so that
the socket wouldn't go away whilst the call was in progress, such that the
call could reach the socket's queues.

However, we can mark the socket as requiring an RCU release and rely on the
RCU read lock.

To make this work, we do:

 (1) rxrpc_release_call() removes the call's call user ID.  This is now
     only called from socket operations and not from the call processor:

	rxrpc_accept_call() / rxrpc_kernel_accept_call()
	rxrpc_reject_call() / rxrpc_kernel_reject_call()
	rxrpc_kernel_end_call()
	rxrpc_release_calls_on_socket()
	rxrpc_recvmsg()

     Though it is also called in the cleanup path of
     rxrpc_accept_incoming_call() before we assign a user ID.

 (2) Pass the socket pointer into rxrpc_release_call() rather than getting
     it from the call so that we can get rid of uninitialised calls.

 (3) Fix call processor queueing to pass a ref to the work queue and to
     release that ref at the end of the processor function (or to pass it
     back to the work queue if we have to requeue).

 (4) Skip out of the call processor function as soon as possible if the call
     is complete, and don't requeue a completed call.

 (5) Clean up the call as soon as its refcount reaches 0 rather than trying
     to defer it.  Actual deallocation is deferred to RCU, however.

 (6) Don't hold socket refs for allocated calls.

 (7) Use the RCU read lock when queueing a message on a socket, treat the
     call's socket pointer according to RCU rules and check it for NULL (a
     minimal sketch of this access pattern follows this list).

     We also need to use the RCU read lock when viewing a call through
     procfs.

 (8) Transmit the final ACK/ABORT to a client call in rxrpc_release_call()
     if this hasn't been done yet so that we can then disconnect the call.
     Once the call is disconnected, it no longer has access to the
     connection struct or the UDP socket, so the call work processor can't
     send the ACK itself.  Terminal retransmission will be handled by the
     connection processor.

 (9) Release all calls immediately on the closing of a socket rather than
     trying to defer this.  Incomplete calls will be aborted.
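
Below is a minimal sketch of the point (7) access pattern; it is not code
from the patch itself, and rxrpc_queue_a_message() plus its -ESHUTDOWN
return value are illustrative choices only.  Because the sock struct is
marked SOCK_RCU_FREE (see the af_rxrpc.c hunk below), holding the RCU read
lock keeps the socket from being freed whilst it is being used:

        #include <linux/skbuff.h>
        #include <net/sock.h>
        #include "ar-internal.h"  /* struct rxrpc_call, struct rxrpc_sock */

        static int rxrpc_queue_a_message(struct rxrpc_call *call,
                                         struct sk_buff *skb)
        {
                struct rxrpc_sock *rx;
                int ret = -ESHUTDOWN;

                rcu_read_lock();
                rx = rcu_dereference(call->socket);
                if (rx) {
                        /* Socket still live: queue on its receive queue. */
                        ret = sock_queue_rcv_skb(&rx->sk, skb);
                }
                rcu_read_unlock();
                return ret;
        }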

The call refcount model is much simplified.  Refs are held on the call by:

 (1) A socket's user ID tree.

 (2) A socket's incoming call secureq and acceptq.

 (3) A kernel service that has a call in progress.

 (4) A queued call work processor.  We have to take care to put any call
     that we failed to queue (see the queueing sketch after this list).

 (5) sk_buffs on a socket's receive queue.  A future patch will get rid of
     this.
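
The following is one plausible shape for the two queueing helpers declared
in ar-internal.h, __rxrpc_queue_call() and rxrpc_queue_call(); the bodies
are illustrative rather than the patch's exact code.  rxrpc_queue_call()
takes a new ref to donate to the work item, whilst __rxrpc_queue_call()
donates a ref the caller already holds; either way, a ref that couldn't be
handed to the work queue has to be put again:

        /* Queue the call's processor, donating a ref the caller holds. */
        bool __rxrpc_queue_call(struct rxrpc_call *call)
        {
                if (!rxrpc_queue_work(&call->processor)) {
                        /* Already queued; that queueing owns a ref, so drop
                         * the one we were given.
                         */
                        rxrpc_put_call(call, rxrpc_call_put);
                        return false;
                }
                return true;
        }

        /* Take a new ref on the call and queue its processor with it. */
        bool rxrpc_queue_call(struct rxrpc_call *call)
        {
                if (atomic_add_unless(&call->usage, 1, 0) == 0)
                        return false;   /* call is already dying */
                return __rxrpc_queue_call(call);
        }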

Whilst we're at it, we can do:

 (1) Get rid of the RXRPC_CALL_EV_RELEASE event.  Release is now done
     entirely from the socket routines and never from the call's processor.

 (2) Get rid of the RXRPC_CALL_DEAD state.  Calls now end in the
     RXRPC_CALL_COMPLETE state.

 (3) Get rid of the rxrpc_call::destroyer work item.  Calls are now torn
     down when their refcount reaches 0 and then handed over to RCU for
     final cleanup (sketched after this list).

 (4) Get rid of the rxrpc_call::deadspan timer.  Calls are cleaned up
     immediately they're finished with and don't hang around.
     Post-completion retransmission is handled by the connection processor
     once the call is disconnected.

 (5) Get rid of the dead call expiry setting as there's no longer a timer
     to set.

 (6) rxrpc_destroy_all_calls() can just check that the call list is empty.

Signed-off-by: David Howells <[email protected]>
dhowells committed Sep 7, 2016
1 parent 6543ac5 commit 8d94aa3
Showing 11 changed files with 303 additions and 279 deletions.
4 changes: 2 additions & 2 deletions net/rxrpc/af_rxrpc.c
@@ -294,8 +294,7 @@ EXPORT_SYMBOL(rxrpc_kernel_begin_call);
void rxrpc_kernel_end_call(struct socket *sock, struct rxrpc_call *call)
{
_enter("%d{%d}", call->debug_id, atomic_read(&call->usage));
rxrpc_remove_user_ID(rxrpc_sk(sock->sk), call);
rxrpc_purge_queue(&call->knlrecv_queue);
rxrpc_release_call(rxrpc_sk(sock->sk), call);
rxrpc_put_call(call, rxrpc_call_put);
}
EXPORT_SYMBOL(rxrpc_kernel_end_call);
@@ -558,6 +557,7 @@ static int rxrpc_create(struct net *net, struct socket *sock, int protocol,
return -ENOMEM;

sock_init_data(sock, sk);
sock_set_flag(sk, SOCK_RCU_FREE);
sk->sk_state = RXRPC_UNBOUND;
sk->sk_write_space = rxrpc_write_space;
sk->sk_max_ack_backlog = 0;
15 changes: 5 additions & 10 deletions net/rxrpc/ar-internal.h
@@ -35,8 +35,6 @@ struct rxrpc_crypt {
#define rxrpc_queue_delayed_work(WS,D) \
queue_delayed_work(rxrpc_workqueue, (WS), (D))

#define rxrpc_queue_call(CALL) rxrpc_queue_work(&(CALL)->processor)

struct rxrpc_connection;

/*
@@ -397,7 +395,6 @@ enum rxrpc_call_event {
RXRPC_CALL_EV_ACCEPTED, /* incoming call accepted by userspace app */
RXRPC_CALL_EV_SECURED, /* incoming call's connection is now secure */
RXRPC_CALL_EV_POST_ACCEPT, /* need to post an "accept?" message to the app */
RXRPC_CALL_EV_RELEASE, /* need to release the call's resources */
};

/*
@@ -417,7 +414,6 @@ enum rxrpc_call_state {
RXRPC_CALL_SERVER_SEND_REPLY, /* - server sending reply */
RXRPC_CALL_SERVER_AWAIT_ACK, /* - server awaiting final ACK */
RXRPC_CALL_COMPLETE, /* - call complete */
RXRPC_CALL_DEAD, /* - call is dead */
NR__RXRPC_CALL_STATES
};

@@ -442,12 +438,10 @@ struct rxrpc_call {
struct rcu_head rcu;
struct rxrpc_connection *conn; /* connection carrying call */
struct rxrpc_peer *peer; /* Peer record for remote address */
struct rxrpc_sock *socket; /* socket responsible */
struct rxrpc_sock __rcu *socket; /* socket responsible */
struct timer_list lifetimer; /* lifetime remaining on call */
struct timer_list deadspan; /* reap timer for re-ACK'ing, etc */
struct timer_list ack_timer; /* ACK generation timer */
struct timer_list resend_timer; /* Tx resend timer */
struct work_struct destroyer; /* call destroyer */
struct work_struct processor; /* packet processor and ACK generator */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
struct list_head link; /* link in master call list */
@@ -558,7 +552,6 @@ void rxrpc_process_call(struct work_struct *);
extern const char *const rxrpc_call_states[];
extern const char *const rxrpc_call_completions[];
extern unsigned int rxrpc_max_call_lifetime;
extern unsigned int rxrpc_dead_call_expiry;
extern struct kmem_cache *rxrpc_call_jar;
extern struct list_head rxrpc_calls;
extern rwlock_t rxrpc_call_lock;
@@ -571,8 +564,10 @@ struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *,
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *,
struct rxrpc_connection *,
struct sk_buff *);
void rxrpc_release_call(struct rxrpc_call *);
void rxrpc_release_call(struct rxrpc_sock *, struct rxrpc_call *);
void rxrpc_release_calls_on_socket(struct rxrpc_sock *);
bool __rxrpc_queue_call(struct rxrpc_call *);
bool rxrpc_queue_call(struct rxrpc_call *);
void rxrpc_see_call(struct rxrpc_call *);
void rxrpc_get_call(struct rxrpc_call *, enum rxrpc_call_trace);
void rxrpc_put_call(struct rxrpc_call *, enum rxrpc_call_trace);
@@ -835,6 +830,7 @@ extern const char *rxrpc_acks(u8 reason);
/*
* output.c
*/
int rxrpc_send_call_packet(struct rxrpc_call *, u8);
int rxrpc_send_data_packet(struct rxrpc_connection *, struct sk_buff *);

/*
@@ -880,7 +876,6 @@ extern const struct file_operations rxrpc_connection_seq_fops;
/*
* recvmsg.c
*/
void rxrpc_remove_user_ID(struct rxrpc_sock *, struct rxrpc_call *);
int rxrpc_recvmsg(struct socket *, struct msghdr *, size_t, int);

/*
55 changes: 15 additions & 40 deletions net/rxrpc/call_accept.c
@@ -163,13 +163,7 @@ static int rxrpc_accept_incoming_call(struct rxrpc_local *local,
_debug("invalid");
read_unlock_bh(&local->services_lock);

read_lock_bh(&call->state_lock);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
rxrpc_get_call(call, rxrpc_call_got);
rxrpc_queue_call(call);
}
read_unlock_bh(&call->state_lock);
rxrpc_release_call(rx, call);
rxrpc_put_call(call, rxrpc_call_put);
ret = -ECONNREFUSED;
error:
@@ -236,13 +230,11 @@ void rxrpc_accept_incoming_calls(struct rxrpc_local *local)
if (sk_acceptq_is_full(&rx->sk))
goto backlog_full;
sk_acceptq_added(&rx->sk);
sock_hold(&rx->sk);
read_unlock_bh(&local->services_lock);

ret = rxrpc_accept_incoming_call(local, rx, skb, &srx);
if (ret < 0)
sk_acceptq_removed(&rx->sk);
sock_put(&rx->sk);
switch (ret) {
case -ECONNRESET: /* old calls are ignored */
case -ECONNABORTED: /* aborted calls are reaborted or ignored */
@@ -333,9 +325,6 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
case RXRPC_CALL_COMPLETE:
ret = call->error;
goto out_release;
case RXRPC_CALL_DEAD:
ret = -ETIME;
goto out_discard;
default:
BUG();
}
@@ -350,24 +339,20 @@ struct rxrpc_call *rxrpc_accept_call(struct rxrpc_sock *rx,
BUG();
if (test_and_set_bit(RXRPC_CALL_EV_ACCEPTED, &call->events))
BUG();
rxrpc_queue_call(call);

write_unlock_bh(&call->state_lock);
write_unlock(&rx->call_lock);
rxrpc_queue_call(call);
_leave(" = %p{%d}", call, call->debug_id);
return call;

/* if the call is already dying or dead, then we leave the socket's ref
* on it to be released by rxrpc_dead_call_expired() as induced by
* rxrpc_release_call() */
out_release:
_debug("release %p", call);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
rxrpc_queue_call(call);
out_discard:
write_unlock_bh(&call->state_lock);
_debug("discard %p", call);
write_unlock(&rx->call_lock);
_debug("release %p", call);
rxrpc_release_call(rx, call);
_leave(" = %d", ret);
return ERR_PTR(ret);
out:
write_unlock(&rx->call_lock);
_leave(" = %d", ret);
Expand All @@ -390,8 +375,11 @@ int rxrpc_reject_call(struct rxrpc_sock *rx)
write_lock(&rx->call_lock);

ret = -ENODATA;
if (list_empty(&rx->acceptq))
goto out;
if (list_empty(&rx->acceptq)) {
write_unlock(&rx->call_lock);
_leave(" = -ENODATA");
return -ENODATA;
}

/* dequeue the first call and check it's still valid */
call = list_entry(rx->acceptq.next, struct rxrpc_call, accept_link);
@@ -407,30 +395,17 @@ int rxrpc_reject_call(struct rxrpc_sock *rx)
if (test_and_set_bit(RXRPC_CALL_EV_REJECT_BUSY, &call->events))
rxrpc_queue_call(call);
ret = 0;
goto out_release;
break;
case RXRPC_CALL_COMPLETE:
ret = call->error;
goto out_release;
case RXRPC_CALL_DEAD:
ret = -ETIME;
goto out_discard;
break;
default:
BUG();
}

/* if the call is already dying or dead, then we leave the socket's ref
* on it to be released by rxrpc_dead_call_expired() as induced by
* rxrpc_release_call() */
out_release:
_debug("release %p", call);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
rxrpc_queue_call(call);
out_discard:
write_unlock_bh(&call->state_lock);
_debug("discard %p", call);
out:
write_unlock(&rx->call_lock);
rxrpc_release_call(rx, call);
_leave(" = %d", ret);
return ret;
}
74 changes: 30 additions & 44 deletions net/rxrpc/call_event.c
@@ -811,8 +811,9 @@ static int rxrpc_post_message(struct rxrpc_call *call, u32 mark, u32 error,
}

/*
* handle background processing of incoming call packets and ACK / abort
* generation
* Handle background processing of incoming call packets and ACK / abort
* generation. A ref on the call is donated to us by whoever queued the work
* item.
*/
void rxrpc_process_call(struct work_struct *work)
{
@@ -827,6 +828,7 @@ void rxrpc_process_call(struct work_struct *work)
unsigned long bits;
__be32 data, pad;
size_t len;
bool requeue = false;
int loop, nbit, ioc, ret, mtu;
u32 serial, abort_code = RX_PROTOCOL_ERROR;
u8 *acks = NULL;
@@ -838,6 +840,11 @@ void rxrpc_process_call(struct work_struct *work)
call->debug_id, rxrpc_call_states[call->state], call->events,
(jiffies - call->creation_jif) / (HZ / 10));

if (call->state >= RXRPC_CALL_COMPLETE) {
rxrpc_put_call(call, rxrpc_call_put);
return;
}

if (!call->conn)
goto skip_msg_init;

@@ -1088,16 +1095,21 @@ void rxrpc_process_call(struct work_struct *work)
spin_lock_bh(&call->lock);

if (call->state == RXRPC_CALL_SERVER_SECURING) {
struct rxrpc_sock *rx;
_debug("securing");
write_lock(&call->socket->call_lock);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
!test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
_debug("not released");
call->state = RXRPC_CALL_SERVER_ACCEPTING;
list_move_tail(&call->accept_link,
&call->socket->acceptq);
rcu_read_lock();
rx = rcu_dereference(call->socket);
if (rx) {
write_lock(&rx->call_lock);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags)) {
_debug("not released");
call->state = RXRPC_CALL_SERVER_ACCEPTING;
list_move_tail(&call->accept_link,
&rx->acceptq);
}
write_unlock(&rx->call_lock);
}
write_unlock(&call->socket->call_lock);
rcu_read_unlock();
read_lock(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE)
set_bit(RXRPC_CALL_EV_POST_ACCEPT, &call->events);
@@ -1139,11 +1151,6 @@ void rxrpc_process_call(struct work_struct *work)
goto maybe_reschedule;
}

if (test_bit(RXRPC_CALL_EV_RELEASE, &call->events)) {
rxrpc_release_call(call);
clear_bit(RXRPC_CALL_EV_RELEASE, &call->events);
}

/* other events may have been raised since we started checking */
goto maybe_reschedule;

@@ -1209,10 +1216,8 @@ void rxrpc_process_call(struct work_struct *work)
&msg, iov, ioc, len);
if (ret < 0) {
_debug("sendmsg failed: %d", ret);
read_lock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_DEAD)
rxrpc_queue_call(call);
read_unlock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE)
requeue = true;
goto error;
}

@@ -1245,41 +1250,22 @@ void rxrpc_process_call(struct work_struct *work)

kill_ACKs:
del_timer_sync(&call->ack_timer);
if (test_and_clear_bit(RXRPC_CALL_EV_ACK_FINAL, &call->events))
rxrpc_put_call(call, rxrpc_call_put);
clear_bit(RXRPC_CALL_EV_ACK, &call->events);

maybe_reschedule:
if (call->events || !skb_queue_empty(&call->rx_queue)) {
read_lock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_DEAD)
rxrpc_queue_call(call);
read_unlock_bh(&call->state_lock);
}

/* don't leave aborted connections on the accept queue */
if (call->state >= RXRPC_CALL_COMPLETE &&
!list_empty(&call->accept_link)) {
_debug("X unlinking once-pending call %p { e=%lx f=%lx c=%x }",
call, call->events, call->flags, call->conn->proto.cid);

read_lock_bh(&call->state_lock);
if (!test_bit(RXRPC_CALL_RELEASED, &call->flags) &&
!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
rxrpc_queue_call(call);
read_unlock_bh(&call->state_lock);
if (call->state < RXRPC_CALL_COMPLETE)
requeue = true;
}

error:
kfree(acks);

/* because we don't want two CPUs both processing the work item for one
* call at the same time, we use a flag to note when it's busy; however
* this means there's a race between clearing the flag and setting the
* work pending bit and the work item being processed again */
if (call->events && !work_pending(&call->processor)) {
if ((requeue || call->events) && !work_pending(&call->processor)) {
_debug("jumpstart %x", call->conn->proto.cid);
rxrpc_queue_call(call);
__rxrpc_queue_call(call);
} else {
rxrpc_put_call(call, rxrpc_call_put);
}

_leave("");