xprtrdma: Move credit update to RPC reply handler
Reduce work in the receive CQ handler, which can be run at hardware
interrupt level, by moving the RPC/RDMA credit update logic to the
RPC reply handler.

This has some additional benefits: More header sanity checking is
done before trusting the incoming credit value, and the receive CQ
handler no longer touches the RPC/RDMA header (the CPU stalls while
waiting for the header contents to be brought into the cache).

This further extends work begun by commit e7ce710 ("xprtrdma:
Avoid deadlock when credit window is reset").

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Steve Wise <swise@opengridcomputing.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
chucklever authored and amschuma-ntap committed Jan 30, 2015
1 parent 3eb3581 commit eba8ff6
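
Note (not part of the commit): a minimal user-space sketch of the credit accounting this patch centralizes in rpcrdma_reply_handler(). The credit value advertised in the RPC/RDMA header is clamped to at least 1 (so the transport cannot deadlock on a zero window) and at most rb_max_requests, then scaled into the transport congestion window. The helper name and the standalone form are illustrative only, and RPC_CWNDSHIFT is assumed to be 8, matching the sunrpc definition of RPC_CWNDSCALE.

/* Illustrative only: mirrors the clamping now done in the reply handler. */
#include <stdio.h>

#define RPC_CWNDSHIFT 8U	/* assumed value, as in include/linux/sunrpc/xprt.h */

/* Hypothetical helper, not part of the patch. */
static unsigned long rdma_credits_to_cwnd(unsigned int credits,
					  unsigned int max_requests)
{
	if (credits == 0)
		credits = 1;			/* don't deadlock */
	else if (credits > max_requests)
		credits = max_requests;		/* never exceed what we can post */
	return (unsigned long)credits << RPC_CWNDSHIFT;
}

int main(void)
{
	printf("%lu\n", rdma_credits_to_cwnd(32, 128));		/* 8192 */
	printf("%lu\n", rdma_credits_to_cwnd(0, 128));		/* 256: zero treated as one credit */
	printf("%lu\n", rdma_credits_to_cwnd(500, 128));	/* 32768: clamped to max_requests */
	return 0;
}

If the resulting window is larger than the previous one, the reply handler releases tasks waiting on congestion via xprt_release_rqst_cong(), as shown in the rpc_rdma.c hunk below.
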
Showing 3 changed files with 10 additions and 16 deletions.
net/sunrpc/xprtrdma/rpc_rdma.c (8 additions, 2 deletions)

@@ -736,7 +736,7 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 	struct rpc_xprt *xprt = rep->rr_xprt;
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
 	__be32 *iptr;
-	int rdmalen, status;
+	int credits, rdmalen, status;
 	unsigned long cwnd;
 
 	/* Check status. If bad, signal disconnect and return rep to pool */
@@ -871,8 +871,14 @@ rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 		break;
 	}
 
+	credits = be32_to_cpu(headerp->rm_credit);
+	if (credits == 0)
+		credits = 1;	/* don't deadlock */
+	else if (credits > r_xprt->rx_buf.rb_max_requests)
+		credits = r_xprt->rx_buf.rb_max_requests;
+
 	cwnd = xprt->cwnd;
-	xprt->cwnd = atomic_read(&r_xprt->rx_buf.rb_credits) << RPC_CWNDSHIFT;
+	xprt->cwnd = credits << RPC_CWNDSHIFT;
 	if (xprt->cwnd > cwnd)
 		xprt_release_rqst_cong(rqst->rq_task);
 

net/sunrpc/xprtrdma/verbs.c (2 additions, 13 deletions)

@@ -49,6 +49,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/slab.h>
+#include <linux/prefetch.h>
 #include <asm/bitops.h>
 
 #include "xprt_rdma.h"
@@ -298,17 +299,7 @@ rpcrdma_recvcq_process_wc(struct ib_wc *wc, struct list_head *sched_list)
 	rep->rr_len = wc->byte_len;
 	ib_dma_sync_single_for_cpu(rdmab_to_ia(rep->rr_buffer)->ri_id->device,
 				   rep->rr_iov.addr, rep->rr_len, DMA_FROM_DEVICE);
-
-	if (rep->rr_len >= 16) {
-		struct rpcrdma_msg *p = (struct rpcrdma_msg *)rep->rr_base;
-		unsigned int credits = ntohl(p->rm_credit);
-
-		if (credits == 0)
-			credits = 1;	/* don't deadlock */
-		else if (credits > rep->rr_buffer->rb_max_requests)
-			credits = rep->rr_buffer->rb_max_requests;
-		atomic_set(&rep->rr_buffer->rb_credits, credits);
-	}
+	prefetch(rep->rr_base);
 
 out_schedule:
 	list_add_tail(&rep->rr_list, sched_list);
@@ -480,7 +471,6 @@ rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 		connstate = -ENODEV;
 connected:
-		atomic_set(&rpcx_to_rdmax(ep->rep_xprt)->rx_buf.rb_credits, 1);
 		dprintk("RPC:       %s: %sconnected\n",
 			__func__, connstate > 0 ? "" : "dis");
 		ep->rep_connected = connstate;
@@ -1186,7 +1176,6 @@ rpcrdma_buffer_create(struct rpcrdma_buffer *buf, struct rpcrdma_ep *ep,
 
 	buf->rb_max_requests = cdata->max_requests;
 	spin_lock_init(&buf->rb_lock);
-	atomic_set(&buf->rb_credits, 1);
 
 	/* Need to allocate:
 	 * 1. arrays for send and recv pointers

net/sunrpc/xprtrdma/xprt_rdma.h (0 additions, 1 deletion)

@@ -248,7 +248,6 @@ struct rpcrdma_req {
  */
 struct rpcrdma_buffer {
 	spinlock_t	rb_lock;	/* protects indexes */
-	atomic_t	rb_credits;	/* most recent server credits */
 	int		rb_max_requests;/* client max requests */
 	struct list_head rb_mws;	/* optional memory windows/fmrs/frmrs */
 	struct list_head rb_all;
