1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48#include "xprt_rdma.h"
49
50#include <linux/highmem.h>
51
52#ifdef RPC_DEBUG
53# define RPCDBG_FACILITY RPCDBG_TRANS
54#endif
55
/*
 * How an RPC call's argument (read direction) or result (write
 * direction) payload moves across the RDMA transport.  Selected in
 * rpcrdma_marshal_req() and encoded by rpcrdma_create_chunks().
 */
enum rpcrdma_chunktype {
	rpcrdma_noch = 0,	/* no chunks: everything fits inline */
	rpcrdma_readch,		/* payload sent via read chunk(s) */
	rpcrdma_areadch,	/* entire message as read chunks (position 0) */
	rpcrdma_writech,	/* reply data returned via write chunk(s) */
	rpcrdma_replych		/* entire reply returned via a reply chunk */
};
63
#ifdef RPC_DEBUG
/*
 * Human-readable names for the transfer modes above, indexed by
 * enum rpcrdma_chunktype.  Used only in dprintk() output.
 */
static const char transfertypes[][12] = {
	"pure inline",	/* rpcrdma_noch */
	" read chunk",	/* rpcrdma_readch */
	"*read chunk",	/* rpcrdma_areadch */
	"write chunk",	/* rpcrdma_writech */
	"reply chunk"	/* rpcrdma_replych */
};
#endif
73
74
75
76
77
78
79
80
81
82
83
84
85static int
86rpcrdma_convert_iovs(struct xdr_buf *xdrbuf, unsigned int pos,
87 enum rpcrdma_chunktype type, struct rpcrdma_mr_seg *seg, int nsegs)
88{
89 int len, n = 0, p;
90
91 if (pos == 0 && xdrbuf->head[0].iov_len) {
92 seg[n].mr_page = NULL;
93 seg[n].mr_offset = xdrbuf->head[0].iov_base;
94 seg[n].mr_len = xdrbuf->head[0].iov_len;
95 ++n;
96 }
97
98 if (xdrbuf->page_len && (xdrbuf->pages[0] != NULL)) {
99 if (n == nsegs)
100 return 0;
101 seg[n].mr_page = xdrbuf->pages[0];
102 seg[n].mr_offset = (void *)(unsigned long) xdrbuf->page_base;
103 seg[n].mr_len = min_t(u32,
104 PAGE_SIZE - xdrbuf->page_base, xdrbuf->page_len);
105 len = xdrbuf->page_len - seg[n].mr_len;
106 ++n;
107 p = 1;
108 while (len > 0) {
109 if (n == nsegs)
110 return 0;
111 seg[n].mr_page = xdrbuf->pages[p];
112 seg[n].mr_offset = NULL;
113 seg[n].mr_len = min_t(u32, PAGE_SIZE, len);
114 len -= seg[n].mr_len;
115 ++n;
116 ++p;
117 }
118 }
119
120 if (xdrbuf->tail[0].iov_len) {
121
122
123 if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize)
124 return n;
125 if (n == nsegs)
126 return 0;
127 seg[n].mr_page = NULL;
128 seg[n].mr_offset = xdrbuf->tail[0].iov_base;
129 seg[n].mr_len = xdrbuf->tail[0].iov_len;
130 ++n;
131 }
132
133 return n;
134}
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
/*
 * Build the chunk lists of the transport header at *headerp for one
 * RPC.  Each segment of the target xdr_buf is registered with the
 * adapter and encoded as a {handle, length, offset} triplet in either
 * a read chunk list or a write/reply chunk array, per @type.
 *
 * Returns the byte length of the completed header, or 0 on failure
 * (any registrations already made are torn down before returning).
 */
static unsigned int
rpcrdma_create_chunks(struct rpc_rqst *rqst, struct xdr_buf *target,
	struct rpcrdma_msg *headerp, enum rpcrdma_chunktype type)
{
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_task->tk_xprt);
	int nsegs, nchunks = 0;
	unsigned int pos;
	struct rpcrdma_mr_seg *seg = req->rl_segments;
	struct rpcrdma_read_chunk *cur_rchunk = NULL;
	struct rpcrdma_write_array *warray = NULL;
	struct rpcrdma_write_chunk *cur_wchunk = NULL;
	__be32 *iptr = headerp->rm_body.rm_chunks;

	if (type == rpcrdma_readch || type == rpcrdma_areadch) {
		/* a read chunk - server will RDMA Read our memory */
		cur_rchunk = (struct rpcrdma_read_chunk *) iptr;
	} else {
		/* a write or reply chunk - server will RDMA Write our memory */
		*iptr++ = xdr_zero;	/* encode a NULL read chunk list */
		if (type == rpcrdma_replych)
			*iptr++ = xdr_zero;	/* and a NULL write chunk list */
		warray = (struct rpcrdma_write_array *) iptr;
		cur_wchunk = (struct rpcrdma_write_chunk *) (warray + 1);
	}

	/* Position-zero chunks describe the whole message; otherwise the
	 * chunked data begins just past the head iovec. */
	if (type == rpcrdma_replych || type == rpcrdma_areadch)
		pos = 0;
	else
		pos = target->head[0].iov_len;

	nsegs = rpcrdma_convert_iovs(target, pos, type, seg, RPCRDMA_MAX_SEGS);
	if (nsegs == 0)
		return 0;

	do {
		/* Register the memory, then build a chunk from the result.
		 * n is how many segments this registration consumed. */
		int n = rpcrdma_register_external(seg, nsegs,
						cur_wchunk != NULL, r_xprt);
		if (n <= 0)
			goto out;
		if (cur_rchunk) {	/* read chunk */
			cur_rchunk->rc_discrim = xdr_one;
			/* all read chunks in this list share one position */
			cur_rchunk->rc_position = htonl(pos);
			cur_rchunk->rc_target.rs_handle = htonl(seg->mr_rkey);
			cur_rchunk->rc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_rchunk->rc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: read chunk "
				"elem %d@0x%llx:0x%x pos %u (%s)\n", __func__,
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, pos, n < nsegs ? "more" : "last");
			cur_rchunk++;
			r_xprt->rx_stats.read_chunk_count++;
		} else {		/* write or reply chunk */
			cur_wchunk->wc_target.rs_handle = htonl(seg->mr_rkey);
			cur_wchunk->wc_target.rs_length = htonl(seg->mr_len);
			xdr_encode_hyper(
					(__be32 *)&cur_wchunk->wc_target.rs_offset,
					seg->mr_base);
			dprintk("RPC: %s: %s chunk "
				"elem %d@0x%llx:0x%x (%s)\n", __func__,
				(type == rpcrdma_replych) ? "reply" : "write",
				seg->mr_len, (unsigned long long)seg->mr_base,
				seg->mr_rkey, n < nsegs ? "more" : "last");
			cur_wchunk++;
			if (type == rpcrdma_replych)
				r_xprt->rx_stats.reply_chunk_count++;
			else
				r_xprt->rx_stats.write_chunk_count++;
			r_xprt->rx_stats.total_rdma_request += seg->mr_len;
		}
		nchunks++;
		seg += n;
		nsegs -= n;
	} while (nsegs);

	/* success; all failure paths return above */
	req->rl_nchunks = nchunks;

	BUG_ON(nchunks == 0);

	/*
	 * Finish off the header.  Terminate the read chunk list, or
	 * fill in the write array discriminator and element count.
	 */
	if (cur_rchunk) {
		iptr = (__be32 *) cur_rchunk;
		*iptr++ = xdr_zero;	/* finish the read chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL write chunk list */
		*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
	} else {
		warray->wc_discrim = xdr_one;
		warray->wc_nchunks = htonl(nchunks);
		iptr = (__be32 *) cur_wchunk;
		if (type == rpcrdma_writech) {
			*iptr++ = xdr_zero;	/* finish the write chunk list */
			*iptr++ = xdr_zero;	/* encode a NULL reply chunk */
		}
	}

	/* Header length = distance from the start of the header to the
	 * first byte past the last encoded word. */
	return (unsigned char *)iptr - (unsigned char *)headerp;

out:
	/* Registration failed part-way: undo the ones already done.
	 * (pos is reused here purely as a segment index accumulator.) */
	for (pos = 0; nchunks--;)
		pos += rpcrdma_deregister_external(
				&req->rl_segments[pos], r_xprt, NULL);
	return 0;
}
282
283
284
285
286
287
288
289
/*
 * Copy write data inline: pull the page list and tail of the send
 * buffer up into the pre-registered first iovec (rq_svec[0]) so the
 * whole request can be sent as a single inline message.  Used for
 * "small" requests, below the inline threshold.
 *
 * @pad: candidate alignment pad size (RPCRDMA_INLINE_PAD_VALUE).
 * Returns the pad actually to be used (0 if padding doesn't apply).
 */
static int
rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
{
	int i, npages, curlen;
	int copy_len;
	unsigned char *srcp, *destp;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);

	/* destp starts just past the already-marshaled RPC header */
	destp = rqst->rq_svec[0].iov_base;
	curlen = rqst->rq_svec[0].iov_len;
	destp += curlen;

	/*
	 * Do optional padding where it makes sense.  Disable it when the
	 * request is too small for alignment to help, or the pad budget
	 * is exhausted.
	 * NOTE(review): the constant 36 appears to account for the
	 * RDMA_MSGP header overhead ahead of the payload -- confirm
	 * against the padded-header layout in rpcrdma_marshal_req().
	 */
	pad -= (curlen + 36);
	if (pad < 0 || rqst->rq_slen - curlen < RPCRDMA_INLINE_PAD_THRESH)
		pad = 0;	/* don't pad this request */

	dprintk("RPC: %s: pad %d destp 0x%p len %d hdrlen %d\n",
		__func__, pad, destp, rqst->rq_slen, curlen);

	/* Copy the page list into the header buffer, page by page. */
	copy_len = rqst->rq_snd_buf.page_len;
	r_xprt->rx_stats.pullup_copy_count += copy_len;
	npages = PAGE_ALIGN(rqst->rq_snd_buf.page_base+copy_len) >> PAGE_SHIFT;
	for (i = 0; copy_len && i < npages; i++) {
		/* first page may start at a nonzero page_base offset */
		if (i == 0)
			curlen = PAGE_SIZE - rqst->rq_snd_buf.page_base;
		else
			curlen = PAGE_SIZE;
		if (curlen > copy_len)
			curlen = copy_len;
		dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
			__func__, i, destp, copy_len, curlen);
		srcp = kmap_atomic(rqst->rq_snd_buf.pages[i],
					KM_SKB_SUNRPC_DATA);
		if (i == 0)
			memcpy(destp, srcp+rqst->rq_snd_buf.page_base, curlen);
		else
			memcpy(destp, srcp, curlen);
		kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA);
		/* grow the inline iovec to cover the copied bytes */
		rqst->rq_svec[0].iov_len += curlen;
		destp += curlen;
		copy_len -= curlen;
	}
	/* Append the tail, unless it is already in place. */
	if (rqst->rq_snd_buf.tail[0].iov_len) {
		curlen = rqst->rq_snd_buf.tail[0].iov_len;
		if (destp != rqst->rq_snd_buf.tail[0].iov_base) {
			memcpy(destp,
				rqst->rq_snd_buf.tail[0].iov_base, curlen);
			r_xprt->rx_stats.pullup_copy_count += curlen;
		}
		dprintk("RPC: %s: tail destp 0x%p len %d curlen %d\n",
			__func__, destp, copy_len, curlen);
		rqst->rq_svec[0].iov_len += curlen;
	}

	return pad;
}
349
350
351
352
353
354
355
356
357
358
359
360
361
362
/*
 * Marshal a request: choose the transfer modes (chunk types) for
 * arguments and results, build the RPC-over-RDMA header, and set up
 * the send iovec list.
 *
 * Uses up to four send IOVs per request:
 *  [0] -- the RPC-over-RDMA header (from req->rl_base)
 *  [1] -- the RPC header/data already marshaled into rq_svec[0]
 *  [2] -- optional pre-registered, zeroed pad buffer
 *  [3] -- if padded, the write data following the pad
 *
 * Returns 0 on success, -1 on failure.
 */
int
rpcrdma_marshal_req(struct rpc_rqst *rqst)
{
	struct rpc_xprt *xprt = rqst->rq_task->tk_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
	char *base;
	size_t hdrlen, rpclen, padlen;
	enum rpcrdma_chunktype rtype, wtype;
	struct rpcrdma_msg *headerp;

	/*
	 * rpclen gets the amount of data in the first buffer, which is
	 * the pre-registered buffer holding the marshaled RPC.
	 */
	base = rqst->rq_svec[0].iov_base;
	rpclen = rqst->rq_svec[0].iov_len;

	/* build the RDMA header in the private area at the front */
	headerp = (struct rpcrdma_msg *) req->rl_base;
	/* don't htonl the XID - it is already in network byte order */
	headerp->rm_xid = rqst->rq_xid;
	headerp->rm_vers = xdr_one;
	headerp->rm_credit = htonl(r_xprt->rx_buf.rb_max_requests);
	headerp->rm_type = htonl(RDMA_MSG);

	/*
	 * Chunks needed for results?
	 *
	 * o Replies under the inline threshold come back inline.
	 * o Large replies with no page data use a single reply chunk.
	 * o Large read-class replies (XDRBUF_READ) return the data as
	 *   write chunk(s), header inline.
	 * o Anything else large uses a reply chunk.
	 */
	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
		wtype = rpcrdma_noch;
	else if (rqst->rq_rcv_buf.page_len == 0)
		wtype = rpcrdma_replych;
	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
		wtype = rpcrdma_writech;
	else
		wtype = rpcrdma_replych;

	/*
	 * Chunks needed for arguments?
	 *
	 * o Requests under the inline threshold are sent inline.
	 * o Large requests with no page data go entirely as read
	 *   chunks at position zero (rpcrdma_areadch).
	 * o Large requests with page data send that data as read
	 *   chunk(s), header inline.
	 */
	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
		rtype = rpcrdma_noch;
	else if (rqst->rq_snd_buf.page_len == 0)
		rtype = rpcrdma_areadch;
	else
		rtype = rpcrdma_readch;

	/* Never both read and write chunks on one request: drop the
	 * reply chunk when read chunks are in use (the reply will fit
	 * the space freed in the header). */
	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
		wtype = rpcrdma_noch;
	BUG_ON(rtype != rpcrdma_noch && wtype != rpcrdma_noch);

	if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_BOUNCEBUFFERS &&
	    (rtype != rpcrdma_noch || wtype != rpcrdma_noch)) {
		/* bounce buffers force "pure inline" - chunks unsupported */
		dprintk("RPC: %s: too much data (%d/%d) for inline\n",
			__func__, rqst->rq_rcv_buf.len, rqst->rq_snd_buf.len);
		return -1;
	}

	hdrlen = 28;	/* fixed header size: sizeof *headerp */
	padlen = 0;

	/*
	 * Pull up any extra send data into the preregistered buffer.
	 * When padding is in use and applies to the transfer, insert
	 * the pad and change the message type to RDMA_MSGP.
	 */
	if (rtype == rpcrdma_noch) {

		padlen = rpcrdma_inline_pullup(rqst,
						RPCRDMA_INLINE_PAD_VALUE(rqst));

		if (padlen) {
			headerp->rm_type = htonl(RDMA_MSGP);
			headerp->rm_body.rm_padded.rm_align =
				htonl(RPCRDMA_INLINE_PAD_VALUE(rqst));
			headerp->rm_body.rm_padded.rm_thresh =
				htonl(RPCRDMA_INLINE_PAD_THRESH);
			/* three empty chunk lists follow the pad fields */
			headerp->rm_body.rm_padded.rm_pempty[0] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
			headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
			hdrlen += 2 * sizeof(u32);	/* extra words in padhdr */
			BUG_ON(wtype != rpcrdma_noch);

		} else {
			/* encode three NULL chunk lists */
			headerp->rm_body.rm_nochunks.rm_empty[0] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[1] = xdr_zero;
			headerp->rm_body.rm_nochunks.rm_empty[2] = xdr_zero;
			/* new length after pullup */
			rpclen = rqst->rq_svec[0].iov_len;
			/*
			 * Prefer a reply chunk wherever feasible: reply
			 * chunks land packed directly in the target
			 * buffers without headers, requiring no fixup,
			 * at no extra on-the-wire or receive cost.
			 * (Only when registration is cheap enough, i.e.
			 * strategy better than plain RPCRDMA_REGISTER.)
			 */
			if (wtype == rpcrdma_noch &&
			    r_xprt->rx_ia.ri_memreg_strategy > RPCRDMA_REGISTER)
				wtype = rpcrdma_replych;
		}
	}

	/*
	 * Marshal the chunks.  rpcrdma_create_chunks() returns the
	 * header length consumed, or 0 on failure.
	 */
	if (rtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_snd_buf, headerp, rtype);
		wtype = rtype;	/* simplify the dprintk below */

	} else if (wtype != rpcrdma_noch) {
		hdrlen = rpcrdma_create_chunks(rqst,
					&rqst->rq_rcv_buf, headerp, wtype);
	}

	if (hdrlen == 0)
		return -1;

	dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
		" headerp 0x%p base 0x%p lkey 0x%x\n",
		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
		headerp, base, req->rl_iov.lkey);

	/*
	 * Initialize send iovs: normally just the RDMA header and the
	 * preregistered RPC buffer; with padding, also the zeroed pad
	 * buffer and the write data following it.
	 */
	req->rl_send_iov[0].addr = req->rl_iov.addr;
	req->rl_send_iov[0].length = hdrlen;
	req->rl_send_iov[0].lkey = req->rl_iov.lkey;

	req->rl_send_iov[1].addr = req->rl_iov.addr + (base - req->rl_base);
	req->rl_send_iov[1].length = rpclen;
	req->rl_send_iov[1].lkey = req->rl_iov.lkey;

	req->rl_niovs = 2;

	if (padlen) {
		struct rpcrdma_ep *ep = &r_xprt->rx_ep;

		req->rl_send_iov[2].addr = ep->rep_pad.addr;
		req->rl_send_iov[2].length = padlen;
		req->rl_send_iov[2].lkey = ep->rep_pad.lkey;

		req->rl_send_iov[3].addr = req->rl_send_iov[1].addr + rpclen;
		req->rl_send_iov[3].length = rqst->rq_slen - rpclen;
		req->rl_send_iov[3].lkey = req->rl_iov.lkey;

		req->rl_niovs = 4;
	}

	return 0;
}
553
554
555
556
557
558static int
559rpcrdma_count_chunks(struct rpcrdma_rep *rep, unsigned int max, int wrchunk, __be32 **iptrp)
560{
561 unsigned int i, total_len;
562 struct rpcrdma_write_chunk *cur_wchunk;
563
564 i = ntohl(**iptrp);
565 if (i > max)
566 return -1;
567 cur_wchunk = (struct rpcrdma_write_chunk *) (*iptrp + 1);
568 total_len = 0;
569 while (i--) {
570 struct rpcrdma_segment *seg = &cur_wchunk->wc_target;
571 ifdebug(FACILITY) {
572 u64 off;
573 xdr_decode_hyper((__be32 *)&seg->rs_offset, &off);
574 dprintk("RPC: %s: chunk %d@0x%llx:0x%x\n",
575 __func__,
576 ntohl(seg->rs_length),
577 (unsigned long long)off,
578 ntohl(seg->rs_handle));
579 }
580 total_len += ntohl(seg->rs_length);
581 ++cur_wchunk;
582 }
583
584 if (wrchunk) {
585 __be32 *w = (__be32 *) cur_wchunk;
586 if (*w++ != xdr_zero)
587 return -1;
588 cur_wchunk = (struct rpcrdma_write_chunk *) w;
589 }
590 if ((char *) cur_wchunk > rep->rr_base + rep->rr_len)
591 return -1;
592
593 *iptrp = (__be32 *) cur_wchunk;
594 return total_len;
595}
596
597
598
599
/*
 * Scatter inline received data back into the receive buffer's iovecs.
 *
 * @srcp:     start of the inline reply data (just past the RDMA header)
 * @copy_len: number of inline bytes available at srcp
 * @pad:      number of zero bytes to append to the tail (XDR round-up
 *            of the terminal RDMA chunk, per the caller)
 */
static void
rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
{
	int i, npages, curlen, olen;
	char *destp;

	curlen = rqst->rq_rcv_buf.head[0].iov_len;
	if (curlen > copy_len) {	/* less data arrived than the head holds */
		curlen = copy_len;
		rqst->rq_rcv_buf.head[0].iov_len = curlen;
	}

	dprintk("RPC: %s: srcp 0x%p len %d hdrlen %d\n",
		__func__, srcp, copy_len, curlen);

	/* The head segment is not copied - just point it at the data. */
	rqst->rq_rcv_buf.head[0].iov_base = srcp;
	srcp += curlen;
	copy_len -= curlen;

	olen = copy_len;	/* remember for stats and page_len fixup */
	i = 0;
	rpcx_to_rdmax(rqst->rq_xprt)->rx_stats.fixup_copy_count += olen;
	if (copy_len && rqst->rq_rcv_buf.page_len) {
		npages = PAGE_ALIGN(rqst->rq_rcv_buf.page_base +
			rqst->rq_rcv_buf.page_len) >> PAGE_SHIFT;
		for (; i < npages; i++) {
			/* first page may start at a nonzero page_base */
			if (i == 0)
				curlen = PAGE_SIZE - rqst->rq_rcv_buf.page_base;
			else
				curlen = PAGE_SIZE;
			if (curlen > copy_len)
				curlen = copy_len;
			dprintk("RPC: %s: page %d"
				" srcp 0x%p len %d curlen %d\n",
				__func__, i, srcp, copy_len, curlen);
			destp = kmap_atomic(rqst->rq_rcv_buf.pages[i],
					KM_SKB_SUNRPC_DATA);
			if (i == 0)
				memcpy(destp + rqst->rq_rcv_buf.page_base,
						srcp, curlen);
			else
				memcpy(destp, srcp, curlen);
			flush_dcache_page(rqst->rq_rcv_buf.pages[i]);
			kunmap_atomic(destp, KM_SKB_SUNRPC_DATA);
			srcp += curlen;
			copy_len -= curlen;
			if (copy_len == 0)
				break;
		}
		rqst->rq_rcv_buf.page_len = olen - copy_len;
	} else
		rqst->rq_rcv_buf.page_len = 0;

	/* Whatever remains goes to the tail. */
	if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) {
		curlen = copy_len;
		if (curlen > rqst->rq_rcv_buf.tail[0].iov_len)
			curlen = rqst->rq_rcv_buf.tail[0].iov_len;
		if (rqst->rq_rcv_buf.tail[0].iov_base != srcp)
			memcpy(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen);
		dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n",
			__func__, srcp, copy_len, curlen);
		rqst->rq_rcv_buf.tail[0].iov_len = curlen;
		copy_len -= curlen; ++i;
	} else
		rqst->rq_rcv_buf.tail[0].iov_len = 0;

	if (pad) {
		/* implicit padding on terminal chunk - zero-fill the tail */
		unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base;
		while (pad--)
			p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0;
	}

	if (copy_len)
		dprintk("RPC: %s: %d bytes in"
			" %d extra segments (%d lost)\n",
			__func__, olen, i, copy_len);

	/* Keep the private copy in sync so call_decode() sees the
	 * adjusted buffer geometry. */
	rqst->rq_private_buf = rqst->rq_rcv_buf;
}
682
683
684
685
686
687
688
689void
690rpcrdma_conn_func(struct rpcrdma_ep *ep)
691{
692 struct rpc_xprt *xprt = ep->rep_xprt;
693
694 spin_lock_bh(&xprt->transport_lock);
695 if (++xprt->connect_cookie == 0)
696 ++xprt->connect_cookie;
697 if (ep->rep_connected > 0) {
698 if (!xprt_test_and_set_connected(xprt))
699 xprt_wake_pending_tasks(xprt, 0);
700 } else {
701 if (xprt_test_and_clear_connected(xprt))
702 xprt_wake_pending_tasks(xprt, -ENOTCONN);
703 }
704 spin_unlock_bh(&xprt->transport_lock);
705}
706
707
708
709
710
/*
 * Installed as rep->rr_func in the RPCRDMA_MEMWINDOWS deregister path
 * of rpcrdma_reply_handler(); presumably invoked when the final
 * unbind completes.  Wakes whoever sleeps on rep->rr_unbind.
 */
static void
rpcrdma_unbind_func(struct rpcrdma_rep *rep)
{
	wake_up(&rep->rr_unbind);
}
716
717
718
719
720
721
/*
 * Called on receive completion.  Validates the RPC-over-RDMA header,
 * matches the reply to its rpc_rqst by XID, fixes up any inline data,
 * starts memory-window deregistration where needed, and completes the
 * RPC request.
 */
void
rpcrdma_reply_handler(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *headerp;
	struct rpcrdma_req *req;
	struct rpc_rqst *rqst;
	struct rpc_xprt *xprt = rep->rr_xprt;
	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
	__be32 *iptr;
	int i, rdmalen, status;

	/* Check status. If bad, signal disconnect and return rep to pool */
	if (rep->rr_len == ~0U) {
		rpcrdma_recv_buffer_put(rep);
		if (r_xprt->rx_ep.rep_connected == 1) {
			r_xprt->rx_ep.rep_connected = -EIO;
			rpcrdma_conn_func(&r_xprt->rx_ep);
		}
		return;
	}
	if (rep->rr_len < 28) {	/* shorter than the fixed RDMA header */
		dprintk("RPC: %s: short/invalid reply\n", __func__);
		goto repost;
	}
	headerp = (struct rpcrdma_msg *) rep->rr_base;
	if (headerp->rm_vers != xdr_one) {
		dprintk("RPC: %s: invalid version %d\n",
			__func__, ntohl(headerp->rm_vers));
		goto repost;
	}

	/* Get XID and try for a match. */
	spin_lock(&xprt->transport_lock);
	rqst = xprt_lookup_rqst(xprt, headerp->rm_xid);
	if (rqst == NULL) {
		spin_unlock(&xprt->transport_lock);
		dprintk("RPC: %s: reply 0x%p failed "
			"to match any request xid 0x%08x len %d\n",
			__func__, rep, headerp->rm_xid, rep->rr_len);
repost:
		/* count it, re-arm the receive buffer, and bail.
		 * (All paths reaching this label hold no locks.) */
		r_xprt->rx_stats.bad_reply_count++;
		rep->rr_func = rpcrdma_reply_handler;
		if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, &r_xprt->rx_ep, rep))
			rpcrdma_recv_buffer_put(rep);

		return;
	}

	/* get request object */
	req = rpcr_to_rdmar(rqst);

	dprintk("RPC: %s: reply 0x%p completes request 0x%p\n"
		" RPC request 0x%p xid 0x%08x\n",
		__func__, rep, req, rqst, headerp->rm_xid);

	BUG_ON(!req || req->rl_reply);

	/* from here on, the reply is no longer an orphan */
	req->rl_reply = rep;

	/* check for expected message types */
	/* The order of some of these tests is important. */
	switch (headerp->rm_type) {
	case htonl(RDMA_MSG):
		/* never expect read chunks;
		 * never expect reply chunks (two ways to check);
		 * never expect write chunks without having offered RDMA */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    (headerp->rm_body.rm_chunks[1] == xdr_zero &&
		     headerp->rm_body.rm_chunks[2] != xdr_zero) ||
		    (headerp->rm_body.rm_chunks[1] != xdr_zero &&
		     req->rl_nchunks == 0))
			goto badheader;
		if (headerp->rm_body.rm_chunks[1] != xdr_zero) {
			/* count any expected write chunks in the reply;
			 * start at the write chunk array count */
			iptr = &headerp->rm_body.rm_chunks[2];
			rdmalen = rpcrdma_count_chunks(rep,
						req->rl_nchunks, 1, &iptr);
			/* check validity, and no reply chunk after */
			if (rdmalen < 0 || *iptr++ != xdr_zero)
				goto badheader;
			rep->rr_len -=
			    ((unsigned char *)iptr - (unsigned char *)headerp);
			status = rep->rr_len + rdmalen;
			r_xprt->rx_stats.total_rdma_reply += rdmalen;
			/* special case - last chunk may omit padding;
			 * round the RDMA length up to the XDR boundary */
			if (rdmalen &= 3) {
				rdmalen = 4 - rdmalen;
				status += rdmalen;
			}
		} else {
			/* ordinary inline reply */
			rdmalen = 0;
			iptr = (__be32 *)((unsigned char *)headerp + 28);
			rep->rr_len -= 28;	/* sizeof *headerp */
			status = rep->rr_len;
		}
		/* Fix up the rpc results for the upper layer */
		rpcrdma_inline_fixup(rqst, (char *)iptr, rep->rr_len, rdmalen);
		break;

	case htonl(RDMA_NOMSG):
		/* never expect read or write chunks, always reply chunks */
		if (headerp->rm_body.rm_chunks[0] != xdr_zero ||
		    headerp->rm_body.rm_chunks[1] != xdr_zero ||
		    headerp->rm_body.rm_chunks[2] != xdr_one ||
		    req->rl_nchunks == 0)
			goto badheader;
		iptr = (__be32 *)((unsigned char *)headerp + 28);
		rdmalen = rpcrdma_count_chunks(rep, req->rl_nchunks, 0, &iptr);
		if (rdmalen < 0)
			goto badheader;
		r_xprt->rx_stats.total_rdma_reply += rdmalen;
		/* Reply chunk data landed directly in the target buffers -
		 * no fixup needed */
		status = rdmalen;
		break;

badheader:
	default:
		dprintk("%s: invalid rpcrdma reply header (type %d):"
			" chunks[012] == %d %d %d"
			" expected chunks <= %d\n",
			__func__, ntohl(headerp->rm_type),
			headerp->rm_body.rm_chunks[0],
			headerp->rm_body.rm_chunks[1],
			headerp->rm_body.rm_chunks[2],
			req->rl_nchunks);
		status = -EIO;
		r_xprt->rx_stats.bad_reply_count++;
		break;
	}

	/* If using memory windows, start the deregister process now. */
	if (req->rl_nchunks) switch (r_xprt->rx_ia.ri_memreg_strategy) {
	case RPCRDMA_MEMWINDOWS:
		/* deregister all but the last chunk synchronously ... */
		for (i = 0; req->rl_nchunks-- > 1;)
			i += rpcrdma_deregister_external(
				&req->rl_segments[i], r_xprt, NULL);
		/* ... and hand rep to the last unbind so the waiter in
		 * rpcrdma_unbind_func can be woken on completion */
		rep->rr_func = rpcrdma_unbind_func;
		(void) rpcrdma_deregister_external(&req->rl_segments[i],
						   r_xprt, rep);
		break;
	case RPCRDMA_MEMWINDOWS_ASYNC:
		for (i = 0; req->rl_nchunks--;)
			i += rpcrdma_deregister_external(&req->rl_segments[i],
							 r_xprt, NULL);
		break;
	default:
		break;
	}

	dprintk("RPC: %s: xprt_complete_rqst(0x%p, 0x%p, %d)\n",
		__func__, xprt, rqst, status);
	xprt_complete_rqst(rqst->rq_task, status);
	spin_unlock(&xprt->transport_lock);
}