1 /*
2  * Copyright (c) 2014, Cisco Systems, Inc. All rights reserved.
3  *
4  * LICENSE_BEGIN
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
28  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
29  * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
30  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
31  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
32  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
33  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
35  * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  *
38  * LICENSE_END
39  *
40  *
41  */
42 
43 #include <stdlib.h>
44 #include <stdio.h>
45 #include <string.h>
46 #include <errno.h>
47 #include <sys/mman.h>
48 #include <fcntl.h>
49 #include <unistd.h>
50 
51 #include <netinet/in.h>
52 #include <infiniband/verbs.h>
53 
54 #include "kcompat.h"
55 #include "cq_enet_desc.h"
56 #include "wq_enet_desc.h"
57 #include "rq_enet_desc.h"
58 
59 #include "usnic_abi.h"
60 #include "usnic_direct.h"
61 #include "usd.h"
62 #include "usd_ib_cmd.h"
63 #include "usd_util.h"
64 #include "usd_vnic.h"
65 #include "usd_device.h"
66 
67 static int usd_create_qp_ud(struct usd_qp_impl *qp);
68 
69 /*
70  * Remove a usecount on a VF, free it if it goes to zero
71  */
72 static void
usd_unmap_vf(struct usd_device * dev,struct usd_vf * vf)73 usd_unmap_vf(
74     struct usd_device *dev,
75     struct usd_vf *vf)
76 {
77     uint32_t i;
78     --vf->vf_refcnt;
79 
80     if (vf->vf_refcnt == 0) {
81 
82         /* unlink from list (logic works for uninit struct also) */
83         if (vf->vf_next != NULL)
84             vf->vf_next->vf_prev = vf->vf_prev;
85         if (vf->vf_prev != NULL)
86             vf->vf_prev->vf_next = vf->vf_next;
87         if (dev->ud_vf_list == vf)
88             dev->ud_vf_list = vf->vf_next;
89 
90         if (vf->vf_vdev != NULL)
91             vnic_dev_unregister(vf->vf_vdev);
92         if (vf->vf_bar0.vaddr != MAP_FAILED) {
93             munmap(vf->vf_bar0.vaddr, vf->vf_bar_map_len);
94         }
95         for (i = 0; i < sizeof(vf->iomaps)/sizeof(vf->iomaps[0]); i++) {
96             if (vf->iomaps[i].bus_addr != 0 &&
97                     vf->iomaps[i].vaddr != MAP_FAILED) {
98                 munmap(vf->iomaps[i].vaddr, vf->iomaps[i].len);
99             }
100         }
101 
102         free(vf);
103     }
104 }
105 
106 static int
usd_map_one_res(struct usd_device * dev,struct usd_vf * vf,struct usnic_vnic_barres_info * barres)107 usd_map_one_res(struct usd_device *dev, struct usd_vf *vf,
108                     struct usnic_vnic_barres_info *barres)
109 {
110     struct vnic_dev_iomap_info* iomap;
111     off64_t offset;
112     uint64_t page_size = sysconf(_SC_PAGE_SIZE);
113 
114     iomap = &vf->iomaps[barres->type];
115     iomap->bus_addr = barres->bus_addr;
116     iomap->len = (barres->len + (page_size - 1)) & (~(page_size - 1));
117 
118     offset = USNIC_ENCODE_PGOFF(vf->vf_id, USNIC_MMAP_RES, barres->type);
119     iomap->vaddr = mmap64(NULL, iomap->len, PROT_READ + PROT_WRITE,
120                         MAP_SHARED, dev->ud_ctx->ucx_ib_dev_fd, offset);
121     if (iomap->vaddr == MAP_FAILED) {
122         usd_err("Failed to map res type %d, bus_addr 0x%lx, len 0x%lx\n",
123                 barres->type, iomap->bus_addr, iomap->len);
124         return -errno;
125     }
126     vnic_dev_upd_res_vaddr(vf->vf_vdev, iomap);
127 
128     return 0;
129 }
130 
131 static int
usd_map_vnic_res(struct usd_device * dev,struct usd_vf * vf,struct usd_vf_info * vfip)132 usd_map_vnic_res(struct usd_device *dev, struct usd_vf *vf,
133                     struct usd_vf_info *vfip)
134 {
135     int i, err;
136 
137     /* unmap bar0 */
138     if (vf->vf_bar0.vaddr != MAP_FAILED) {
139         munmap(vf->vf_bar0.vaddr, vf->vf_bar_map_len);
140         vf->vf_bar0.vaddr = MAP_FAILED;
141     }
142 
143     for (i = RES_TYPE_EOL + 1; i < RES_TYPE_MAX; i++) {
144         if (vfip->barres[i].bus_addr != 0) {
145             err = usd_map_one_res(dev, vf, &vfip->barres[i]);
146             if (err)
147                 return err;
148         } else {
149             /* Disable any other res not reported by kernel module */
150             struct vnic_dev_iomap_info iomap;
151             iomap.vaddr = 0;
152             iomap.bus_addr = vnic_dev_get_res_bus_addr(
153                                         vf->vf_vdev, i, 0);
154             iomap.len = vnic_dev_get_res_type_len(
155                                         vf->vf_vdev, i);
156             vnic_dev_upd_res_vaddr(vf->vf_vdev, &iomap);
157         }
158     }
159 
160     return 0;
161 }
162 
/*
 * Create a VF structure if we don't already have one in use,
 * update refcnt
 *
 * Looks up vfip->vi_vfid in the device's VF list.  On a miss: allocate
 * a new VF, mmap its BAR (BARHEAD variant when the kernel reports one),
 * register it with the vnic library, optionally switch to per-resource
 * mappings, and link it into the list.  On any failure the partially
 * built VF is released through usd_unmap_vf().  Returns 0 or -errno.
 */
static int
usd_map_vf(
    struct usd_device *dev,
    struct usd_vf_info *vfip,
    struct usd_vf **vf_o)
{
    struct usd_vf *vf;
    off64_t offset;
    int ret;

    /* find matching VF */
    vf = dev->ud_vf_list;
    while (vf != NULL) {
        if (vf->vf_id == vfip->vi_vfid) break;
        vf = vf->vf_next;
    }

    /* Was VF actually found? If not, create and add */
    if (vf == NULL) {
        /* zero-filled: iomaps[] bus_addrs start 0, list links NULL,
         * which usd_unmap_vf's cleanup logic relies on */
        vf = calloc(sizeof(*vf), 1);
        if (vf == NULL) {
            ret = -errno;
            goto out;
        }

        /* Fill in function */
        vf->vf_id = vfip->vi_vfid;
        vf->vf_refcnt = 1;
        vf->vf_bar0.bus_addr = vfip->vi_bar_bus_addr;
        vf->vf_bar0.len = vfip->vi_bar_len;

        /* map BAR0 HEAD first to get res info */
        if (vfip->vi_barhead_len > 0) {
            offset = USNIC_ENCODE_PGOFF(vf->vf_id, USNIC_MMAP_BARHEAD, 0);
            vf->vf_bar_map_len = vfip->vi_barhead_len;
        } else {
            offset = USNIC_ENCODE_PGOFF(vf->vf_id, USNIC_MMAP_BAR, 0);
            vf->vf_bar_map_len = vfip->vi_bar_len;
        }
        vf->vf_bar0.vaddr = mmap64(NULL, vf->vf_bar_map_len,
                                 PROT_READ + PROT_WRITE, MAP_SHARED,
                                 dev->ud_ctx->ucx_ib_dev_fd,
                                 offset);
        if (vf->vf_bar0.vaddr == MAP_FAILED) {
            usd_err("Failed to map bar0\n");
            ret = -errno;
            goto out;
        }

        /* Register it */
        vf->vf_vdev = vnic_dev_alloc_discover(NULL, NULL, (void *)dev,
                                        &vf->vf_bar0, 1);
        if (vf->vf_vdev == NULL) {
            ret = -ENOENT;
            goto out;
        }

        /* map individual vnic resource separately */
        if (dev->ud_ctx->ucx_caps[USNIC_CAP_MAP_PER_RES] > 0) {
            ret = usd_map_vnic_res(dev, vf, vfip);
            if (ret)
                goto out;
        }

        /* link it in at the head of the device's VF list */
        vf->vf_next = dev->ud_vf_list;
        dev->ud_vf_list = vf;

        if (vf->vf_next != NULL)
            vf->vf_next->vf_prev = vf;
        vf->vf_prev = NULL;

    /* Found existing VF, bump reference count */
    } else {
        ++vf->vf_refcnt;
    }

    *vf_o = vf;

    return 0;

 out:
    /* drops the refcount taken above; frees the partial VF */
    if (vf != NULL)
        usd_unmap_vf(dev, vf);
    return ret;
}
253 
254 static void
usd_get_vf(struct usd_vf * vf)255 usd_get_vf(
256     struct usd_vf *vf)
257 {
258     ++vf->vf_refcnt;
259 }
260 
/*
 * Get a cq interrupt source
 *
 * Interrupt sources are shared per-context, keyed by interrupt offset:
 * if one already exists for cq->intr_offset its refcount is bumped,
 * otherwise a new one is allocated from the VF, its host interrupt
 * registers are initialized, and it is inserted into the context list.
 * Returns the source, or NULL on allocation failure.
 */
static struct usd_cq_comp_intr *
usd_get_cq_intr(
    struct usd_cq_impl *cq,
    struct usd_vf *vf)
{
    struct usd_context *uctx;
    struct usd_cq_comp_intr *intr;
    int ret;

    uctx = cq->ucq_dev->ud_ctx;

    /* ucx_mutex guards the per-context interrupt list */
    pthread_mutex_lock(&uctx->ucx_mutex);
    LIST_FOREACH(intr, &uctx->ucx_intr_list, uci_ctx_link) {
        if (intr->uci_offset == cq->intr_offset) {
            intr->uci_refcnt ++;
            goto out;
        }
    }

    intr = calloc(sizeof(*intr), 1);
    if (intr != NULL) {
        ret = vnic_grpmbrintr_alloc(vf->vf_vdev, &intr->uci_vintr,
                                        cq->intr_offset);
        if (ret) {
            usd_err("Failed to alloc cq completion intr\n");
            free(intr);
            pthread_mutex_unlock(&uctx->ucx_mutex);
            return NULL;
        }

        /* init host interrupt registers */
        iowrite32(0, &intr->uci_vintr.ctrl->coalescing_timer);
        iowrite32(0, &intr->uci_vintr.ctrl->coalescing_type);
        iowrite32(1, &intr->uci_vintr.ctrl->mask_on_assertion);
        iowrite32(0, &intr->uci_vintr.ctrl->int_credits);
        iowrite32(0, &intr->uci_vintr.ctrl->mask);    /* unmask */

        intr->uci_offset = cq->intr_offset;
        intr->uci_refcnt = 1;
        LIST_INSERT_HEAD(&uctx->ucx_intr_list, intr, uci_ctx_link);
    }

out:
    pthread_mutex_unlock(&uctx->ucx_mutex);
    /* NOTE: NULL here means calloc failed (errno not propagated) */
    return intr;
}
310 
311 /*
312  * put a cq interrupt source
313  */
314 static void
usd_put_cq_intr(struct usd_cq_impl * cq)315 usd_put_cq_intr(
316     struct usd_cq_impl *cq)
317 {
318     struct usd_context *uctx;
319     struct usd_cq_comp_intr *intr;
320 
321     uctx = cq->ucq_dev->ud_ctx;
322 
323     pthread_mutex_lock(&uctx->ucx_mutex);
324     LIST_FOREACH(intr, &uctx->ucx_intr_list, uci_ctx_link) {
325         if (intr->uci_offset == cq->intr_offset) {
326             intr->uci_refcnt--;
327             if (intr->uci_refcnt == 0)
328                 vnic_grpmbrintr_free(&intr->uci_vintr);
329             break;
330         }
331     }
332 
333     if (intr != NULL) {
334         LIST_REMOVE(intr, uci_ctx_link);
335         free(intr);
336     }
337     pthread_mutex_unlock(&uctx->ucx_mutex);
338 }
339 
340 
341 
342 /*
343  * Function that does whatever is needed to make a CQ go away
344  */
345 int
usd_destroy_cq(struct usd_cq * ucq)346 usd_destroy_cq(
347     struct usd_cq *ucq)
348 {
349     struct usd_cq_impl *cq;
350 
351     cq = to_cqi(ucq);
352 
353     if (cq->ucq_intr != NULL) {
354         usd_put_cq_intr(cq);
355         cq->ucq_intr = NULL;
356     }
357     if (cq->ucq_state & USD_QS_VERBS_CREATED)
358         usd_ib_cmd_destroy_cq(cq->ucq_dev, cq);
359 
360     if (cq->ucq_state & USD_QS_VF_MAPPED)
361         usd_unmap_vf(cq->ucq_dev, cq->ucq_vf);
362 
363     if (cq->ucq_desc_ring != NULL)
364         usd_free_mr(cq->ucq_desc_ring);
365     if (cq->ucq_rq_map != NULL)
366         free(cq->ucq_rq_map);
367     if (cq->ucq_wq_map != NULL)
368         free(cq->ucq_wq_map);
369     free(cq);
370 
371     return 0;
372 }
373 
/*
 * Program the VIC work-queue hardware for this WQ: locate its control
 * registers, quiesce the queue, point it at the descriptor ring, size
 * it, reset the indices, and bind it to its completion queue.
 * Returns 0 or a negative errno.
 */
static int
usd_vnic_wq_init(
    struct usd_wq *wq,
    struct usd_vf *vf,
    uint64_t desc_ring)
{
    struct vnic_wq *vwq;
    int ret;

    vwq = &wq->uwq_vnic_wq;

    /* get address of control register */
    vwq->ctrl = vnic_dev_get_res(vf->vf_vdev, RES_TYPE_WQ, wq->uwq_index);
    if (vwq->ctrl == NULL)
        return -EINVAL;

    /* queue must be disabled before its registers are reprogrammed */
    ret = vnic_wq_disable(vwq);
    if (ret != 0)
        return ret;

    writeq(desc_ring, &vwq->ctrl->ring_base);
    iowrite32(wq->uwq_num_entries, &vwq->ctrl->ring_size);
    iowrite32(0, &vwq->ctrl->fetch_index);
    iowrite32(0, &vwq->ctrl->posted_index);
    /* completions for this WQ are delivered to its CQ */
    iowrite32(wq->uwq_cq->ucq_index, &vwq->ctrl->cq_index);
    iowrite32(0, &vwq->ctrl->error_interrupt_enable);
    iowrite32(0, &vwq->ctrl->error_interrupt_offset);
    iowrite32(0, &vwq->ctrl->error_status);

    wq->uwq_state |= USD_QS_VNIC_INITIALIZED;
    wq->uwq_next_desc = wq->uwq_desc_ring;
    /* N-1 credits; presumably one slot stays unused - TODO confirm */
    wq->uwq_send_credits = wq->uwq_num_entries - 1;

    return 0;
}
409 
410 /*
411  * Allocate the resources for a previously created WQ for UD QP
412  */
413 static int
usd_create_wq_ud(struct usd_qp_impl * qp)414 usd_create_wq_ud(
415     struct usd_qp_impl *qp)
416 {
417     struct usd_wq *wq;
418     uint32_t ring_size;
419     int ret;
420 
421     wq = &qp->uq_wq;
422 
423     /* Allocate resources for WQ */
424     ring_size = sizeof(struct wq_enet_desc) * wq->uwq_num_entries;
425     ret = usd_alloc_mr(qp->uq_dev, ring_size, (void **)&wq->uwq_desc_ring);
426     if (ret != 0)
427         return ret;
428 
429     ret = usd_vnic_wq_init(wq, qp->uq_vf, (uint64_t)wq->uwq_desc_ring);
430     if (ret != 0)
431         goto out;
432 
433     return 0;
434 
435 out:
436     if (wq->uwq_desc_ring != NULL) {
437         usd_free_mr(wq->uwq_desc_ring);
438         wq->uwq_desc_ring = NULL;
439     }
440     return ret;
441 }
442 
/*
 * Allocate the resources for a previously created WQ that sends via
 * PIO (descriptors and packet data written directly into VIC memory).
 *
 * Lays out the PIO region as: [size word][512-aligned WQ ring]
 * [64-aligned packet buffers], keeps a host-memory shadow copy of the
 * ring, and programs the hardware ring at the PIO physical address.
 * Returns 0 or a negative errno.
 */
static int
usd_create_wq_pio(
    struct usd_qp_impl *qp)
{
    uint32_t pio_memsize;
    uint32_t used_size;
    uint32_t ring_size;
    void *pio_vaddr;
    uint64_t pio_paddr;
    uint64_t ivaddr;
    struct usd_wq *wq;
    struct usd_device *dev;
    int ret;

    dev = qp->uq_dev;
    /* PIO needs both the capability flag and a mapped MEM resource */
    if (dev->ud_ctx->ucx_caps[USNIC_CAP_PIO] == 0 ||
        vnic_dev_get_res_bus_addr(qp->uq_vf->vf_vdev, RES_TYPE_MEM, 0) == 0) {
        usd_err("dev does not support PIO\n");
        return -ENODEV;
    }

    pio_memsize = vnic_dev_get_res_count(qp->uq_vf->vf_vdev, RES_TYPE_MEM);
    pio_vaddr = vnic_dev_get_res(qp->uq_vf->vf_vdev, RES_TYPE_MEM, 0);

    /* get the physical address of the PIO region for the hardware */
    ret = usd_get_piopa(qp);
    if (ret != 0)
        return ret;
    pio_paddr = qp->uq_attrs.uqa_pio_paddr;

    /* 512-byte alignment must match */
    if ((((uint64_t)pio_vaddr ^ pio_paddr) & 511) != 0) {
        fprintf(stderr, "Alignment mismatch, %p vs 0x%lx, cannot do PIO\n",
                pio_vaddr, pio_paddr);
        return -ENXIO;
    }

    /* skip past size */
    ivaddr = (uintptr_t)pio_vaddr;
    ivaddr += sizeof(uint64_t);

    /* round up to 512 bytes */
    ivaddr = (ivaddr + 511) & ~511;

    /* WQ ring goes first.  Allow space for 64-byte write of last desc */
    wq = &qp->uq_wq;
    ring_size = wq->uwq_num_entries * sizeof(struct wq_enet_desc);
    ring_size += 64 - sizeof(struct wq_enet_desc);
    wq->pio_v_wq_addr = (void *)ivaddr;
    /* same offset within the region, expressed as a physical address */
    wq->pio_p_wq_addr = pio_paddr + ivaddr - (uint64_t)pio_vaddr;
    ivaddr += ring_size;

    /* round up to 64 bytes */
    ivaddr = (ivaddr + 63) & ~63;

    /* we keep a copy of the ring, also */
    ret = usd_alloc_mr(qp->uq_dev, ring_size, (void **)&wq->uwq_desc_ring);
    if (ret != 0)
        return ret;

    /* packet buffer: 256 bytes per WQ entry */
    wq->pio_v_pkt_buf = (void *)ivaddr;
    wq->pio_p_pkt_buf = pio_paddr + ivaddr - (uint64_t)pio_vaddr;
    ivaddr += wq->uwq_num_entries * 256;

    /* the full layout must fit within the VIC's PIO memory */
    used_size = ivaddr - (uintptr_t)pio_vaddr;
    if (used_size > pio_memsize) {
        ret = -ENOMEM;
        goto out;
    }

    /* hardware ring lives at the PIO physical address, not host memory */
    ret = usd_vnic_wq_init(wq, qp->uq_vf, wq->pio_p_wq_addr);
    if (ret != 0)
        goto out;

    return 0;

out:
    if (wq->uwq_desc_ring != NULL) {
        usd_free_mr(wq->uwq_desc_ring);
        wq->uwq_desc_ring = NULL;
    }
    return ret;
}
529 
530 /*
531  * Allocate the resources for a previously created WQ
532  */
533 static int
usd_create_wq(struct usd_qp_impl * qp)534 usd_create_wq(
535     struct usd_qp_impl *qp)
536 {
537     struct usd_wq *wq;
538     int ret;
539 
540     switch (qp->uq_attrs.uqa_qtype) {
541     case USD_QTY_UD_PIO:
542         ret = usd_create_wq_pio(qp);
543         break;
544     case USD_QTY_UD:
545         ret = usd_create_wq_ud(qp);
546         break;
547     default:
548         ret = -1;
549         break;
550     }
551 
552     if (ret == 0) {
553         wq = &qp->uq_wq;
554         wq->uwq_post_index_mask = (wq->uwq_num_entries-1);
555         wq->uwq_post_index = 1;
556         wq->uwq_last_comp = (wq->uwq_num_entries-1);
557     }
558 
559     return ret;
560 }
561 
/*
 * Program the VIC receive-queue hardware for this RQ: locate its
 * control registers, quiesce the queue, point it at the descriptor
 * ring, size it, reset the indices, and bind it to its CQ.
 * Returns 0 or a negative errno.
 */
static int
usd_vnic_rq_init(
    struct usd_rq *rq,
    struct usd_vf *vf,
    uint64_t desc_ring)
{
    struct vnic_rq *vrq;
    int ret;

    vrq = &rq->urq_vnic_rq;

    /* get address of control register */
    vrq->ctrl = vnic_dev_get_res(vf->vf_vdev, RES_TYPE_RQ, rq->urq_index);
    if (vrq->ctrl == NULL)
        return -EINVAL;

    /* queue must be disabled before its registers are reprogrammed */
    ret = vnic_rq_disable(vrq);
    if (ret != 0)
        return ret;

    writeq(desc_ring, &vrq->ctrl->ring_base);
    iowrite32(rq->urq_num_entries, &vrq->ctrl->ring_size);
    iowrite32(0, &vrq->ctrl->fetch_index);
    iowrite32(0, &vrq->ctrl->posted_index);
    /* completions for this RQ are delivered to its CQ */
    iowrite32(rq->urq_cq->ucq_index, &vrq->ctrl->cq_index);
    iowrite32(0, &vrq->ctrl->error_interrupt_enable);
    iowrite32(0, &vrq->ctrl->error_interrupt_offset);
    iowrite32(0, &vrq->ctrl->error_status);

    rq->urq_state |= USD_QS_VNIC_INITIALIZED;
    rq->urq_next_desc = rq->urq_desc_ring;
    /* N-1 credits; presumably one slot stays unused - TODO confirm */
    rq->urq_recv_credits = rq->urq_num_entries - 1;

    return 0;
}
597 
598 /*
599  * Allocate the resources for a previously created RQ
600  */
601 static int
usd_create_rq(struct usd_qp_impl * qp)602 usd_create_rq(struct usd_qp_impl *qp)
603 {
604     struct usd_rq *rq;
605     uint32_t ring_size;
606     int ret;
607 
608     rq = &qp->uq_rq;
609 
610     /* Allocate resources for RQ */
611     ring_size = sizeof(struct rq_enet_desc) * rq->urq_num_entries;
612     ret = usd_alloc_mr(qp->uq_dev, ring_size, (void **)&rq->urq_desc_ring);
613     if (ret != 0)
614         return ret;
615 
616     ret = usd_vnic_rq_init(rq, qp->uq_vf, (uint64_t)rq->urq_desc_ring);
617     if (ret != 0)
618         goto out;
619 
620     rq->urq_post_index_mask = (rq->urq_num_entries-1);
621     rq->urq_post_index = 0;
622     rq->urq_last_comp = (rq->urq_num_entries-1);
623 
624     return 0;
625 out:
626     if (rq->urq_desc_ring != NULL) {
627         usd_free_mr(rq->urq_desc_ring);
628         rq->urq_desc_ring = NULL;
629     }
630     return ret;
631 }
632 
633 static int
usd_vnic_disable_qp(struct usd_qp_impl * qp)634 usd_vnic_disable_qp(
635     struct usd_qp_impl *qp)
636 {
637     struct usd_rq *rq;
638     struct usd_wq *wq;
639     int ret;
640 
641     wq = &qp->uq_wq;
642     rq = &qp->uq_rq;
643 
644     /* disable both queues */
645     ret = vnic_wq_disable(&wq->uwq_vnic_wq);
646     if (ret != 0)
647         return ret;
648     ret = vnic_rq_disable(&rq->urq_vnic_rq);
649 
650     return ret;
651 }
652 
653 static void
usd_vnic_enable_qp(struct usd_qp_impl * qp)654 usd_vnic_enable_qp(
655     struct usd_qp_impl *qp)
656 {
657     struct usd_rq *rq;
658     struct usd_wq *wq;
659 
660     wq = &qp->uq_wq;
661     rq = &qp->uq_rq;
662 
663     vnic_rq_enable(&rq->urq_vnic_rq);
664     vnic_wq_enable(&wq->uwq_vnic_wq);
665 }
666 
667 /*
668  * QP has been created and resources allocated.  Issue the IB commands to
669  * change the state to INIT/RTR/RTS to trigger filter creation and enable the
670  * QP to send and receive traffic.
671  */
672 static int
usd_enable_verbs_qp(struct usd_qp_impl * qp)673 usd_enable_verbs_qp(
674     struct usd_qp_impl *qp)
675 {
676     struct usd_rq *rq;
677     struct usd_wq *wq;
678     struct usd_device *dev;
679     int ret;
680 
681     dev = qp->uq_dev;
682     wq = &qp->uq_wq;
683     rq = &qp->uq_rq;
684 
685     /* XXX is this really necessary? */
686     ret = usd_vnic_disable_qp(qp);
687     if (ret != 0) {
688         goto out;
689     }
690 
691     /* state to INIT */
692     ret = usd_ib_cmd_modify_qp(dev, qp, IBV_QPS_INIT);
693     if (ret != 0) {
694         goto out;
695     }
696 
697     /* state to "ready to receive," enable rq */
698     ret = usd_ib_cmd_modify_qp(dev, qp, IBV_QPS_RTR);
699     if (ret != 0) {
700         goto out;
701     }
702 
703     /* state to "ready to send," enable wq */
704     ret = usd_ib_cmd_modify_qp(dev, qp, IBV_QPS_RTS);
705     if (ret != 0) {
706         goto out;
707     }
708 
709     usd_vnic_enable_qp(qp);
710     rq->urq_state |= USD_QS_READY;
711     wq->uwq_state |= USD_QS_READY;
712 
713  out:
714     return ret;
715 }
716 
/*
 * Public interface to disable a QP
 */
int
usd_disable_qp(
    struct usd_qp *uqp)
{
    usd_vnic_disable_qp(to_qpi(uqp));
    return 0;
}
730 
/*
 * Public interface to enable a QP
 */
int
usd_enable_qp(
    struct usd_qp *uqp)
{
    usd_vnic_enable_qp(to_qpi(uqp));
    return 0;
}
744 
/*
 * Public interface to create a CQ
 * First, issue the verbs command to create a CQ instance in the driver.
 * Second, allocate the data structures so that poll_cq can succeed, though
 * we will not actually have VIC resources allocated until the first create_qp
 * that uses this CQ.  We will finish configuring the CQ at that time.
 *
 * Returns 0 with *cq_o set, or a negative errno.
 */
int
usd_create_cq(
    struct usd_device *dev,
    struct usd_cq_init_attr *init_attr,
    struct usd_cq **cq_o)
{
    unsigned num_entries;
    int comp_vec;
    unsigned qp_per_vf;
    struct usd_cq *ucq;
    struct usd_cq_impl *cq;
    unsigned ring_size;
    int ret;

    if (init_attr == NULL)
        return -EINVAL;

    num_entries = init_attr->num_entries;
    comp_vec = init_attr->comp_vec;

    /* Make sure device ready */
    ret = usd_device_ready(dev);
    if (ret != 0) {
        return ret;
    }

    if (num_entries > dev->ud_attrs.uda_max_cqe) {
        return -EINVAL;
    }

    /* completion-event delivery needs group-interrupt support and a
     * completion vector within the device's range */
    if (init_attr->comp_fd != -1) {
        if (dev->ud_ctx->ucx_caps[USD_CAP_GRP_INTR] == 0) {
            usd_err("CQ completion event is not supported\n");
            return -EINVAL;
        }
        if (comp_vec >= (int)dev->ud_attrs.uda_num_comp_vectors) {
            usd_err("too large comp_vec (%d) requested, num_comp_vectors=%d\n",
                    comp_vec, (int)dev->ud_attrs.uda_num_comp_vectors);
            return -EINVAL;
        }
    }

    cq = (struct usd_cq_impl *)calloc(sizeof(*cq), 1);
    if (cq == NULL) {
        ret = -errno;
        goto out;
    }

    qp_per_vf = dev->ud_attrs.uda_qp_per_vf;

    /* reverse maps from VIC queue index back to the owning WQ/RQ,
     * consulted when polling completions */
    cq->ucq_wq_map = calloc(qp_per_vf, sizeof(struct usd_wq *));
    cq->ucq_rq_map = calloc(qp_per_vf, sizeof(struct usd_rq *));
    if (cq->ucq_wq_map == NULL || cq->ucq_rq_map == NULL) {
        ret = -ENOMEM;
        goto out;
    }

    cq->ucq_dev = dev;

    /* normalize to a power of 2, minimum 64 (msbit() semantics assumed
     * to round up for non-power-of-2 inputs - NOTE(review): confirm) */
    num_entries = 1 << msbit(num_entries);
    if (num_entries < 64) {
        num_entries = 64;
    }

    cq->ucq_num_entries = num_entries;

    ring_size = sizeof(struct cq_desc) * num_entries;
    ret = usd_alloc_mr(dev, ring_size, &cq->ucq_desc_ring);
    if (ret != 0)
        goto out;
    memset(cq->ucq_desc_ring, 0, ring_size);

    /*
     * kernel currently has no support for handling negative comp_vec values,
     * just use 0 which is guaranteed to be available
     */
    if (comp_vec < 0)
        comp_vec = 0;

    ret = usd_ib_cmd_create_cq(dev, cq, init_attr->ibv_cq, init_attr->comp_fd,
                                comp_vec);
    if (ret != 0)
        goto out;

    cq->ucq_state |= USD_QS_VERBS_CREATED;

    /* initialize polling variables */
    cq->ucq_cqe_mask = num_entries - 1;
    cq->ucq_color_shift = msbit(num_entries) - 1;
    cq->comp_fd = init_attr->comp_fd;
    cq->comp_vec = comp_vec;
    cq->comp_req_notify = init_attr->comp_req_notify;

    ucq = to_usdcq(cq);
    /* users see one fewer entry than the ring actually holds */
    ucq->ucq_num_entries = num_entries - 1;
    *cq_o = to_usdcq(cq);
    return 0;

out:
    /* usd_destroy_cq() consults ucq_state, so partial setup is safe */
    if (cq != NULL) {
        usd_destroy_cq(to_usdcq(cq));
    }
    return ret;
}
857 
/*
 * Finish CQ creation after first QP has been created.  Associate a vf
 * and configure the CQ on the VIC.  It's OK if CQ is already configured, but
 * VFs must match.
 *
 * Returns 0 on success (including the already-configured case),
 * -EINVAL if the VF conflicts or the control registers are missing,
 * -ENOMEM if an interrupt source cannot be allocated.
 */
static int
usd_finish_create_cq(
    struct usd_cq_impl *cq,
    struct usd_vf *vf)
{
    struct vnic_cq *vcq;

    /* already configured: only a matching VF is acceptable */
    if (cq->ucq_state & USD_QS_VNIC_INITIALIZED) {
        if (cq->ucq_vf == vf) {
            return 0;
        } else {
            usd_err("Cannot share CQ across VFs\n");
            return -EINVAL;
        }
    }

    vcq = &cq->ucq_vnic_cq;
    vcq->index = cq->ucq_index;
    vcq->vdev = vf->vf_vdev;

    /* locate this CQ's control registers on the VF */
    vcq->ctrl = vnic_dev_get_res(vcq->vdev, RES_TYPE_CQ, vcq->index);
    if (vcq->ctrl == NULL)
        return -EINVAL;

    cq->ucq_vf = vf;
    usd_get_vf(vf);     /* bump the reference count */
    cq->ucq_state |= USD_QS_VF_MAPPED;

    /*
     * Tell the VIC about this CQ
     */
    {
        unsigned int cq_flow_control_enable = 0;
        unsigned int cq_color_enable = 1;
        unsigned int cq_head = 0;
        unsigned int cq_tail = 0;
        unsigned int cq_tail_color = 1;
        unsigned int cq_entry_enable = 1;
        unsigned int cq_msg_enable = 0;
        unsigned int cq_intr_enable = 0;
        unsigned int cq_intr_offset = 0;
        uint64_t cq_msg_addr = 0;

        /* a comp_fd was supplied at create time: hook up an interrupt */
        if (cq->comp_fd != -1) {
            cq->ucq_intr = usd_get_cq_intr(cq, vf);
            if (cq->ucq_intr == NULL) {
                usd_err("Failed to alloc cq completion intr\n");
                return -ENOMEM;
            } else {
                cq_intr_enable = 1;
                cq_intr_offset = cq->intr_offset;
            }
        }

        cq->ucq_vnic_cq.ring.base_addr = (uintptr_t)cq->ucq_desc_ring;
        cq->ucq_vnic_cq.ring.desc_count = cq->ucq_num_entries;

        vnic_cq_init(&cq->ucq_vnic_cq, cq_flow_control_enable,
                cq_color_enable, cq_head, cq_tail, cq_tail_color,
                cq_intr_enable, cq_entry_enable, cq_msg_enable,
                cq_intr_offset, cq_msg_addr);
    }
    cq->ucq_state |= USD_QS_VNIC_INITIALIZED;

    return 0;
}
929 
930 /*
931  * Fill in ops field for QP
932  */
933 static int
usd_qp_get_ops(struct usd_qp_impl * qp)934 usd_qp_get_ops(
935     struct usd_qp_impl *qp)
936 {
937     int tt;
938 
939 #define USD_TT(TY,TR) ((TY)<<16|(TR))
940     tt = USD_TT(qp->uq_attrs.uqa_transport, qp->uq_attrs.uqa_qtype);
941 
942     switch (tt) {
943     case USD_TT(USD_QTR_UDP, USD_QTY_UD):
944         qp->uq_qp.uq_ops = usd_qp_ops_ud_udp;
945         break;
946     case USD_TT(USD_QTR_UDP, USD_QTY_UD_PIO):
947         qp->uq_qp.uq_ops = usd_qp_ops_ud_pio_udp;
948         break;
949     case USD_TT(USD_QTR_RAW, USD_QTY_UD):
950         qp->uq_qp.uq_ops = usd_qp_ops_ud_raw;
951         break;
952     default:
953         return -EINVAL;
954     }
955 
956     return 0;
957 }
958 
959 /*
960  * Convert user's filter into internal representation
961  */
962 static int
usd_filter_alloc(struct usd_device * dev,struct usd_filter * filt,struct usd_qp_filter * qfilt)963 usd_filter_alloc(
964     struct usd_device *dev,
965     struct usd_filter *filt,
966     struct usd_qp_filter *qfilt)
967 {
968     struct sockaddr_in sin;
969     int ret;
970     int s;
971 
972     switch (filt->uf_type) {
973     case USD_FTY_UDP_SOCK:
974         qfilt->qf_type = USD_FTY_UDP_SOCK;
975         qfilt->qf_filter.qf_udp.u_sockfd = filt->uf_filter.uf_udp_sock.u_sock;
976         break;
977 
978     case USD_FTY_UDP:
979         qfilt->qf_type = USD_FTY_UDP;
980         qfilt->qf_filter.qf_udp.u_sockfd = -1;
981 
982         s = socket(AF_INET, SOCK_DGRAM, 0);
983         if (s == -1)
984             return -errno;
985         memset(&sin, 0, sizeof(sin));
986         sin.sin_family = AF_INET;
987         sin.sin_addr.s_addr = dev->ud_attrs.uda_ipaddr_be;
988         sin.sin_port = htons(filt->uf_filter.uf_udp.u_port);
989         ret = bind(s, (struct sockaddr *)&sin, sizeof(sin));
990         if (ret == -1) {
991             ret = -errno;
992             close(s);
993             return ret;
994         }
995 
996         /* save the socket */
997         qfilt->qf_filter.qf_udp.u_sockfd = s;
998         break;
999 
1000     default:
1001         return -EINVAL;
1002     }
1003 
1004     return 0;
1005 }
1006 
1007 /*
1008  * Fill in local address given filter and return from verbs QP create
1009  */
1010 static int
usd_get_qp_local_addr(struct usd_qp_impl * qp)1011 usd_get_qp_local_addr(
1012     struct usd_qp_impl *qp)
1013 {
1014     socklen_t addrlen;
1015     int ret;
1016 
1017     switch (qp->uq_attrs.uqa_transport) {
1018 
1019     case USD_QTR_UDP:
1020         /* find out what address we got */
1021         addrlen = sizeof(qp->uq_attrs.uqa_local_addr.ul_addr.ul_udp.u_addr);
1022         ret = getsockname(qp->uq_filter.qf_filter.qf_udp.u_sockfd,
1023          (struct sockaddr *) &qp->uq_attrs.uqa_local_addr.ul_addr.ul_udp.u_addr,
1024          &addrlen);
1025         if (ret == -1)
1026             return -errno;
1027         break;
1028 
1029     default:
1030         break;
1031     }
1032     return 0;
1033 }
1034 
1035 static void
usd_filter_free(struct usd_qp_filter * qfilt)1036 usd_filter_free(
1037     struct usd_qp_filter *qfilt)
1038 {
1039     switch (qfilt->qf_type) {
1040     case USD_FTY_UDP:
1041         close(qfilt->qf_filter.qf_udp.u_sockfd);
1042         break;
1043     default:
1044         break;
1045     }
1046 }
1047 
1048 /*
1049  * Destroy a QP
1050  */
1051 int
usd_destroy_qp(struct usd_qp * uqp)1052 usd_destroy_qp(
1053     struct usd_qp *uqp)
1054 {
1055     struct usd_wq *wq;
1056     struct usd_rq *rq;
1057     struct usd_qp_impl *qp;
1058 
1059     qp = to_qpi(uqp);
1060 
1061     wq = &qp->uq_wq;
1062     rq = &qp->uq_rq;
1063 
1064     if (wq->uwq_state & USD_QS_READY)
1065         usd_disable_qp(uqp);
1066 
1067     if (rq->urq_state & USD_QS_VNIC_ALLOCATED)
1068         vnic_rq_free(&rq->urq_vnic_rq);
1069 
1070     if (wq->uwq_state & USD_QS_VF_MAPPED)
1071         usd_unmap_vf(qp->uq_dev, qp->uq_vf);
1072 
1073     if (wq->uwq_state & USD_QS_VERBS_CREATED)
1074         usd_ib_cmd_destroy_qp(qp->uq_dev, qp);
1075 
1076     if (rq->urq_state & USD_QS_FILTER_ALLOC)
1077         usd_filter_free(&qp->uq_filter);
1078 
1079     if (rq->urq_context != NULL)
1080         free(rq->urq_context);
1081     if (wq->uwq_post_info != NULL)
1082         free(wq->uwq_post_info);
1083     if (wq->uwq_copybuf != NULL)
1084         usd_free_mr(wq->uwq_copybuf);
1085     if (wq->uwq_desc_ring != NULL)
1086         usd_free_mr(wq->uwq_desc_ring);
1087     if (rq->urq_desc_ring != NULL)
1088         usd_free_mr(rq->urq_desc_ring);
1089 
1090     free(qp);
1091 
1092     return 0;
1093 }
1094 
1095 /*
1096  * Create a normal or PIO UD QP
1097  */
1098 static int
usd_create_qp_ud(struct usd_qp_impl * qp)1099 usd_create_qp_ud(
1100     struct usd_qp_impl *qp)
1101 {
1102     struct usd_device *dev;
1103     unsigned num_wq_entries;
1104     unsigned num_rq_entries;
1105     struct usd_vf_info vf_info;
1106     struct usd_vf *vf;
1107     struct usd_rq *rq;
1108     struct usd_wq *wq;
1109     struct usd_cq_impl *wcq;
1110     struct usd_cq_impl *rcq;
1111     size_t copybuf_size;
1112     int ret;
1113 
1114     dev = qp->uq_dev;
1115     vf = NULL;
1116 
1117     wq = &qp->uq_wq;
1118     rq = &qp->uq_rq;
1119     wcq = wq->uwq_cq;
1120     rcq = rq->urq_cq;
1121 
1122     ret = usd_qp_get_ops(qp);
1123     if (ret != 0) {
1124         goto fail;
1125     }
1126 
1127     num_wq_entries = wq->uwq_num_entries;
1128     num_rq_entries = rq->urq_num_entries;
1129 
1130     rq->urq_context = calloc(sizeof(void *), num_rq_entries);
1131     wq->uwq_post_info = calloc(sizeof(struct usd_wq_post_info), num_wq_entries);
1132     if (rq->urq_context == NULL || wq->uwq_post_info == NULL) {
1133         ret = -ENOMEM;
1134         goto fail;
1135     }
1136 
1137     /*
1138      * Issue verbs command to create the QP.  This does not actually
1139      * instanstiate the filter in the VIC yet, need to bring the
1140      * verbs QP up to RTR state for that
1141      */
1142     memset(&vf_info, 0, sizeof(vf_info));
1143     ret = usd_ib_cmd_create_qp(dev, qp, &vf_info);
1144     if (ret != 0) {
1145         goto fail;
1146     }
1147 
1148     /* verbs create_qp command has been completed */
1149     rq->urq_state |= USD_QS_VERBS_CREATED;
1150     wq->uwq_state |= USD_QS_VERBS_CREATED;
1151 
1152     /*
1153      * Create/regmr for wq copybuf after verbs QP is created
1154      * because QP number information may be needed to register
1155      * mr under shared PD
1156      */
1157     copybuf_size = USD_SEND_MAX_COPY * num_wq_entries;
1158     ret = usd_alloc_mr(dev, copybuf_size, (void **)&wq->uwq_copybuf);
1159     if (ret != 0)
1160         goto fail;
1161 
1162     ret = usd_map_vf(dev, &vf_info, &vf);
1163     if (ret != 0) {
1164         goto fail;
1165     }
1166 
1167     qp->uq_vf = vf;
1168     rq->urq_state |= USD_QS_VF_MAPPED;
1169     wq->uwq_state |= USD_QS_VF_MAPPED;
1170 
1171     /*
1172      * Now that we have a VF, we can finish creating the CQs.
1173      * It's OK if rcq==wcq, finish_create_cq allows for CQ sharing
1174      */
1175     ret = usd_finish_create_cq(wcq, vf);
1176     if (ret != 0) {
1177         goto fail;
1178     }
1179     ret = usd_finish_create_cq(rcq, vf);
1180     if (ret != 0) {
1181         goto fail;
1182     }
1183 
1184     /* define the WQ and RQ to the VIC */
1185     ret = usd_create_wq(qp);
1186     if (ret != 0) {
1187         goto fail;
1188     }
1189     ret = usd_create_rq(qp);
1190     if (ret != 0) {
1191         goto fail;
1192     }
1193 
1194     /* Issue commands to driver to enable the QP */
1195     ret = usd_enable_verbs_qp(qp);
1196     if (ret != 0) {
1197         goto fail;
1198     }
1199 
1200     /* Attach WQ and RQ to CW */
1201     rcq->ucq_rq_map[rq->urq_index] = rq;
1202     wcq->ucq_wq_map[wq->uwq_index] = wq;
1203 
1204     qp->uq_attrs.uqa_max_send_credits = wq->uwq_num_entries - 1;
1205     qp->uq_attrs.uqa_max_recv_credits = rq->urq_num_entries - 1;
1206     qp->uq_attrs.uqa_max_inline = USD_SEND_MAX_COPY -
1207         qp->uq_attrs.uqa_hdr_len;
1208 
1209     /* build local address */
1210     ret = usd_get_qp_local_addr(qp);
1211     if (ret != 0) {
1212         goto fail;
1213     }
1214 
1215     return 0;
1216 
1217  fail:
1218     return ret;
1219 }
1220 
1221 /*
1222  * Public interface to create QP
1223  */
1224 int
usd_create_qp(struct usd_device * dev,enum usd_qp_transport transport,enum usd_qp_type qtype,struct usd_cq * wucq,struct usd_cq * rucq,unsigned num_send_credits,unsigned num_recv_credits,struct usd_filter * filt,struct usd_qp ** uqp_o)1225 usd_create_qp(
1226     struct usd_device *dev,
1227     enum usd_qp_transport transport,
1228     enum usd_qp_type qtype,
1229     struct usd_cq *wucq,
1230     struct usd_cq *rucq,
1231     unsigned num_send_credits,
1232     unsigned num_recv_credits,
1233     struct usd_filter *filt,
1234     struct usd_qp **uqp_o)
1235 {
1236     struct usd_qp_impl *qp;
1237     unsigned num_rq_entries;
1238     unsigned num_wq_entries;
1239     struct usd_cq_impl *wcq;
1240     struct usd_cq_impl *rcq;
1241     struct usd_rq *rq;
1242     struct usd_wq *wq;
1243     int ret;
1244 
1245     qp = NULL;
1246 
1247     /* Make sure device ready */
1248     ret = usd_device_ready(dev);
1249     if (ret != 0) {
1250         goto fail;
1251     }
1252 
1253     qp = calloc(sizeof(*qp), 1);
1254     if (qp == NULL) {
1255         ret = -ENOMEM;
1256         goto fail;
1257     }
1258 
1259     qp->uq_dev = dev;
1260     qp->uq_attrs.uqa_transport = transport;
1261     qp->uq_attrs.uqa_qtype = qtype;
1262 
1263     ret = usd_qp_get_ops(qp);
1264     if (ret != 0) {
1265         goto fail;
1266     }
1267 
1268     if (num_recv_credits > dev->ud_attrs.uda_max_recv_credits) {
1269         ret = -EINVAL;
1270         goto fail;
1271     }
1272     /* Add 1 and round num_entries up to POW2 and min to 32 */
1273     num_rq_entries = 1 << msbit(num_recv_credits);
1274     if (num_rq_entries < 32) num_rq_entries = 32;
1275 
1276     if (num_send_credits > dev->ud_attrs.uda_max_send_credits) {
1277         ret = -EINVAL;
1278         goto fail;
1279     }
1280     num_wq_entries = 1 << msbit(num_send_credits);
1281     if (num_wq_entries < 32) num_wq_entries = 32;
1282 
1283     rcq = to_cqi(rucq);
1284     wcq = to_cqi(wucq);
1285 
1286     rq = &qp->uq_rq;
1287     rq->urq_num_entries = num_rq_entries;
1288     rq->urq_cq = rcq;
1289 
1290     wq = &qp->uq_wq;
1291     wq->uwq_num_entries = num_wq_entries;
1292     wq->uwq_cq = wcq;
1293 
1294     /* do filter setup */
1295     ret = usd_filter_alloc(dev, filt, &qp->uq_filter);
1296     if (ret != 0) {
1297         goto fail;
1298     }
1299     rq->urq_state |= USD_QS_FILTER_ALLOC;
1300 
1301     /* Fill in some attrs */
1302     switch (transport) {
1303     case USD_QTR_UDP:
1304         qp->uq_attrs.uqa_hdr_len = sizeof(struct usd_udp_hdr);
1305         break;
1306     case USD_QTR_RAW:
1307         qp->uq_attrs.uqa_hdr_len = 0;
1308         break;
1309     }
1310 
1311     /*
1312      * Now, do the type-specific configuration
1313      */
1314     switch (qtype) {
1315     case USD_QTY_UD:
1316     case USD_QTY_UD_PIO:
1317         ret = usd_create_qp_ud(qp);
1318         if (ret != 0) {
1319             goto fail;
1320         }
1321         break;
1322     default:
1323         ret = -EINVAL;
1324         goto fail;
1325         break;
1326     }
1327 
1328     *uqp_o = to_usdqp(qp);
1329     return 0;
1330 
1331 fail:
1332     if (qp != NULL) {
1333         usd_destroy_qp(to_usdqp(qp));
1334     }
1335     return ret;
1336 }
1337 
1338 /*
1339  * Return attributes of a QP
1340  */
1341 int
usd_get_qp_attrs(struct usd_qp * uqp,struct usd_qp_attrs * qattrs)1342 usd_get_qp_attrs(
1343     struct usd_qp *uqp,
1344     struct usd_qp_attrs *qattrs)
1345 {
1346     struct usd_qp_impl *qp;
1347 
1348     qp = to_qpi(uqp);
1349     *qattrs = qp->uq_attrs;
1350     return 0;
1351 }
1352 
usd_get_completion_fd(struct usd_device * dev,int * comp_fd_o)1353 int usd_get_completion_fd(struct usd_device *dev, int *comp_fd_o)
1354 {
1355     if (dev == NULL || comp_fd_o == NULL)
1356         return -EINVAL;
1357 
1358     return usd_ib_cmd_create_comp_channel(dev, comp_fd_o);
1359 }
1360 
usd_put_completion_fd(struct usd_device * dev,int comp_fd)1361 int usd_put_completion_fd(struct usd_device *dev, int comp_fd)
1362 {
1363     if (dev == NULL || comp_fd < 0)
1364         return -EINVAL;
1365 
1366     if (close(comp_fd) == -1)
1367         return -errno;
1368 
1369     return 0;
1370 }
1371