Diffstat (limited to 'drivers/infiniband/hw/irdma/verbs.c')
-rw-r--r--  drivers/infiniband/hw/irdma/verbs.c  50
1 file changed, 31 insertions(+), 19 deletions(-)
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 76ce6137f2ba..6d9af41a2884 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -27,7 +27,8 @@ static int irdma_query_device(struct ib_device *ibdev,
 			irdma_fw_minor_ver(&rf->sc_dev);
 	props->device_cap_flags = IB_DEVICE_MEM_WINDOW |
 				  IB_DEVICE_MEM_MGT_EXTENSIONS;
-	props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
+	if (hw_attrs->uk_attrs.hw_rev < IRDMA_GEN_3)
+		props->kernel_cap_flags = IBK_LOCAL_DMA_LKEY;
 	props->vendor_id = pcidev->vendor;
 	props->vendor_part_id = pcidev->device;
@@ -771,7 +772,6 @@ static int irdma_cqp_create_qp_cmd(struct irdma_qp *iwqp)
 	cqp_info = &cqp_request->info;
 	qp_info = &cqp_request->info.in.u.qp_create.info;
-	memset(qp_info, 0, sizeof(*qp_info));
 	qp_info->mac_valid = true;
 	qp_info->cq_num_valid = true;
 	qp_info->next_iwarp_state = IRDMA_QP_STATE_IDLE;
@@ -2029,6 +2029,7 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 	struct irdma_pci_f *rf;
 	struct irdma_cq_buf *cq_buf = NULL;
 	unsigned long flags;
+	u8 cqe_size;
 	int ret;
 
 	iwdev = to_iwdev(ibcq->device);
@@ -2045,7 +2046,7 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 		return -EINVAL;
 
 	if (!iwcq->user_mode) {
-		entries++;
+		entries += 2;
 
 		if (!iwcq->sc_cq.cq_uk.avoid_mem_cflct &&
 		    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
@@ -2053,6 +2054,10 @@ static int irdma_resize_cq(struct ib_cq *ibcq, int entries,
 		if (entries & 1)
 			entries += 1; /* cq size must be an even number */
+
+		cqe_size = iwcq->sc_cq.cq_uk.avoid_mem_cflct ? 64 : 32;
+		if (entries * cqe_size == IRDMA_HW_PAGE_SIZE)
+			entries += 2;
 	}
 
 	info.cq_size = max(entries, 4);
@@ -2306,8 +2311,8 @@ static int irdma_setup_kmode_srq(struct irdma_device *iwdev,
 	ukinfo->srq_size = depth >> shift;
 	ukinfo->shadow_area = mem->va + ring_size;
-	info->shadow_area_pa = info->srq_pa + ring_size;
 	info->srq_pa = mem->pa;
+	info->shadow_area_pa = info->srq_pa + ring_size;
 
 	return 0;
 }
@@ -2384,6 +2389,7 @@ static int irdma_create_srq(struct ib_srq *ibsrq,
 	info.vsi = &iwdev->vsi;
 	info.pd = &iwpd->sc_pd;
+	iwsrq->sc_srq.srq_uk.lock = &iwsrq->lock;
 	err_code = irdma_sc_srq_init(&iwsrq->sc_srq, &info);
 	if (err_code)
 		goto free_dmem;
@@ -2483,6 +2489,7 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 	int err_code;
 	int entries = attr->cqe;
 	bool cqe_64byte_ena;
+	u8 cqe_size;
 
 	err_code = cq_validate_flags(attr->flags, dev->hw_attrs.uk_attrs.hw_rev);
 	if (err_code)
@@ -2503,11 +2510,13 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 	spin_lock_init(&iwcq->lock);
 	INIT_LIST_HEAD(&iwcq->resize_list);
 	INIT_LIST_HEAD(&iwcq->cmpl_generated);
+	iwcq->cq_num = cq_num;
 	info.dev = dev;
 	ukinfo->cq_size = max(entries, 4);
 	ukinfo->cq_id = cq_num;
 	cqe_64byte_ena = dev->hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_64_BYTE_CQE ?
 			 true : false;
+	cqe_size = cqe_64byte_ena ? 64 : 32;
 	ukinfo->avoid_mem_cflct = cqe_64byte_ena;
 	iwcq->ibcq.cqe = info.cq_uk_init_info.cq_size;
 	if (attr->comp_vector < rf->ceqs_count)
@@ -2580,13 +2589,16 @@ static int irdma_create_cq(struct ib_cq *ibcq,
 			goto cq_free_rsrc;
 		}
 
-		entries++;
+		entries += 2;
 		if (!cqe_64byte_ena &&
 		    dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2)
 			entries *= 2;
 
 		if (entries & 1)
 			entries += 1; /* cq size must be an even number */
 
+		if (entries * cqe_size == IRDMA_HW_PAGE_SIZE)
+			entries += 2;
+
 		ukinfo->cq_size = entries;
 
 		if (cqe_64byte_ena)
@@ -3102,12 +3114,10 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
 	cqp_info = &cqp_request->info;
 	info = &cqp_info->in.u.alloc_stag.info;
-	memset(info, 0, sizeof(*info));
 	info->page_size = PAGE_SIZE;
 	info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
 	info->pd_id = iwpd->sc_pd.pd_id;
 	info->total_len = iwmr->len;
-	info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
 	info->remote_access = true;
 	cqp_info->cqp_cmd = IRDMA_OP_ALLOC_STAG;
 	cqp_info->post_sq = 1;
@@ -3118,7 +3128,7 @@ static int irdma_hw_alloc_stag(struct irdma_device *iwdev,
 	if (status)
 		return status;
 
-	iwmr->is_hwreg = 1;
+	iwmr->is_hwreg = true;
 	return 0;
 }
@@ -3252,7 +3262,6 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
 	cqp_info = &cqp_request->info;
 	stag_info = &cqp_info->in.u.mr_reg_non_shared.info;
-	memset(stag_info, 0, sizeof(*stag_info));
 	stag_info->va = iwpbl->user_base;
 	stag_info->stag_idx = iwmr->stag >> IRDMA_CQPSQ_STAG_IDX_S;
 	stag_info->stag_key = (u8)iwmr->stag;
@@ -3262,7 +3271,7 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
 	if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.feature_flags & IRDMA_FEATURE_ATOMIC_OPS)
 		stag_info->remote_atomics_en = (access & IB_ACCESS_REMOTE_ATOMIC) ?
 					       1 : 0;
 	stag_info->pd_id = iwpd->sc_pd.pd_id;
-	stag_info->all_memory = pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY;
+	stag_info->all_memory = iwmr->dma_mr;
 	if (stag_info->access_rights & IRDMA_ACCESS_FLAGS_ZERO_BASED)
 		stag_info->addr_type = IRDMA_ADDR_TYPE_ZERO_BASED;
 	else
@@ -3289,7 +3298,7 @@ static int irdma_hwreg_mr(struct irdma_device *iwdev, struct irdma_mr *iwmr,
 	irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request);
 
 	if (!ret)
-		iwmr->is_hwreg = 1;
+		iwmr->is_hwreg = true;
 
 	return ret;
 }
@@ -3646,7 +3655,6 @@ static int irdma_hwdereg_mr(struct ib_mr *ib_mr)
 	cqp_info = &cqp_request->info;
 	info = &cqp_info->in.u.dealloc_stag.info;
-	memset(info, 0, sizeof(*info));
 	info->pd_id = iwpd->sc_pd.pd_id;
 	info->stag_idx = ib_mr->rkey >> IRDMA_CQPSQ_STAG_IDX_S;
 	info->mr = true;
@@ -3662,7 +3670,7 @@ static int irdma_hwdereg_mr(struct ib_mr *ib_mr)
 	if (status)
 		return status;
 
-	iwmr->is_hwreg = 0;
+	iwmr->is_hwreg = false;
 	return 0;
 }
@@ -3785,9 +3793,10 @@ static struct ib_mr *irdma_rereg_user_mr(struct ib_mr *ib_mr, int flags,
  * @size: size of memory to register
  * @access: Access rights
  * @iova_start: start of virtual address for physical buffers
+ * @dma_mr: Flag indicating whether this region is a PD DMA MR
  */
 struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access,
-				u64 *iova_start)
+				u64 *iova_start, bool dma_mr)
 {
 	struct irdma_device *iwdev = to_iwdev(pd->device);
 	struct irdma_pbl *iwpbl;
@@ -3804,6 +3813,7 @@ struct ib_mr *irdma_reg_phys_mr(struct ib_pd *pd, u64 addr, u64 size, int access
 	iwpbl = &iwmr->iwpbl;
 	iwpbl->iwmr = iwmr;
 	iwmr->type = IRDMA_MEMREG_TYPE_MEM;
+	iwmr->dma_mr = dma_mr;
 	iwpbl->user_base = *iova_start;
 	stag = irdma_create_stag(iwdev);
 	if (!stag) {
@@ -3842,7 +3852,7 @@ static struct ib_mr *irdma_get_dma_mr(struct ib_pd *pd, int acc)
 {
 	u64 kva = 0;
 
-	return irdma_reg_phys_mr(pd, 0, 0, acc, &kva);
+	return irdma_reg_phys_mr(pd, 0, 0, acc, &kva, true);
 }
 
 /**
@@ -4077,7 +4087,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
 			break;
 		case IB_WR_LOCAL_INV:
 			info.op_type = IRDMA_OP_TYPE_INV_STAG;
-			info.local_fence = info.read_fence;
+			info.local_fence = true;
 			info.op.inv_local_stag.target_stag = ib_wr->ex.invalidate_rkey;
 			err = irdma_uk_stag_local_invalidate(ukqp, &info, true);
 			break;
@@ -4504,7 +4514,7 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
 	}
 
 	if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) &&
-	    (!irdma_cq_empty(iwcq) || !list_empty(&iwcq->cmpl_generated)))
+	    (!irdma_uk_cq_empty(ukcq) || !list_empty(&iwcq->cmpl_generated)))
 		ret = 1;
 
 	spin_unlock_irqrestore(&iwcq->lock, flags);
@@ -5203,7 +5213,7 @@ static int irdma_create_user_ah(struct ib_ah *ibah,
 	struct irdma_ah *parent_ah;
 	int err;
 
-	if (udata && udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
+	if (udata->outlen < IRDMA_CREATE_AH_MIN_RESP_LEN)
 		return -EINVAL;
 
 	err = irdma_setup_ah(ibah, attr);
@@ -5499,7 +5509,9 @@ void irdma_ib_dealloc_device(struct ib_device *ibdev)
 	irdma_rt_deinit_hw(iwdev);
 	if (!iwdev->is_vport) {
 		irdma_ctrl_deinit_hw(iwdev->rf);
-		if (iwdev->rf->vchnl_wq)
+		if (iwdev->rf->vchnl_wq) {
 			destroy_workqueue(iwdev->rf->vchnl_wq);
+			mutex_destroy(&iwdev->rf->sc_dev.vchnl_mutex);
+		}
 	}
 }
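The two CQ hunks above apply the same sizing rule in both irdma_resize_cq() and irdma_create_cq(). As a reading aid only, that rule can be pulled into a standalone helper. This is a minimal sketch, not driver code: the helper name is invented, and the IRDMA_HW_PAGE_SIZE (4096) and IRDMA_GEN_2 (2) values are assumptions based on the driver's headers, not part of this patch.

    #include <stdbool.h>
    #include <stdint.h>

    #define IRDMA_HW_PAGE_SIZE 4096 /* assumed; defined in the driver headers */
    #define IRDMA_GEN_2 2           /* assumed hardware-revision constant */

    /* Hypothetical helper mirroring the kernel-mode CQ sizing in this patch. */
    static unsigned int irdma_kmode_cq_entries(unsigned int entries,
                                               bool cqe_64byte_ena,
                                               uint8_t hw_rev)
    {
            uint8_t cqe_size = cqe_64byte_ena ? 64 : 32;

            entries += 2;           /* reserve two extra CQEs (was entries++) */
            if (!cqe_64byte_ena && hw_rev >= IRDMA_GEN_2)
                    entries *= 2;   /* 32-byte CQEs on GEN_2+ double the depth */
            if (entries & 1)
                    entries += 1;   /* cq size must be an even number */
            if (entries * cqe_size == IRDMA_HW_PAGE_SIZE)
                    entries += 2;   /* never end exactly on one HW page */
            return entries;
    }

Worked through the sketch: a 62-entry request with 32-byte CQEs on GEN_2 becomes 64 after the reserve step, 128 after doubling, and 128 * 32 = 4096 bytes lands exactly on the assumed page size, so the final check grows the ring to 130 entries.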
