Diffstat (limited to 'drivers/infiniband')
196 files changed, 6129 insertions, 10922 deletions
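Note: the cma.c hunks below replace the manual "result < addend" wrap test around req.private_data_len with check_add_overflow(). The following is a minimal standalone sketch of that pattern; it uses the GCC/Clang builtin that the kernel's check_add_overflow() macro in <linux/overflow.h> wraps so it can be compiled in userspace, and the function and variable names are illustrative rather than taken from the driver.

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the intent of check_add_overflow(offset, user_len, &total):
 * returns true when the addition wraps; the (wrapped) sum is still
 * stored in *total either way. */
static bool build_private_data_len(unsigned int offset,
				   unsigned int user_len,
				   unsigned int *total)
{
	return __builtin_add_overflow(offset, user_len, total);
}

int main(void)
{
	unsigned int total;

	if (!build_private_data_len(64, 128, &total))
		printf("ok, total = %u\n", total);	/* prints 192 */

	/* The old "req.private_data_len < conn_param->private_data_len"
	 * test caught this case too; the new form states the intent. */
	if (build_private_data_len(64, 0xFFFFFFF0u, &total))
		printf("rejected: length would overflow\n");

	return 0;
}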
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index 0c98dd3dee67..f6aa1a964573 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -33,6 +33,7 @@ * SOFTWARE. */ +#include <linux/if_vlan.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/slab.h> @@ -955,7 +956,7 @@ int rdma_query_gid(struct ib_device *device, u32 port_num, { struct ib_gid_table *table; unsigned long flags; - int res = -EINVAL; + int res; if (!rdma_is_port_valid(device, port_num)) return -EINVAL; @@ -963,9 +964,15 @@ int rdma_query_gid(struct ib_device *device, u32 port_num, table = rdma_gid_table(device, port_num); read_lock_irqsave(&table->rwlock, flags); - if (index < 0 || index >= table->sz || - !is_gid_entry_valid(table->data_vec[index])) + if (index < 0 || index >= table->sz) { + res = -EINVAL; goto done; + } + + if (!is_gid_entry_valid(table->data_vec[index])) { + res = -ENOENT; + goto done; + } memcpy(gid, &table->data_vec[index]->attr.gid, sizeof(*gid)); res = 0; diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index c903b74f46a4..35f0d5e7533d 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3322,7 +3322,7 @@ static int cm_lap_handler(struct cm_work *work) ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av); if (ret) { rdma_destroy_ah_attr(&ah_attr); - return -EINVAL; + goto deref; } spin_lock_irq(&cm_id_priv->lock); diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 704ce595542c..c447526288f4 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -67,8 +67,8 @@ static const char * const cma_events[] = { [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", }; -static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr, - union ib_gid *mgid); +static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, + enum ib_gid_type gid_type); const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) { @@ -453,7 +453,7 @@ static void _cma_attach_to_dev(struct rdma_id_private *id_priv, id_priv->id.device = cma_dev->device; id_priv->id.route.addr.dev_addr.transport = rdma_node_get_transport(cma_dev->device->node_type); - list_add_tail(&id_priv->list, &cma_dev->id_list); + list_add_tail(&id_priv->device_item, &cma_dev->id_list); trace_cm_id_attach(id_priv, cma_dev->device); } @@ -470,7 +470,7 @@ static void cma_attach_to_dev(struct rdma_id_private *id_priv, static void cma_release_dev(struct rdma_id_private *id_priv) { mutex_lock(&lock); - list_del(&id_priv->list); + list_del_init(&id_priv->device_item); cma_dev_put(id_priv->cma_dev); id_priv->cma_dev = NULL; id_priv->id.device = NULL; @@ -766,6 +766,7 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) unsigned int p; u16 pkey, index; enum ib_port_state port_state; + int ret; int i; cma_dev = NULL; @@ -784,9 +785,14 @@ static int cma_resolve_ib_dev(struct rdma_id_private *id_priv) if (ib_get_cached_port_state(cur_dev->device, p, &port_state)) continue; - for (i = 0; !rdma_query_gid(cur_dev->device, - p, i, &gid); - i++) { + + for (i = 0; i < cur_dev->device->port_data[p].immutable.gid_tbl_len; + ++i) { + ret = rdma_query_gid(cur_dev->device, p, i, + &gid); + if (ret) + continue; + if (!memcmp(&gid, dgid, sizeof(gid))) { cma_dev = cur_dev; sgid = gid; @@ -854,6 +860,7 @@ __rdma_create_id(struct net *net, rdma_cm_event_handler event_handler, init_completion(&id_priv->comp); refcount_set(&id_priv->refcount, 1); 
mutex_init(&id_priv->handler_mutex); + INIT_LIST_HEAD(&id_priv->device_item); INIT_LIST_HEAD(&id_priv->listen_list); INIT_LIST_HEAD(&id_priv->mc_list); get_random_bytes(&id_priv->seq_num, sizeof id_priv->seq_num); @@ -1647,7 +1654,7 @@ static struct rdma_id_private *cma_find_listener( return id_priv; list_for_each_entry(id_priv_dev, &id_priv->listen_list, - listen_list) { + listen_item) { if (id_priv_dev->id.device == cm_id->device && cma_match_net_dev(&id_priv_dev->id, net_dev, req)) @@ -1756,14 +1763,15 @@ static void _cma_cancel_listens(struct rdma_id_private *id_priv) * Remove from listen_any_list to prevent added devices from spawning * additional listen requests. */ - list_del(&id_priv->list); + list_del_init(&id_priv->listen_any_item); while (!list_empty(&id_priv->listen_list)) { - dev_id_priv = list_entry(id_priv->listen_list.next, - struct rdma_id_private, listen_list); + dev_id_priv = + list_first_entry(&id_priv->listen_list, + struct rdma_id_private, listen_item); /* sync with device removal to avoid duplicate destruction */ - list_del_init(&dev_id_priv->list); - list_del(&dev_id_priv->listen_list); + list_del_init(&dev_id_priv->device_item); + list_del_init(&dev_id_priv->listen_item); mutex_unlock(&lock); rdma_destroy_id(&dev_id_priv->id); @@ -1838,17 +1846,19 @@ static void destroy_mc(struct rdma_id_private *id_priv, if (dev_addr->bound_dev_if) ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); - if (ndev) { + if (ndev && !send_only) { + enum ib_gid_type gid_type; union ib_gid mgid; - cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr, - &mgid); - - if (!send_only) - cma_igmp_send(ndev, &mgid, false); - - dev_put(ndev); + gid_type = id_priv->cma_dev->default_gid_type + [id_priv->id.port_num - + rdma_start_port( + id_priv->cma_dev->device)]; + cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid, + gid_type); + cma_igmp_send(ndev, &mgid, false); } + dev_put(ndev); cancel_work_sync(&mc->iboe_join.work); } @@ -2564,7 +2574,7 @@ static int cma_listen_on_dev(struct rdma_id_private *id_priv, ret = rdma_listen(&dev_id_priv->id, id_priv->backlog); if (ret) goto err_listen; - list_add_tail(&dev_id_priv->listen_list, &id_priv->listen_list); + list_add_tail(&dev_id_priv->listen_item, &id_priv->listen_list); return 0; err_listen: /* Caller must destroy this after releasing lock */ @@ -2580,13 +2590,13 @@ static int cma_listen_on_all(struct rdma_id_private *id_priv) int ret; mutex_lock(&lock); - list_add_tail(&id_priv->list, &listen_any_list); + list_add_tail(&id_priv->listen_any_item, &listen_any_list); list_for_each_entry(cma_dev, &dev_list, list) { ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); if (ret) { /* Prevent racing with cma_process_remove() */ if (to_destroy) - list_del_init(&to_destroy->list); + list_del_init(&to_destroy->device_item); goto err_listen; } } @@ -4031,8 +4041,7 @@ static int cma_resolve_ib_udp(struct rdma_id_private *id_priv, memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); - req.private_data_len = offset + conn_param->private_data_len; - if (req.private_data_len < conn_param->private_data_len) + if (check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) return -EINVAL; if (req.private_data_len) { @@ -4091,8 +4100,7 @@ static int cma_connect_ib(struct rdma_id_private *id_priv, memset(&req, 0, sizeof req); offset = cma_user_data_offset(id_priv); - req.private_data_len = offset + conn_param->private_data_len; - if (req.private_data_len < conn_param->private_data_len) + if 
(check_add_overflow(offset, conn_param->private_data_len, &req.private_data_len)) return -EINVAL; if (req.private_data_len) { @@ -4895,7 +4903,7 @@ static int cma_netdev_callback(struct notifier_block *self, unsigned long event, mutex_lock(&lock); list_for_each_entry(cma_dev, &dev_list, list) - list_for_each_entry(id_priv, &cma_dev->id_list, list) { + list_for_each_entry(id_priv, &cma_dev->id_list, device_item) { ret = cma_netdev_change(ndev, id_priv); if (ret) goto out; @@ -4955,10 +4963,10 @@ static void cma_process_remove(struct cma_device *cma_dev) mutex_lock(&lock); while (!list_empty(&cma_dev->id_list)) { struct rdma_id_private *id_priv = list_first_entry( - &cma_dev->id_list, struct rdma_id_private, list); + &cma_dev->id_list, struct rdma_id_private, device_item); - list_del(&id_priv->listen_list); - list_del_init(&id_priv->list); + list_del_init(&id_priv->listen_item); + list_del_init(&id_priv->device_item); cma_id_get(id_priv); mutex_unlock(&lock); @@ -5035,7 +5043,7 @@ static int cma_add_one(struct ib_device *device) mutex_lock(&lock); list_add_tail(&cma_dev->list, &dev_list); - list_for_each_entry(id_priv, &listen_any_list, list) { + list_for_each_entry(id_priv, &listen_any_list, listen_any_item) { ret = cma_listen_on_dev(id_priv, cma_dev, &to_destroy); if (ret) goto free_listen; diff --git a/drivers/infiniband/core/cma_priv.h b/drivers/infiniband/core/cma_priv.h index f92f101ea981..757a0ef79872 100644 --- a/drivers/infiniband/core/cma_priv.h +++ b/drivers/infiniband/core/cma_priv.h @@ -55,8 +55,15 @@ struct rdma_id_private { struct rdma_bind_list *bind_list; struct hlist_node node; - struct list_head list; /* listen_any_list or cma_device.list */ - struct list_head listen_list; /* per device listens */ + union { + struct list_head device_item; /* On cma_device->id_list */ + struct list_head listen_any_item; /* On listen_any_list */ + }; + union { + /* On rdma_id_private->listen_list */ + struct list_head listen_item; + struct list_head listen_list; + }; struct cma_device *cma_dev; struct list_head mc_list; diff --git a/drivers/infiniband/core/counters.c b/drivers/infiniband/core/counters.c index df9e6c5e4ddf..af59486fe418 100644 --- a/drivers/infiniband/core/counters.c +++ b/drivers/infiniband/core/counters.c @@ -106,6 +106,38 @@ static int __rdma_counter_bind_qp(struct rdma_counter *counter, return ret; } +int rdma_counter_modify(struct ib_device *dev, u32 port, + unsigned int index, bool enable) +{ + struct rdma_hw_stats *stats; + int ret = 0; + + if (!dev->ops.modify_hw_stat) + return -EOPNOTSUPP; + + stats = ib_get_hw_stats_port(dev, port); + if (!stats || index >= stats->num_counters || + !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) + return -EINVAL; + + mutex_lock(&stats->lock); + + if (enable != test_bit(index, stats->is_disabled)) + goto out; + + ret = dev->ops.modify_hw_stat(dev, port, index, enable); + if (ret) + goto out; + + if (enable) + clear_bit(index, stats->is_disabled); + else + set_bit(index, stats->is_disabled); +out: + mutex_unlock(&stats->lock); + return ret; +} + static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port, struct ib_qp *qp, enum rdma_nl_counter_mode mode) @@ -165,7 +197,7 @@ static struct rdma_counter *alloc_and_bind(struct ib_device *dev, u32 port, return counter; err_mode: - kfree(counter->stats); + rdma_free_hw_stats_struct(counter->stats); err_stats: rdma_restrack_put(&counter->res); kfree(counter); @@ -186,7 +218,7 @@ static void rdma_counter_free(struct rdma_counter *counter) 
mutex_unlock(&port_counter->lock); rdma_restrack_del(&counter->res); - kfree(counter->stats); + rdma_free_hw_stats_struct(counter->stats); kfree(counter); } @@ -618,7 +650,7 @@ void rdma_counter_init(struct ib_device *dev) fail: for (i = port; i >= rdma_start_port(dev); i--) { port_counter = &dev->port_data[port].port_counter; - kfree(port_counter->hstats); + rdma_free_hw_stats_struct(port_counter->hstats); port_counter->hstats = NULL; mutex_destroy(&port_counter->lock); } @@ -631,7 +663,7 @@ void rdma_counter_release(struct ib_device *dev) rdma_for_each_port(dev, port) { port_counter = &dev->port_data[port].port_counter; - kfree(port_counter->hstats); + rdma_free_hw_stats_struct(port_counter->hstats); mutex_destroy(&port_counter->lock); } } diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index f4814bb7f082..a311df07b1bd 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -2461,7 +2461,8 @@ int ib_find_gid(struct ib_device *device, union ib_gid *gid, ++i) { ret = rdma_query_gid(device, port, i, &tmp_gid); if (ret) - return ret; + continue; + if (!memcmp(&tmp_gid, gid, sizeof *gid)) { *port_num = port; if (index) @@ -2676,6 +2677,7 @@ void ib_set_device_ops(struct ib_device *dev, const struct ib_device_ops *ops) SET_DEVICE_OP(dev_ops, modify_cq); SET_DEVICE_OP(dev_ops, modify_device); SET_DEVICE_OP(dev_ops, modify_flow_action_esp); + SET_DEVICE_OP(dev_ops, modify_hw_stat); SET_DEVICE_OP(dev_ops, modify_port); SET_DEVICE_OP(dev_ops, modify_qp); SET_DEVICE_OP(dev_ops, modify_srq); diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c index 54f4feb604d8..358a2db38d23 100644 --- a/drivers/infiniband/core/iwpm_util.c +++ b/drivers/infiniband/core/iwpm_util.c @@ -762,7 +762,7 @@ int iwpm_send_hello(u8 nl_client, int iwpm_pid, u16 abi_version) { struct sk_buff *skb = NULL; struct nlmsghdr *nlh; - const char *err_str = ""; + const char *err_str; int ret = -EINVAL; skb = iwpm_create_nlmsg(RDMA_NL_IWPM_HELLO, &nlh, nl_client); diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c index e9b4b2cccaa0..f5aacaf7fb8e 100644 --- a/drivers/infiniband/core/nldev.c +++ b/drivers/infiniband/core/nldev.c @@ -154,6 +154,8 @@ static const struct nla_policy nldev_policy[RDMA_NLDEV_ATTR_MAX] = { [RDMA_NLDEV_NET_NS_FD] = { .type = NLA_U32 }, [RDMA_NLDEV_SYS_ATTR_NETNS_MODE] = { .type = NLA_U8 }, [RDMA_NLDEV_SYS_ATTR_COPY_ON_FORK] = { .type = NLA_U8 }, + [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX] = { .type = NLA_U32 }, + [RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC] = { .type = NLA_U8 }, }; static int put_driver_name_print_type(struct sk_buff *msg, const char *name, @@ -968,14 +970,21 @@ static int fill_stat_counter_hwcounters(struct sk_buff *msg, if (!table_attr) return -EMSGSIZE; - for (i = 0; i < st->num_counters; i++) - if (rdma_nl_stat_hwcounter_entry(msg, st->names[i], st->value[i])) + mutex_lock(&st->lock); + for (i = 0; i < st->num_counters; i++) { + if (test_bit(i, st->is_disabled)) + continue; + if (rdma_nl_stat_hwcounter_entry(msg, st->descs[i].name, + st->value[i])) goto err; + } + mutex_unlock(&st->lock); nla_nest_end(msg, table_attr); return 0; err: + mutex_unlock(&st->lock); nla_nest_cancel(msg, table_attr); return -EMSGSIZE; } @@ -1888,24 +1897,112 @@ static int nldev_set_sys_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, return err; } +static int nldev_stat_set_mode_doit(struct sk_buff *msg, + struct netlink_ext_ack *extack, + struct nlattr *tb[], + struct ib_device *device, u32 
port) +{ + u32 mode, mask = 0, qpn, cntn = 0; + int ret; + + /* Currently only counter for QP is supported */ + if (!tb[RDMA_NLDEV_ATTR_STAT_RES] || + nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) + return -EINVAL; + + mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]); + if (mode == RDMA_COUNTER_MODE_AUTO) { + if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]) + mask = nla_get_u32( + tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]); + return rdma_counter_set_auto_mode(device, port, mask, extack); + } + + if (!tb[RDMA_NLDEV_ATTR_RES_LQPN]) + return -EINVAL; + + qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); + if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) { + cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); + ret = rdma_counter_bind_qpn(device, port, qpn, cntn); + if (ret) + return ret; + } else { + ret = rdma_counter_bind_qpn_alloc(device, port, qpn, &cntn); + if (ret) + return ret; + } + + if (nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || + nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { + ret = -EMSGSIZE; + goto err_fill; + } + + return 0; + +err_fill: + rdma_counter_unbind_qpn(device, port, qpn, cntn); + return ret; +} + +static int nldev_stat_set_counter_dynamic_doit(struct nlattr *tb[], + struct ib_device *device, + u32 port) +{ + struct rdma_hw_stats *stats; + int rem, i, index, ret = 0; + struct nlattr *entry_attr; + unsigned long *target; + + stats = ib_get_hw_stats_port(device, port); + if (!stats) + return -EINVAL; + + target = kcalloc(BITS_TO_LONGS(stats->num_counters), + sizeof(*stats->is_disabled), GFP_KERNEL); + if (!target) + return -ENOMEM; + + nla_for_each_nested(entry_attr, tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS], + rem) { + index = nla_get_u32(entry_attr); + if ((index >= stats->num_counters) || + !(stats->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) { + ret = -EINVAL; + goto out; + } + + set_bit(index, target); + } + + for (i = 0; i < stats->num_counters; i++) { + if (!(stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL)) + continue; + + ret = rdma_counter_modify(device, port, i, test_bit(i, target)); + if (ret) + goto out; + } + +out: + kfree(target); + return ret; +} + static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, struct netlink_ext_ack *extack) { - u32 index, port, mode, mask = 0, qpn, cntn = 0; struct nlattr *tb[RDMA_NLDEV_ATTR_MAX]; struct ib_device *device; struct sk_buff *msg; + u32 index, port; int ret; - ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, - nldev_policy, extack); - /* Currently only counter for QP is supported */ - if (ret || !tb[RDMA_NLDEV_ATTR_STAT_RES] || - !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || - !tb[RDMA_NLDEV_ATTR_PORT_INDEX] || !tb[RDMA_NLDEV_ATTR_STAT_MODE]) - return -EINVAL; - - if (nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_RES]) != RDMA_NLDEV_ATTR_RES_QP) + ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, nldev_policy, + extack); + if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || + !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) return -EINVAL; index = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); @@ -1916,59 +2013,49 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh, port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); if (!rdma_is_port_valid(device, port)) { ret = -EINVAL; - goto err; + goto err_put_device; + } + + if (!tb[RDMA_NLDEV_ATTR_STAT_MODE] && + !tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) { + ret = -EINVAL; + goto err_put_device; } msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) { ret = -ENOMEM; - goto err; + goto err_put_device; } nlh = nlmsg_put(msg, 
NETLINK_CB(skb).portid, nlh->nlmsg_seq, RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_SET), 0, 0); + if (fill_nldev_handle(msg, device) || + nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) { + ret = -EMSGSIZE; + goto err_free_msg; + } - mode = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_MODE]); - if (mode == RDMA_COUNTER_MODE_AUTO) { - if (tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]) - mask = nla_get_u32( - tb[RDMA_NLDEV_ATTR_STAT_AUTO_MODE_MASK]); - ret = rdma_counter_set_auto_mode(device, port, mask, extack); + if (tb[RDMA_NLDEV_ATTR_STAT_MODE]) { + ret = nldev_stat_set_mode_doit(msg, extack, tb, device, port); if (ret) - goto err_msg; - } else { - if (!tb[RDMA_NLDEV_ATTR_RES_LQPN]) - goto err_msg; - qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]); - if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) { - cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]); - ret = rdma_counter_bind_qpn(device, port, qpn, cntn); - } else { - ret = rdma_counter_bind_qpn_alloc(device, port, - qpn, &cntn); - } - if (ret) - goto err_msg; + goto err_free_msg; + } - if (fill_nldev_handle(msg, device) || - nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port) || - nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_COUNTER_ID, cntn) || - nla_put_u32(msg, RDMA_NLDEV_ATTR_RES_LQPN, qpn)) { - ret = -EMSGSIZE; - goto err_fill; - } + if (tb[RDMA_NLDEV_ATTR_STAT_HWCOUNTERS]) { + ret = nldev_stat_set_counter_dynamic_doit(tb, device, port); + if (ret) + goto err_free_msg; } nlmsg_end(msg, nlh); ib_device_put(device); return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); -err_fill: - rdma_counter_unbind_qpn(device, port, qpn, cntn); -err_msg: +err_free_msg: nlmsg_free(msg); -err: +err_put_device: ib_device_put(device); return ret; } @@ -2103,9 +2190,13 @@ static int stat_get_doit_default_counter(struct sk_buff *skb, goto err_stats; } for (i = 0; i < num_cnts; i++) { + if (test_bit(i, stats->is_disabled)) + continue; + v = stats->value[i] + rdma_counter_get_hwstat_value(device, port, i); - if (rdma_nl_stat_hwcounter_entry(msg, stats->names[i], v)) { + if (rdma_nl_stat_hwcounter_entry(msg, + stats->descs[i].name, v)) { ret = -EMSGSIZE; goto err_table; } @@ -2253,6 +2344,99 @@ static int nldev_stat_get_dumpit(struct sk_buff *skb, return ret; } +static int nldev_stat_get_counter_status_doit(struct sk_buff *skb, + struct nlmsghdr *nlh, + struct netlink_ext_ack *extack) +{ + struct nlattr *tb[RDMA_NLDEV_ATTR_MAX], *table, *entry; + struct rdma_hw_stats *stats; + struct ib_device *device; + struct sk_buff *msg; + u32 devid, port; + int ret, i; + + ret = nlmsg_parse(nlh, 0, tb, RDMA_NLDEV_ATTR_MAX - 1, + nldev_policy, extack); + if (ret || !tb[RDMA_NLDEV_ATTR_DEV_INDEX] || + !tb[RDMA_NLDEV_ATTR_PORT_INDEX]) + return -EINVAL; + + devid = nla_get_u32(tb[RDMA_NLDEV_ATTR_DEV_INDEX]); + device = ib_device_get_by_index(sock_net(skb->sk), devid); + if (!device) + return -EINVAL; + + port = nla_get_u32(tb[RDMA_NLDEV_ATTR_PORT_INDEX]); + if (!rdma_is_port_valid(device, port)) { + ret = -EINVAL; + goto err; + } + + stats = ib_get_hw_stats_port(device, port); + if (!stats) { + ret = -EINVAL; + goto err; + } + + msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); + if (!msg) { + ret = -ENOMEM; + goto err; + } + + nlh = nlmsg_put( + msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, + RDMA_NL_GET_TYPE(RDMA_NL_NLDEV, RDMA_NLDEV_CMD_STAT_GET_STATUS), + 0, 0); + + ret = -EMSGSIZE; + if (fill_nldev_handle(msg, device) || + nla_put_u32(msg, RDMA_NLDEV_ATTR_PORT_INDEX, port)) + goto err_msg; + + table = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS); + if 
(!table) + goto err_msg; + + mutex_lock(&stats->lock); + for (i = 0; i < stats->num_counters; i++) { + entry = nla_nest_start(msg, + RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY); + if (!entry) + goto err_msg_table; + + if (nla_put_string(msg, + RDMA_NLDEV_ATTR_STAT_HWCOUNTER_ENTRY_NAME, + stats->descs[i].name) || + nla_put_u32(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_INDEX, i)) + goto err_msg_entry; + + if ((stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) && + (nla_put_u8(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTER_DYNAMIC, + !test_bit(i, stats->is_disabled)))) + goto err_msg_entry; + + nla_nest_end(msg, entry); + } + mutex_unlock(&stats->lock); + + nla_nest_end(msg, table); + nlmsg_end(msg, nlh); + ib_device_put(device); + return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid); + +err_msg_entry: + nla_nest_cancel(msg, entry); +err_msg_table: + mutex_unlock(&stats->lock); + nla_nest_cancel(msg, table); +err_msg: + nlmsg_free(msg); +err: + ib_device_put(device); + return ret; +} + static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { [RDMA_NLDEV_CMD_GET] = { .doit = nldev_get_doit, @@ -2342,6 +2526,9 @@ static const struct rdma_nl_cbs nldev_cb_table[RDMA_NLDEV_NUM_OPS] = { .dump = nldev_res_get_mr_raw_dumpit, .flags = RDMA_NL_ADMIN_PERM, }, + [RDMA_NLDEV_CMD_STAT_GET_STATUS] = { + .doit = nldev_stat_get_counter_status_doit, + }, }; void __init nldev_init(void) diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c index 5221cce65675..5a3bd41b331c 100644 --- a/drivers/infiniband/core/rw.c +++ b/drivers/infiniband/core/rw.c @@ -282,15 +282,22 @@ static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg, ib_dma_unmap_sg(dev, sg, sg_cnt, dir); } -static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg, - u32 sg_cnt, enum dma_data_direction dir) +static int rdma_rw_map_sgtable(struct ib_device *dev, struct sg_table *sgt, + enum dma_data_direction dir) { - if (is_pci_p2pdma_page(sg_page(sg))) { + int nents; + + if (is_pci_p2pdma_page(sg_page(sgt->sgl))) { if (WARN_ON_ONCE(ib_uses_virt_dma(dev))) return 0; - return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir); + nents = pci_p2pdma_map_sg(dev->dma_device, sgt->sgl, + sgt->orig_nents, dir); + if (!nents) + return -EIO; + sgt->nents = nents; + return 0; } - return ib_dma_map_sg(dev, sg, sg_cnt, dir); + return ib_dma_map_sgtable_attrs(dev, sgt, dir, 0); } /** @@ -313,12 +320,16 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, u64 remote_addr, u32 rkey, enum dma_data_direction dir) { struct ib_device *dev = qp->pd->device; + struct sg_table sgt = { + .sgl = sg, + .orig_nents = sg_cnt, + }; int ret; - ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir); - if (!ret) - return -ENOMEM; - sg_cnt = ret; + ret = rdma_rw_map_sgtable(dev, &sgt, dir); + if (ret) + return ret; + sg_cnt = sgt.nents; /* * Skip to the S/G entry that sg_offset falls into: @@ -354,7 +365,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u32 port_num, return ret; out_unmap_sg: - rdma_rw_unmap_sg(dev, sg, sg_cnt, dir); + rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir); return ret; } EXPORT_SYMBOL(rdma_rw_ctx_init); @@ -385,6 +396,14 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, struct ib_device *dev = qp->pd->device; u32 pages_per_mr = rdma_rw_fr_page_list_len(qp->pd->device, qp->integrity_en); + struct sg_table sgt = { + .sgl = sg, + .orig_nents = sg_cnt, + }; + struct sg_table prot_sgt = { + .sgl = prot_sg, + .orig_nents = prot_sg_cnt, + }; 
struct ib_rdma_wr *rdma_wr; int count = 0, ret; @@ -394,18 +413,14 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, return -EINVAL; } - ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir); - if (!ret) - return -ENOMEM; - sg_cnt = ret; + ret = rdma_rw_map_sgtable(dev, &sgt, dir); + if (ret) + return ret; if (prot_sg_cnt) { - ret = rdma_rw_map_sg(dev, prot_sg, prot_sg_cnt, dir); - if (!ret) { - ret = -ENOMEM; + ret = rdma_rw_map_sgtable(dev, &prot_sgt, dir); + if (ret) goto out_unmap_sg; - } - prot_sg_cnt = ret; } ctx->type = RDMA_RW_SIG_MR; @@ -426,10 +441,11 @@ int rdma_rw_ctx_signature_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, memcpy(ctx->reg->mr->sig_attrs, sig_attrs, sizeof(struct ib_sig_attrs)); - ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sg_cnt, NULL, prot_sg, - prot_sg_cnt, NULL, SZ_4K); + ret = ib_map_mr_sg_pi(ctx->reg->mr, sg, sgt.nents, NULL, prot_sg, + prot_sgt.nents, NULL, SZ_4K); if (unlikely(ret)) { - pr_err("failed to map PI sg (%u)\n", sg_cnt + prot_sg_cnt); + pr_err("failed to map PI sg (%u)\n", + sgt.nents + prot_sgt.nents); goto out_destroy_sig_mr; } @@ -468,10 +484,10 @@ out_destroy_sig_mr: out_free_ctx: kfree(ctx->reg); out_unmap_prot_sg: - if (prot_sg_cnt) - rdma_rw_unmap_sg(dev, prot_sg, prot_sg_cnt, dir); + if (prot_sgt.nents) + rdma_rw_unmap_sg(dev, prot_sgt.sgl, prot_sgt.orig_nents, dir); out_unmap_sg: - rdma_rw_unmap_sg(dev, sg, sg_cnt, dir); + rdma_rw_unmap_sg(dev, sgt.sgl, sgt.orig_nents, dir); return ret; } EXPORT_SYMBOL(rdma_rw_ctx_signature_init); diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index c00f8e28aab7..74ecd7456a11 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -2262,7 +2262,6 @@ err1: void ib_sa_cleanup(void) { cancel_delayed_work(&ib_nl_timed_work); - flush_workqueue(ib_nl_wq); destroy_workqueue(ib_nl_wq); mcast_cleanup(); ib_unregister_client(&sa_client); diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c index 6146c3c1cbe5..84c53bd2a52d 100644 --- a/drivers/infiniband/core/sysfs.c +++ b/drivers/infiniband/core/sysfs.c @@ -433,6 +433,7 @@ static struct attribute *port_default_attrs[] = { &ib_port_attr_link_layer.attr, NULL }; +ATTRIBUTE_GROUPS(port_default); static ssize_t print_ndev(const struct ib_gid_attr *gid_attr, char *buf) { @@ -755,9 +756,9 @@ static void ib_port_release(struct kobject *kobj) for (i = 0; i != ARRAY_SIZE(port->groups); i++) kfree(port->groups[i].attrs); if (port->hw_stats_data) - kfree(port->hw_stats_data->stats); + rdma_free_hw_stats_struct(port->hw_stats_data->stats); kfree(port->hw_stats_data); - kfree(port); + kvfree(port); } static void ib_port_gid_attr_release(struct kobject *kobj) @@ -774,7 +775,7 @@ static void ib_port_gid_attr_release(struct kobject *kobj) static struct kobj_type port_type = { .release = ib_port_release, .sysfs_ops = &port_sysfs_ops, - .default_attrs = port_default_attrs + .default_groups = port_default_groups, }; static struct kobj_type gid_attr_type = { @@ -895,7 +896,7 @@ alloc_hw_stats_device(struct ib_device *ibdev) stats = ibdev->ops.alloc_hw_device_stats(ibdev); if (!stats) return ERR_PTR(-ENOMEM); - if (!stats->names || stats->num_counters <= 0) + if (!stats->descs || stats->num_counters <= 0) goto err_free_stats; /* @@ -911,7 +912,6 @@ alloc_hw_stats_device(struct ib_device *ibdev) if (!data->group.attrs) goto err_free_data; - mutex_init(&stats->lock); data->group.name = "hw_counters"; data->stats = stats; return data; @@ -919,14 +919,14 @@ 
alloc_hw_stats_device(struct ib_device *ibdev) err_free_data: kfree(data); err_free_stats: - kfree(stats); + rdma_free_hw_stats_struct(stats); return ERR_PTR(-ENOMEM); } void ib_device_release_hw_stats(struct hw_stats_device_data *data) { kfree(data->group.attrs); - kfree(data->stats); + rdma_free_hw_stats_struct(data->stats); kfree(data); } @@ -934,7 +934,8 @@ int ib_setup_device_attrs(struct ib_device *ibdev) { struct hw_stats_device_attribute *attr; struct hw_stats_device_data *data; - int i, ret; + bool opstat_skipped = false; + int i, ret, pos = 0; data = alloc_hw_stats_device(ibdev); if (IS_ERR(data)) { @@ -955,16 +956,23 @@ int ib_setup_device_attrs(struct ib_device *ibdev) data->stats->timestamp = jiffies; for (i = 0; i < data->stats->num_counters; i++) { - attr = &data->attrs[i]; + if (data->stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) { + opstat_skipped = true; + continue; + } + + WARN_ON(opstat_skipped); + attr = &data->attrs[pos]; sysfs_attr_init(&attr->attr.attr); - attr->attr.attr.name = data->stats->names[i]; + attr->attr.attr.name = data->stats->descs[i].name; attr->attr.attr.mode = 0444; attr->attr.show = hw_stat_device_show; attr->show = show_hw_stats; - data->group.attrs[i] = &attr->attr.attr; + data->group.attrs[pos] = &attr->attr.attr; + pos++; } - attr = &data->attrs[i]; + attr = &data->attrs[pos]; sysfs_attr_init(&attr->attr.attr); attr->attr.attr.name = "lifespan"; attr->attr.attr.mode = 0644; @@ -972,7 +980,7 @@ int ib_setup_device_attrs(struct ib_device *ibdev) attr->show = show_stats_lifespan; attr->attr.store = hw_stat_device_store; attr->store = set_stats_lifespan; - data->group.attrs[i] = &attr->attr.attr; + data->group.attrs[pos] = &attr->attr.attr; for (i = 0; i != ARRAY_SIZE(ibdev->groups); i++) if (!ibdev->groups[i]) { ibdev->groups[i] = &data->group; @@ -994,7 +1002,7 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group) stats = ibdev->ops.alloc_hw_port_stats(port->ibdev, port->port_num); if (!stats) return ERR_PTR(-ENOMEM); - if (!stats->names || stats->num_counters <= 0) + if (!stats->descs || stats->num_counters <= 0) goto err_free_stats; /* @@ -1010,7 +1018,6 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group) if (!group->attrs) goto err_free_data; - mutex_init(&stats->lock); group->name = "hw_counters"; data->stats = stats; return data; @@ -1018,7 +1025,7 @@ alloc_hw_stats_port(struct ib_port *port, struct attribute_group *group) err_free_data: kfree(data); err_free_stats: - kfree(stats); + rdma_free_hw_stats_struct(stats); return ERR_PTR(-ENOMEM); } @@ -1027,7 +1034,8 @@ static int setup_hw_port_stats(struct ib_port *port, { struct hw_stats_port_attribute *attr; struct hw_stats_port_data *data; - int i, ret; + bool opstat_skipped = false; + int i, ret, pos = 0; data = alloc_hw_stats_port(port, group); if (IS_ERR(data)) @@ -1045,16 +1053,23 @@ static int setup_hw_port_stats(struct ib_port *port, data->stats->timestamp = jiffies; for (i = 0; i < data->stats->num_counters; i++) { - attr = &data->attrs[i]; + if (data->stats->descs[i].flags & IB_STAT_FLAG_OPTIONAL) { + opstat_skipped = true; + continue; + } + + WARN_ON(opstat_skipped); + attr = &data->attrs[pos]; sysfs_attr_init(&attr->attr.attr); - attr->attr.attr.name = data->stats->names[i]; + attr->attr.attr.name = data->stats->descs[i].name; attr->attr.attr.mode = 0444; attr->attr.show = hw_stat_port_show; attr->show = show_hw_stats; - group->attrs[i] = &attr->attr.attr; + group->attrs[pos] = &attr->attr.attr; + pos++; } - attr = &data->attrs[i]; + 
attr = &data->attrs[pos]; sysfs_attr_init(&attr->attr.attr); attr->attr.attr.name = "lifespan"; attr->attr.attr.mode = 0644; @@ -1062,7 +1077,7 @@ static int setup_hw_port_stats(struct ib_port *port, attr->show = show_stats_lifespan; attr->attr.store = hw_stat_port_store; attr->store = set_stats_lifespan; - group->attrs[i] = &attr->attr.attr; + group->attrs[pos] = &attr->attr.attr; port->hw_stats_data = data; return 0; @@ -1189,7 +1204,7 @@ static struct ib_port *setup_port(struct ib_core_device *coredev, int port_num, struct ib_port *p; int ret; - p = kzalloc(struct_size(p, attrs_list, + p = kvzalloc(struct_size(p, attrs_list, attr->gid_tbl_len + attr->pkey_tbl_len), GFP_KERNEL); if (!p) diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 2b72c4fa9550..9d6ac9dff39a 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -95,6 +95,7 @@ struct ucma_context { u64 uid; struct list_head list; + struct list_head mc_list; struct work_struct close_work; }; @@ -105,6 +106,7 @@ struct ucma_multicast { u64 uid; u8 join_state; + struct list_head list; struct sockaddr_storage addr; }; @@ -198,6 +200,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) INIT_WORK(&ctx->close_work, ucma_close_id); init_completion(&ctx->comp); + INIT_LIST_HEAD(&ctx->mc_list); /* So list_del() will work if we don't do ucma_finish_ctx() */ INIT_LIST_HEAD(&ctx->list); ctx->file = file; @@ -484,19 +487,19 @@ err1: static void ucma_cleanup_multicast(struct ucma_context *ctx) { - struct ucma_multicast *mc; - unsigned long index; + struct ucma_multicast *mc, *tmp; - xa_for_each(&multicast_table, index, mc) { - if (mc->ctx != ctx) - continue; + xa_lock(&multicast_table); + list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { + list_del(&mc->list); /* * At this point mc->ctx->ref is 0 so the mc cannot leave the * lock on the reader and this is enough serialization */ - xa_erase(&multicast_table, index); + __xa_erase(&multicast_table, mc->id); kfree(mc); } + xa_unlock(&multicast_table); } static void ucma_cleanup_mc_events(struct ucma_multicast *mc) @@ -1469,12 +1472,16 @@ static ssize_t ucma_process_join(struct ucma_file *file, mc->uid = cmd->uid; memcpy(&mc->addr, addr, cmd->addr_size); - if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, + xa_lock(&multicast_table); + if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, GFP_KERNEL)) { ret = -ENOMEM; goto err_free_mc; } + list_add_tail(&mc->list, &ctx->mc_list); + xa_unlock(&multicast_table); + mutex_lock(&ctx->mutex); ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, join_state, mc); @@ -1500,8 +1507,11 @@ err_leave_multicast: mutex_unlock(&ctx->mutex); ucma_cleanup_mc_events(mc); err_xa_erase: - xa_erase(&multicast_table, mc->id); + xa_lock(&multicast_table); + list_del(&mc->list); + __xa_erase(&multicast_table, mc->id); err_free_mc: + xa_unlock(&multicast_table); kfree(mc); err_put_ctx: ucma_put_ctx(ctx); @@ -1569,15 +1579,17 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file, mc = ERR_PTR(-EINVAL); else if (!refcount_inc_not_zero(&mc->ctx->ref)) mc = ERR_PTR(-ENXIO); - else - __xa_erase(&multicast_table, mc->id); - xa_unlock(&multicast_table); if (IS_ERR(mc)) { + xa_unlock(&multicast_table); ret = PTR_ERR(mc); goto out; } + list_del(&mc->list); + __xa_erase(&multicast_table, mc->id); + xa_unlock(&multicast_table); + mutex_lock(&mc->ctx->mutex); rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); mutex_unlock(&mc->ctx->mutex); diff 
--git a/drivers/infiniband/core/umem_dmabuf.c b/drivers/infiniband/core/umem_dmabuf.c index e824baf4640d..f0760741f281 100644 --- a/drivers/infiniband/core/umem_dmabuf.c +++ b/drivers/infiniband/core/umem_dmabuf.c @@ -6,9 +6,12 @@ #include <linux/dma-buf.h> #include <linux/dma-resv.h> #include <linux/dma-mapping.h> +#include <linux/module.h> #include "uverbs.h" +MODULE_IMPORT_NS(DMA_BUF); + int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf) { struct sg_table *sgt; @@ -163,12 +166,63 @@ out_release_dmabuf: } EXPORT_SYMBOL(ib_umem_dmabuf_get); +static void +ib_umem_dmabuf_unsupported_move_notify(struct dma_buf_attachment *attach) +{ + struct ib_umem_dmabuf *umem_dmabuf = attach->importer_priv; + + ibdev_warn_ratelimited(umem_dmabuf->umem.ibdev, + "Invalidate callback should not be called when memory is pinned\n"); +} + +static struct dma_buf_attach_ops ib_umem_dmabuf_attach_pinned_ops = { + .allow_peer2peer = true, + .move_notify = ib_umem_dmabuf_unsupported_move_notify, +}; + +struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device, + unsigned long offset, + size_t size, int fd, + int access) +{ + struct ib_umem_dmabuf *umem_dmabuf; + int err; + + umem_dmabuf = ib_umem_dmabuf_get(device, offset, size, fd, access, + &ib_umem_dmabuf_attach_pinned_ops); + if (IS_ERR(umem_dmabuf)) + return umem_dmabuf; + + dma_resv_lock(umem_dmabuf->attach->dmabuf->resv, NULL); + err = dma_buf_pin(umem_dmabuf->attach); + if (err) + goto err_release; + umem_dmabuf->pinned = 1; + + err = ib_umem_dmabuf_map_pages(umem_dmabuf); + if (err) + goto err_unpin; + dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv); + + return umem_dmabuf; + +err_unpin: + dma_buf_unpin(umem_dmabuf->attach); +err_release: + dma_resv_unlock(umem_dmabuf->attach->dmabuf->resv); + ib_umem_release(&umem_dmabuf->umem); + return ERR_PTR(err); +} +EXPORT_SYMBOL(ib_umem_dmabuf_get_pinned); + void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { struct dma_buf *dmabuf = umem_dmabuf->attach->dmabuf; dma_resv_lock(dmabuf->resv, NULL); ib_umem_dmabuf_unmap_pages(umem_dmabuf); + if (umem_dmabuf->pinned) + dma_buf_unpin(umem_dmabuf->attach); dma_resv_unlock(dmabuf->resv); dma_buf_detach(dmabuf, umem_dmabuf->attach); diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c index 7a47343d11f9..aead24c1a682 100644 --- a/drivers/infiniband/core/umem_odp.c +++ b/drivers/infiniband/core/umem_odp.c @@ -227,7 +227,6 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, const struct mmu_interval_notifier_ops *ops) { struct ib_umem_odp *umem_odp; - struct mm_struct *mm; int ret; if (WARN_ON_ONCE(!(access & IB_ACCESS_ON_DEMAND))) @@ -241,7 +240,7 @@ struct ib_umem_odp *ib_umem_odp_get(struct ib_device *device, umem_odp->umem.length = size; umem_odp->umem.address = addr; umem_odp->umem.writable = ib_access_writable(access); - umem_odp->umem.owning_mm = mm = current->mm; + umem_odp->umem.owning_mm = current->mm; umem_odp->notifier.ops = ops; umem_odp->page_shift = PAGE_SHIFT; diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 740e6b2efe0e..6b6393176b3c 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -837,11 +837,8 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs) new_mr->device = new_pd->device; new_mr->pd = new_pd; new_mr->type = IB_MR_TYPE_USER; - new_mr->dm = NULL; - new_mr->sig_attrs = NULL; new_mr->uobject = uobj; atomic_inc(&new_pd->usecnt); - new_mr->iova = cmd.hca_va; 
new_uobj->object = new_mr; rdma_restrack_new(&new_mr->res, RDMA_RESTRACK_MR); @@ -1402,7 +1399,6 @@ static int create_qp(struct uverbs_attr_bundle *attrs, attr.sq_sig_type = cmd->sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; attr.qp_type = cmd->qp_type; - attr.create_flags = 0; attr.cap.max_send_wr = cmd->max_send_wr; attr.cap.max_recv_wr = cmd->max_recv_wr; diff --git a/drivers/infiniband/core/uverbs_marshall.c b/drivers/infiniband/core/uverbs_marshall.c index b8d715c68ca4..11a080646916 100644 --- a/drivers/infiniband/core/uverbs_marshall.c +++ b/drivers/infiniband/core/uverbs_marshall.c @@ -66,7 +66,7 @@ void ib_copy_ah_attr_to_user(struct ib_device *device, struct rdma_ah_attr *src = ah_attr; struct rdma_ah_attr conv_ah; - memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved)); + memset(&dst->grh, 0, sizeof(dst->grh)); if ((ah_attr->type == RDMA_AH_ATTR_TYPE_OPA) && (rdma_ah_get_dlid(ah_attr) > be16_to_cpu(IB_LID_PERMISSIVE)) && diff --git a/drivers/infiniband/core/uverbs_uapi.c b/drivers/infiniband/core/uverbs_uapi.c index 2f2c7646fce1..a02916a3a79c 100644 --- a/drivers/infiniband/core/uverbs_uapi.c +++ b/drivers/infiniband/core/uverbs_uapi.c @@ -447,6 +447,9 @@ static int uapi_finalize(struct uverbs_api *uapi) uapi->num_write_ex = max_write_ex + 1; data = kmalloc_array(uapi->num_write + uapi->num_write_ex, sizeof(*uapi->write_methods), GFP_KERNEL); + if (!data) + return -ENOMEM; + for (i = 0; i != uapi->num_write + uapi->num_write_ex; i++) data[i] = &uapi->notsupp_method; uapi->write_methods = data; diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 89a2b21976d6..c18634bec212 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -1232,6 +1232,9 @@ static struct ib_qp *create_qp(struct ib_device *dev, struct ib_pd *pd, INIT_LIST_HEAD(&qp->rdma_mrs); INIT_LIST_HEAD(&qp->sig_mrs); + qp->send_cq = attr->send_cq; + qp->recv_cq = attr->recv_cq; + rdma_restrack_new(&qp->res, RDMA_RESTRACK_QP); WARN_ONCE(!udata && !caller, "Missing kernel QP owner"); rdma_restrack_set_name(&qp->res, udata ? NULL : caller); @@ -2976,3 +2979,52 @@ bool __rdma_block_iter_next(struct ib_block_iter *biter) return true; } EXPORT_SYMBOL(__rdma_block_iter_next); + +/** + * rdma_alloc_hw_stats_struct - Helper function to allocate dynamic struct + * for the drivers. 
+ * @descs: array of static descriptors + * @num_counters: number of elements in array + * @lifespan: milliseconds between updates + */ +struct rdma_hw_stats *rdma_alloc_hw_stats_struct( + const struct rdma_stat_desc *descs, int num_counters, + unsigned long lifespan) +{ + struct rdma_hw_stats *stats; + + stats = kzalloc(struct_size(stats, value, num_counters), GFP_KERNEL); + if (!stats) + return NULL; + + stats->is_disabled = kcalloc(BITS_TO_LONGS(num_counters), + sizeof(*stats->is_disabled), GFP_KERNEL); + if (!stats->is_disabled) + goto err; + + stats->descs = descs; + stats->num_counters = num_counters; + stats->lifespan = msecs_to_jiffies(lifespan); + mutex_init(&stats->lock); + + return stats; + +err: + kfree(stats); + return NULL; +} +EXPORT_SYMBOL(rdma_alloc_hw_stats_struct); + +/** + * rdma_free_hw_stats_struct - Helper function to release rdma_hw_stats + * @stats: statistics to release + */ +void rdma_free_hw_stats_struct(struct rdma_hw_stats *stats) +{ + if (!stats) + return; + + kfree(stats->is_disabled); + kfree(stats); +} +EXPORT_SYMBOL(rdma_free_hw_stats_struct); diff --git a/drivers/infiniband/hw/bnxt_re/bnxt_re.h b/drivers/infiniband/hw/bnxt_re/bnxt_re.h index ba26d8e6a9c2..79401e6c6aa9 100644 --- a/drivers/infiniband/hw/bnxt_re/bnxt_re.h +++ b/drivers/infiniband/hw/bnxt_re/bnxt_re.h @@ -39,22 +39,13 @@ #ifndef __BNXT_RE_H__ #define __BNXT_RE_H__ +#include "hw_counters.h" #define ROCE_DRV_MODULE_NAME "bnxt_re" #define BNXT_RE_DESC "Broadcom NetXtreme-C/E RoCE Driver" -#define BNXT_RE_PAGE_SHIFT_4K (12) -#define BNXT_RE_PAGE_SHIFT_8K (13) -#define BNXT_RE_PAGE_SHIFT_64K (16) -#define BNXT_RE_PAGE_SHIFT_2M (21) -#define BNXT_RE_PAGE_SHIFT_8M (23) -#define BNXT_RE_PAGE_SHIFT_1G (30) -#define BNXT_RE_PAGE_SIZE_4K BIT(BNXT_RE_PAGE_SHIFT_4K) -#define BNXT_RE_PAGE_SIZE_8K BIT(BNXT_RE_PAGE_SHIFT_8K) -#define BNXT_RE_PAGE_SIZE_64K BIT(BNXT_RE_PAGE_SHIFT_64K) -#define BNXT_RE_PAGE_SIZE_2M BIT(BNXT_RE_PAGE_SHIFT_2M) -#define BNXT_RE_PAGE_SIZE_8M BIT(BNXT_RE_PAGE_SHIFT_8M) -#define BNXT_RE_PAGE_SIZE_1G BIT(BNXT_RE_PAGE_SHIFT_1G) +#define BNXT_RE_PAGE_SHIFT_1G (30) +#define BNXT_RE_PAGE_SIZE_SUPPORTED 0x7FFFF000 /* 4kb - 1G */ #define BNXT_RE_MAX_MR_SIZE_LOW BIT_ULL(BNXT_RE_PAGE_SHIFT_1G) #define BNXT_RE_MAX_MR_SIZE_HIGH BIT_ULL(39) @@ -177,15 +168,17 @@ struct bnxt_re_dev { atomic_t srq_count; atomic_t mr_count; atomic_t mw_count; + atomic_t ah_count; + atomic_t pd_count; /* Max of 2 lossless traffic class supported per port */ u16 cosq[2]; /* QP for for handling QP1 packets */ struct bnxt_re_gsi_context gsi_ctx; + struct bnxt_re_stats stats; atomic_t nq_alloc_cnt; u32 is_virtfn; u32 num_vfs; - struct bnxt_qplib_roce_stats stats; }; #define to_bnxt_re_dev(ptr, member) \ diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.c b/drivers/infiniband/hw/bnxt_re/hw_counters.c index 7ba07797845c..825d512799d9 100644 --- a/drivers/infiniband/hw/bnxt_re/hw_counters.c +++ b/drivers/infiniband/hw/bnxt_re/hw_counters.c @@ -57,69 +57,208 @@ #include "bnxt_re.h" #include "hw_counters.h" -static const char * const bnxt_re_stat_name[] = { - [BNXT_RE_ACTIVE_QP] = "active_qps", - [BNXT_RE_ACTIVE_SRQ] = "active_srqs", - [BNXT_RE_ACTIVE_CQ] = "active_cqs", - [BNXT_RE_ACTIVE_MR] = "active_mrs", - [BNXT_RE_ACTIVE_MW] = "active_mws", - [BNXT_RE_RX_PKTS] = "rx_pkts", - [BNXT_RE_RX_BYTES] = "rx_bytes", - [BNXT_RE_TX_PKTS] = "tx_pkts", - [BNXT_RE_TX_BYTES] = "tx_bytes", - [BNXT_RE_RECOVERABLE_ERRORS] = "recoverable_errors", - [BNXT_RE_RX_DROPS] = "rx_roce_drops", - [BNXT_RE_RX_DISCARDS] = "rx_roce_discards", 
- [BNXT_RE_TO_RETRANSMITS] = "to_retransmits", - [BNXT_RE_SEQ_ERR_NAKS_RCVD] = "seq_err_naks_rcvd", - [BNXT_RE_MAX_RETRY_EXCEEDED] = "max_retry_exceeded", - [BNXT_RE_RNR_NAKS_RCVD] = "rnr_naks_rcvd", - [BNXT_RE_MISSING_RESP] = "missing_resp", - [BNXT_RE_UNRECOVERABLE_ERR] = "unrecoverable_err", - [BNXT_RE_BAD_RESP_ERR] = "bad_resp_err", - [BNXT_RE_LOCAL_QP_OP_ERR] = "local_qp_op_err", - [BNXT_RE_LOCAL_PROTECTION_ERR] = "local_protection_err", - [BNXT_RE_MEM_MGMT_OP_ERR] = "mem_mgmt_op_err", - [BNXT_RE_REMOTE_INVALID_REQ_ERR] = "remote_invalid_req_err", - [BNXT_RE_REMOTE_ACCESS_ERR] = "remote_access_err", - [BNXT_RE_REMOTE_OP_ERR] = "remote_op_err", - [BNXT_RE_DUP_REQ] = "dup_req", - [BNXT_RE_RES_EXCEED_MAX] = "res_exceed_max", - [BNXT_RE_RES_LENGTH_MISMATCH] = "res_length_mismatch", - [BNXT_RE_RES_EXCEEDS_WQE] = "res_exceeds_wqe", - [BNXT_RE_RES_OPCODE_ERR] = "res_opcode_err", - [BNXT_RE_RES_RX_INVALID_RKEY] = "res_rx_invalid_rkey", - [BNXT_RE_RES_RX_DOMAIN_ERR] = "res_rx_domain_err", - [BNXT_RE_RES_RX_NO_PERM] = "res_rx_no_perm", - [BNXT_RE_RES_RX_RANGE_ERR] = "res_rx_range_err", - [BNXT_RE_RES_TX_INVALID_RKEY] = "res_tx_invalid_rkey", - [BNXT_RE_RES_TX_DOMAIN_ERR] = "res_tx_domain_err", - [BNXT_RE_RES_TX_NO_PERM] = "res_tx_no_perm", - [BNXT_RE_RES_TX_RANGE_ERR] = "res_tx_range_err", - [BNXT_RE_RES_IRRQ_OFLOW] = "res_irrq_oflow", - [BNXT_RE_RES_UNSUP_OPCODE] = "res_unsup_opcode", - [BNXT_RE_RES_UNALIGNED_ATOMIC] = "res_unaligned_atomic", - [BNXT_RE_RES_REM_INV_ERR] = "res_rem_inv_err", - [BNXT_RE_RES_MEM_ERROR] = "res_mem_err", - [BNXT_RE_RES_SRQ_ERR] = "res_srq_err", - [BNXT_RE_RES_CMP_ERR] = "res_cmp_err", - [BNXT_RE_RES_INVALID_DUP_RKEY] = "res_invalid_dup_rkey", - [BNXT_RE_RES_WQE_FORMAT_ERR] = "res_wqe_format_err", - [BNXT_RE_RES_CQ_LOAD_ERR] = "res_cq_load_err", - [BNXT_RE_RES_SRQ_LOAD_ERR] = "res_srq_load_err", - [BNXT_RE_RES_TX_PCI_ERR] = "res_tx_pci_err", - [BNXT_RE_RES_RX_PCI_ERR] = "res_rx_pci_err", - [BNXT_RE_OUT_OF_SEQ_ERR] = "oos_drop_count" +static const struct rdma_stat_desc bnxt_re_stat_descs[] = { + [BNXT_RE_ACTIVE_PD].name = "active_pds", + [BNXT_RE_ACTIVE_AH].name = "active_ahs", + [BNXT_RE_ACTIVE_QP].name = "active_qps", + [BNXT_RE_ACTIVE_SRQ].name = "active_srqs", + [BNXT_RE_ACTIVE_CQ].name = "active_cqs", + [BNXT_RE_ACTIVE_MR].name = "active_mrs", + [BNXT_RE_ACTIVE_MW].name = "active_mws", + [BNXT_RE_RX_PKTS].name = "rx_pkts", + [BNXT_RE_RX_BYTES].name = "rx_bytes", + [BNXT_RE_TX_PKTS].name = "tx_pkts", + [BNXT_RE_TX_BYTES].name = "tx_bytes", + [BNXT_RE_RECOVERABLE_ERRORS].name = "recoverable_errors", + [BNXT_RE_RX_ERRORS].name = "rx_roce_errors", + [BNXT_RE_RX_DISCARDS].name = "rx_roce_discards", + [BNXT_RE_TO_RETRANSMITS].name = "to_retransmits", + [BNXT_RE_SEQ_ERR_NAKS_RCVD].name = "seq_err_naks_rcvd", + [BNXT_RE_MAX_RETRY_EXCEEDED].name = "max_retry_exceeded", + [BNXT_RE_RNR_NAKS_RCVD].name = "rnr_naks_rcvd", + [BNXT_RE_MISSING_RESP].name = "missing_resp", + [BNXT_RE_UNRECOVERABLE_ERR].name = "unrecoverable_err", + [BNXT_RE_BAD_RESP_ERR].name = "bad_resp_err", + [BNXT_RE_LOCAL_QP_OP_ERR].name = "local_qp_op_err", + [BNXT_RE_LOCAL_PROTECTION_ERR].name = "local_protection_err", + [BNXT_RE_MEM_MGMT_OP_ERR].name = "mem_mgmt_op_err", + [BNXT_RE_REMOTE_INVALID_REQ_ERR].name = "remote_invalid_req_err", + [BNXT_RE_REMOTE_ACCESS_ERR].name = "remote_access_err", + [BNXT_RE_REMOTE_OP_ERR].name = "remote_op_err", + [BNXT_RE_DUP_REQ].name = "dup_req", + [BNXT_RE_RES_EXCEED_MAX].name = "res_exceed_max", + [BNXT_RE_RES_LENGTH_MISMATCH].name = "res_length_mismatch", + 
[BNXT_RE_RES_EXCEEDS_WQE].name = "res_exceeds_wqe", + [BNXT_RE_RES_OPCODE_ERR].name = "res_opcode_err", + [BNXT_RE_RES_RX_INVALID_RKEY].name = "res_rx_invalid_rkey", + [BNXT_RE_RES_RX_DOMAIN_ERR].name = "res_rx_domain_err", + [BNXT_RE_RES_RX_NO_PERM].name = "res_rx_no_perm", + [BNXT_RE_RES_RX_RANGE_ERR].name = "res_rx_range_err", + [BNXT_RE_RES_TX_INVALID_RKEY].name = "res_tx_invalid_rkey", + [BNXT_RE_RES_TX_DOMAIN_ERR].name = "res_tx_domain_err", + [BNXT_RE_RES_TX_NO_PERM].name = "res_tx_no_perm", + [BNXT_RE_RES_TX_RANGE_ERR].name = "res_tx_range_err", + [BNXT_RE_RES_IRRQ_OFLOW].name = "res_irrq_oflow", + [BNXT_RE_RES_UNSUP_OPCODE].name = "res_unsup_opcode", + [BNXT_RE_RES_UNALIGNED_ATOMIC].name = "res_unaligned_atomic", + [BNXT_RE_RES_REM_INV_ERR].name = "res_rem_inv_err", + [BNXT_RE_RES_MEM_ERROR].name = "res_mem_err", + [BNXT_RE_RES_SRQ_ERR].name = "res_srq_err", + [BNXT_RE_RES_CMP_ERR].name = "res_cmp_err", + [BNXT_RE_RES_INVALID_DUP_RKEY].name = "res_invalid_dup_rkey", + [BNXT_RE_RES_WQE_FORMAT_ERR].name = "res_wqe_format_err", + [BNXT_RE_RES_CQ_LOAD_ERR].name = "res_cq_load_err", + [BNXT_RE_RES_SRQ_LOAD_ERR].name = "res_srq_load_err", + [BNXT_RE_RES_TX_PCI_ERR].name = "res_tx_pci_err", + [BNXT_RE_RES_RX_PCI_ERR].name = "res_rx_pci_err", + [BNXT_RE_OUT_OF_SEQ_ERR].name = "oos_drop_count", + [BNXT_RE_TX_ATOMIC_REQ].name = "tx_atomic_req", + [BNXT_RE_TX_READ_REQ].name = "tx_read_req", + [BNXT_RE_TX_READ_RES].name = "tx_read_resp", + [BNXT_RE_TX_WRITE_REQ].name = "tx_write_req", + [BNXT_RE_TX_SEND_REQ].name = "tx_send_req", + [BNXT_RE_RX_ATOMIC_REQ].name = "rx_atomic_req", + [BNXT_RE_RX_READ_REQ].name = "rx_read_req", + [BNXT_RE_RX_READ_RESP].name = "rx_read_resp", + [BNXT_RE_RX_WRITE_REQ].name = "rx_write_req", + [BNXT_RE_RX_SEND_REQ].name = "rx_send_req", + [BNXT_RE_RX_ROCE_GOOD_PKTS].name = "rx_roce_good_pkts", + [BNXT_RE_RX_ROCE_GOOD_BYTES].name = "rx_roce_good_bytes", + [BNXT_RE_OOB].name = "rx_out_of_buffer" }; +static void bnxt_re_copy_ext_stats(struct bnxt_re_dev *rdev, + struct rdma_hw_stats *stats, + struct bnxt_qplib_ext_stat *s) +{ + stats->value[BNXT_RE_TX_ATOMIC_REQ] = s->tx_atomic_req; + stats->value[BNXT_RE_TX_READ_REQ] = s->tx_read_req; + stats->value[BNXT_RE_TX_READ_RES] = s->tx_read_res; + stats->value[BNXT_RE_TX_WRITE_REQ] = s->tx_write_req; + stats->value[BNXT_RE_TX_SEND_REQ] = s->tx_send_req; + stats->value[BNXT_RE_RX_ATOMIC_REQ] = s->rx_atomic_req; + stats->value[BNXT_RE_RX_READ_REQ] = s->rx_read_req; + stats->value[BNXT_RE_RX_READ_RESP] = s->rx_read_res; + stats->value[BNXT_RE_RX_WRITE_REQ] = s->rx_write_req; + stats->value[BNXT_RE_RX_SEND_REQ] = s->rx_send_req; + stats->value[BNXT_RE_RX_ROCE_GOOD_PKTS] = s->rx_roce_good_pkts; + stats->value[BNXT_RE_RX_ROCE_GOOD_BYTES] = s->rx_roce_good_bytes; + stats->value[BNXT_RE_OOB] = s->rx_out_of_buffer; +} + +static int bnxt_re_get_ext_stat(struct bnxt_re_dev *rdev, + struct rdma_hw_stats *stats) +{ + struct bnxt_qplib_ext_stat *estat = &rdev->stats.rstat.ext_stat; + u32 fid; + int rc; + + fid = PCI_FUNC(rdev->en_dev->pdev->devfn); + rc = bnxt_qplib_qext_stat(&rdev->rcfw, fid, estat); + if (rc) + goto done; + bnxt_re_copy_ext_stats(rdev, stats, estat); + +done: + return rc; +} + +static void bnxt_re_copy_err_stats(struct bnxt_re_dev *rdev, + struct rdma_hw_stats *stats, + struct bnxt_qplib_roce_stats *err_s) +{ + stats->value[BNXT_RE_TO_RETRANSMITS] = + err_s->to_retransmits; + stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] = + err_s->seq_err_naks_rcvd; + stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] = + err_s->max_retry_exceeded; + 
stats->value[BNXT_RE_RNR_NAKS_RCVD] = + err_s->rnr_naks_rcvd; + stats->value[BNXT_RE_MISSING_RESP] = + err_s->missing_resp; + stats->value[BNXT_RE_UNRECOVERABLE_ERR] = + err_s->unrecoverable_err; + stats->value[BNXT_RE_BAD_RESP_ERR] = + err_s->bad_resp_err; + stats->value[BNXT_RE_LOCAL_QP_OP_ERR] = + err_s->local_qp_op_err; + stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] = + err_s->local_protection_err; + stats->value[BNXT_RE_MEM_MGMT_OP_ERR] = + err_s->mem_mgmt_op_err; + stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] = + err_s->remote_invalid_req_err; + stats->value[BNXT_RE_REMOTE_ACCESS_ERR] = + err_s->remote_access_err; + stats->value[BNXT_RE_REMOTE_OP_ERR] = + err_s->remote_op_err; + stats->value[BNXT_RE_DUP_REQ] = + err_s->dup_req; + stats->value[BNXT_RE_RES_EXCEED_MAX] = + err_s->res_exceed_max; + stats->value[BNXT_RE_RES_LENGTH_MISMATCH] = + err_s->res_length_mismatch; + stats->value[BNXT_RE_RES_EXCEEDS_WQE] = + err_s->res_exceeds_wqe; + stats->value[BNXT_RE_RES_OPCODE_ERR] = + err_s->res_opcode_err; + stats->value[BNXT_RE_RES_RX_INVALID_RKEY] = + err_s->res_rx_invalid_rkey; + stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] = + err_s->res_rx_domain_err; + stats->value[BNXT_RE_RES_RX_NO_PERM] = + err_s->res_rx_no_perm; + stats->value[BNXT_RE_RES_RX_RANGE_ERR] = + err_s->res_rx_range_err; + stats->value[BNXT_RE_RES_TX_INVALID_RKEY] = + err_s->res_tx_invalid_rkey; + stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] = + err_s->res_tx_domain_err; + stats->value[BNXT_RE_RES_TX_NO_PERM] = + err_s->res_tx_no_perm; + stats->value[BNXT_RE_RES_TX_RANGE_ERR] = + err_s->res_tx_range_err; + stats->value[BNXT_RE_RES_IRRQ_OFLOW] = + err_s->res_irrq_oflow; + stats->value[BNXT_RE_RES_UNSUP_OPCODE] = + err_s->res_unsup_opcode; + stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] = + err_s->res_unaligned_atomic; + stats->value[BNXT_RE_RES_REM_INV_ERR] = + err_s->res_rem_inv_err; + stats->value[BNXT_RE_RES_MEM_ERROR] = + err_s->res_mem_error; + stats->value[BNXT_RE_RES_SRQ_ERR] = + err_s->res_srq_err; + stats->value[BNXT_RE_RES_CMP_ERR] = + err_s->res_cmp_err; + stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] = + err_s->res_invalid_dup_rkey; + stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] = + err_s->res_wqe_format_err; + stats->value[BNXT_RE_RES_CQ_LOAD_ERR] = + err_s->res_cq_load_err; + stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] = + err_s->res_srq_load_err; + stats->value[BNXT_RE_RES_TX_PCI_ERR] = + err_s->res_tx_pci_err; + stats->value[BNXT_RE_RES_RX_PCI_ERR] = + err_s->res_rx_pci_err; + stats->value[BNXT_RE_OUT_OF_SEQ_ERR] = + err_s->res_oos_drop_count; +} + int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, u32 port, int index) { struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); - struct ctx_hw_stats *bnxt_re_stats = rdev->qplib_ctx.stats.dma; + struct ctx_hw_stats *hw_stats = NULL; + struct bnxt_qplib_roce_stats *err_s = NULL; int rc = 0; + hw_stats = rdev->qplib_ctx.stats.dma; if (!port || !stats) return -EINVAL; @@ -128,118 +267,61 @@ int bnxt_re_ib_get_hw_stats(struct ib_device *ibdev, stats->value[BNXT_RE_ACTIVE_CQ] = atomic_read(&rdev->cq_count); stats->value[BNXT_RE_ACTIVE_MR] = atomic_read(&rdev->mr_count); stats->value[BNXT_RE_ACTIVE_MW] = atomic_read(&rdev->mw_count); - if (bnxt_re_stats) { + stats->value[BNXT_RE_ACTIVE_PD] = atomic_read(&rdev->pd_count); + stats->value[BNXT_RE_ACTIVE_AH] = atomic_read(&rdev->ah_count); + + if (hw_stats) { stats->value[BNXT_RE_RECOVERABLE_ERRORS] = - le64_to_cpu(bnxt_re_stats->tx_bcast_pkts); - stats->value[BNXT_RE_RX_DROPS] = - 
le64_to_cpu(bnxt_re_stats->rx_error_pkts); + le64_to_cpu(hw_stats->tx_bcast_pkts); + stats->value[BNXT_RE_RX_ERRORS] = + le64_to_cpu(hw_stats->rx_error_pkts); stats->value[BNXT_RE_RX_DISCARDS] = - le64_to_cpu(bnxt_re_stats->rx_discard_pkts); + le64_to_cpu(hw_stats->rx_discard_pkts); stats->value[BNXT_RE_RX_PKTS] = - le64_to_cpu(bnxt_re_stats->rx_ucast_pkts); + le64_to_cpu(hw_stats->rx_ucast_pkts); stats->value[BNXT_RE_RX_BYTES] = - le64_to_cpu(bnxt_re_stats->rx_ucast_bytes); + le64_to_cpu(hw_stats->rx_ucast_bytes); stats->value[BNXT_RE_TX_PKTS] = - le64_to_cpu(bnxt_re_stats->tx_ucast_pkts); + le64_to_cpu(hw_stats->tx_ucast_pkts); stats->value[BNXT_RE_TX_BYTES] = - le64_to_cpu(bnxt_re_stats->tx_ucast_bytes); + le64_to_cpu(hw_stats->tx_ucast_bytes); } + err_s = &rdev->stats.rstat.errs; if (test_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags)) { - rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, &rdev->stats); - if (rc) + rc = bnxt_qplib_get_roce_stats(&rdev->rcfw, err_s); + if (rc) { clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, &rdev->flags); - stats->value[BNXT_RE_TO_RETRANSMITS] = - rdev->stats.to_retransmits; - stats->value[BNXT_RE_SEQ_ERR_NAKS_RCVD] = - rdev->stats.seq_err_naks_rcvd; - stats->value[BNXT_RE_MAX_RETRY_EXCEEDED] = - rdev->stats.max_retry_exceeded; - stats->value[BNXT_RE_RNR_NAKS_RCVD] = - rdev->stats.rnr_naks_rcvd; - stats->value[BNXT_RE_MISSING_RESP] = - rdev->stats.missing_resp; - stats->value[BNXT_RE_UNRECOVERABLE_ERR] = - rdev->stats.unrecoverable_err; - stats->value[BNXT_RE_BAD_RESP_ERR] = - rdev->stats.bad_resp_err; - stats->value[BNXT_RE_LOCAL_QP_OP_ERR] = - rdev->stats.local_qp_op_err; - stats->value[BNXT_RE_LOCAL_PROTECTION_ERR] = - rdev->stats.local_protection_err; - stats->value[BNXT_RE_MEM_MGMT_OP_ERR] = - rdev->stats.mem_mgmt_op_err; - stats->value[BNXT_RE_REMOTE_INVALID_REQ_ERR] = - rdev->stats.remote_invalid_req_err; - stats->value[BNXT_RE_REMOTE_ACCESS_ERR] = - rdev->stats.remote_access_err; - stats->value[BNXT_RE_REMOTE_OP_ERR] = - rdev->stats.remote_op_err; - stats->value[BNXT_RE_DUP_REQ] = - rdev->stats.dup_req; - stats->value[BNXT_RE_RES_EXCEED_MAX] = - rdev->stats.res_exceed_max; - stats->value[BNXT_RE_RES_LENGTH_MISMATCH] = - rdev->stats.res_length_mismatch; - stats->value[BNXT_RE_RES_EXCEEDS_WQE] = - rdev->stats.res_exceeds_wqe; - stats->value[BNXT_RE_RES_OPCODE_ERR] = - rdev->stats.res_opcode_err; - stats->value[BNXT_RE_RES_RX_INVALID_RKEY] = - rdev->stats.res_rx_invalid_rkey; - stats->value[BNXT_RE_RES_RX_DOMAIN_ERR] = - rdev->stats.res_rx_domain_err; - stats->value[BNXT_RE_RES_RX_NO_PERM] = - rdev->stats.res_rx_no_perm; - stats->value[BNXT_RE_RES_RX_RANGE_ERR] = - rdev->stats.res_rx_range_err; - stats->value[BNXT_RE_RES_TX_INVALID_RKEY] = - rdev->stats.res_tx_invalid_rkey; - stats->value[BNXT_RE_RES_TX_DOMAIN_ERR] = - rdev->stats.res_tx_domain_err; - stats->value[BNXT_RE_RES_TX_NO_PERM] = - rdev->stats.res_tx_no_perm; - stats->value[BNXT_RE_RES_TX_RANGE_ERR] = - rdev->stats.res_tx_range_err; - stats->value[BNXT_RE_RES_IRRQ_OFLOW] = - rdev->stats.res_irrq_oflow; - stats->value[BNXT_RE_RES_UNSUP_OPCODE] = - rdev->stats.res_unsup_opcode; - stats->value[BNXT_RE_RES_UNALIGNED_ATOMIC] = - rdev->stats.res_unaligned_atomic; - stats->value[BNXT_RE_RES_REM_INV_ERR] = - rdev->stats.res_rem_inv_err; - stats->value[BNXT_RE_RES_MEM_ERROR] = - rdev->stats.res_mem_error; - stats->value[BNXT_RE_RES_SRQ_ERR] = - rdev->stats.res_srq_err; - stats->value[BNXT_RE_RES_CMP_ERR] = - rdev->stats.res_cmp_err; - stats->value[BNXT_RE_RES_INVALID_DUP_RKEY] = - 
rdev->stats.res_invalid_dup_rkey; - stats->value[BNXT_RE_RES_WQE_FORMAT_ERR] = - rdev->stats.res_wqe_format_err; - stats->value[BNXT_RE_RES_CQ_LOAD_ERR] = - rdev->stats.res_cq_load_err; - stats->value[BNXT_RE_RES_SRQ_LOAD_ERR] = - rdev->stats.res_srq_load_err; - stats->value[BNXT_RE_RES_TX_PCI_ERR] = - rdev->stats.res_tx_pci_err; - stats->value[BNXT_RE_RES_RX_PCI_ERR] = - rdev->stats.res_rx_pci_err; - stats->value[BNXT_RE_OUT_OF_SEQ_ERR] = - rdev->stats.res_oos_drop_count; + goto done; + } + if (_is_ext_stats_supported(rdev->dev_attr.dev_cap_flags) && + !rdev->is_virtfn) { + rc = bnxt_re_get_ext_stat(rdev, stats); + if (rc) { + clear_bit(BNXT_RE_FLAG_ISSUE_ROCE_STATS, + &rdev->flags); + goto done; + } + } + bnxt_re_copy_err_stats(rdev, stats, err_s); } - return ARRAY_SIZE(bnxt_re_stat_name); +done: + return bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ? + BNXT_RE_NUM_EXT_COUNTERS : BNXT_RE_NUM_STD_COUNTERS; } struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) { - BUILD_BUG_ON(ARRAY_SIZE(bnxt_re_stat_name) != BNXT_RE_NUM_COUNTERS); + struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + int num_counters = 0; + + if (bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx)) + num_counters = BNXT_RE_NUM_EXT_COUNTERS; + else + num_counters = BNXT_RE_NUM_STD_COUNTERS; - return rdma_alloc_hw_stats_struct(bnxt_re_stat_name, - ARRAY_SIZE(bnxt_re_stat_name), + return rdma_alloc_hw_stats_struct(bnxt_re_stat_descs, num_counters, RDMA_HW_STATS_DEFAULT_LIFESPAN); } diff --git a/drivers/infiniband/hw/bnxt_re/hw_counters.h b/drivers/infiniband/hw/bnxt_re/hw_counters.h index 6f2d2f91d9ff..7943b2c393e4 100644 --- a/drivers/infiniband/hw/bnxt_re/hw_counters.h +++ b/drivers/infiniband/hw/bnxt_re/hw_counters.h @@ -41,6 +41,8 @@ #define __BNXT_RE_HW_STATS_H__ enum bnxt_re_hw_stats { + BNXT_RE_ACTIVE_PD, + BNXT_RE_ACTIVE_AH, BNXT_RE_ACTIVE_QP, BNXT_RE_ACTIVE_SRQ, BNXT_RE_ACTIVE_CQ, @@ -51,7 +53,7 @@ enum bnxt_re_hw_stats { BNXT_RE_TX_PKTS, BNXT_RE_TX_BYTES, BNXT_RE_RECOVERABLE_ERRORS, - BNXT_RE_RX_DROPS, + BNXT_RE_RX_ERRORS, BNXT_RE_RX_DISCARDS, BNXT_RE_TO_RETRANSMITS, BNXT_RE_SEQ_ERR_NAKS_RCVD, @@ -93,7 +95,31 @@ enum bnxt_re_hw_stats { BNXT_RE_RES_TX_PCI_ERR, BNXT_RE_RES_RX_PCI_ERR, BNXT_RE_OUT_OF_SEQ_ERR, - BNXT_RE_NUM_COUNTERS + BNXT_RE_TX_ATOMIC_REQ, + BNXT_RE_TX_READ_REQ, + BNXT_RE_TX_READ_RES, + BNXT_RE_TX_WRITE_REQ, + BNXT_RE_TX_SEND_REQ, + BNXT_RE_RX_ATOMIC_REQ, + BNXT_RE_RX_READ_REQ, + BNXT_RE_RX_READ_RESP, + BNXT_RE_RX_WRITE_REQ, + BNXT_RE_RX_SEND_REQ, + BNXT_RE_RX_ROCE_GOOD_PKTS, + BNXT_RE_RX_ROCE_GOOD_BYTES, + BNXT_RE_OOB, + BNXT_RE_NUM_EXT_COUNTERS +}; + +#define BNXT_RE_NUM_STD_COUNTERS (BNXT_RE_OUT_OF_SEQ_ERR + 1) + +struct bnxt_re_rstat { + struct bnxt_qplib_roce_stats errs; + struct bnxt_qplib_ext_stat ext_stat; +}; + +struct bnxt_re_stats { + struct bnxt_re_rstat rstat; }; struct rdma_hw_stats *bnxt_re_ib_alloc_hw_port_stats(struct ib_device *ibdev, diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 408dfbcc47b5..3224f18a66e5 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -41,6 +41,7 @@ #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/if_ether.h> +#include <net/addrconf.h> #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> @@ -130,10 +131,10 @@ int bnxt_re_query_device(struct ib_device *ibdev, memcpy(&ib_attr->fw_ver, dev_attr->fw_ver, min(sizeof(dev_attr->fw_ver), sizeof(ib_attr->fw_ver))); - 
bnxt_qplib_get_guid(rdev->netdev->dev_addr, - (u8 *)&ib_attr->sys_image_guid); + addrconf_addr_eui48((u8 *)&ib_attr->sys_image_guid, + rdev->netdev->dev_addr); ib_attr->max_mr_size = BNXT_RE_MAX_MR_SIZE; - ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M; + ib_attr->page_size_cap = BNXT_RE_PAGE_SIZE_SUPPORTED; ib_attr->vendor_id = rdev->en_dev->pdev->vendor; ib_attr->vendor_part_id = rdev->en_dev->pdev->device; @@ -261,13 +262,12 @@ void bnxt_re_query_fw_str(struct ib_device *ibdev, char *str) int bnxt_re_query_pkey(struct ib_device *ibdev, u32 port_num, u16 index, u16 *pkey) { - struct bnxt_re_dev *rdev = to_bnxt_re_dev(ibdev, ibdev); + if (index > 0) + return -EINVAL; - /* Ignore port_num */ + *pkey = IB_DEFAULT_PKEY_FULL; - memset(pkey, 0, sizeof(*pkey)); - return bnxt_qplib_get_pkey(&rdev->qplib_res, - &rdev->qplib_res.pkey_tbl, index, pkey); + return 0; } int bnxt_re_query_gid(struct ib_device *ibdev, u32 port_num, @@ -541,9 +541,12 @@ int bnxt_re_dealloc_pd(struct ib_pd *ib_pd, struct ib_udata *udata) bnxt_re_destroy_fence_mr(pd); - if (pd->qplib_pd.id) - bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, - &pd->qplib_pd); + if (pd->qplib_pd.id) { + if (!bnxt_qplib_dealloc_pd(&rdev->qplib_res, + &rdev->qplib_res.pd_tbl, + &pd->qplib_pd)) + atomic_dec(&rdev->pd_count); + } return 0; } @@ -595,6 +598,8 @@ int bnxt_re_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) if (bnxt_re_create_fence_mr(pd)) ibdev_warn(&rdev->ibdev, "Failed to create Fence-MR\n"); + atomic_inc(&rdev->pd_count); + return 0; dbfail: bnxt_qplib_dealloc_pd(&rdev->qplib_res, &rdev->qplib_res.pd_tbl, @@ -611,6 +616,8 @@ int bnxt_re_destroy_ah(struct ib_ah *ib_ah, u32 flags) bnxt_qplib_destroy_ah(&rdev->qplib_res, &ah->qplib_ah, !(flags & RDMA_DESTROY_AH_SLEEPABLE)); + atomic_dec(&rdev->ah_count); + return 0; } @@ -695,15 +702,11 @@ int bnxt_re_create_ah(struct ib_ah *ib_ah, struct rdma_ah_init_attr *init_attr, wmb(); /* make sure cache is updated. 
*/ spin_unlock_irqrestore(&uctx->sh_lock, flag); } + atomic_inc(&rdev->ah_count); return 0; } -int bnxt_re_modify_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr) -{ - return 0; -} - int bnxt_re_query_ah(struct ib_ah *ib_ah, struct rdma_ah_attr *ah_attr) { struct bnxt_re_ah *ah = container_of(ib_ah, struct bnxt_re_ah, ib_ah); @@ -760,6 +763,7 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) bnxt_qplib_destroy_ah(&rdev->qplib_res, &gsi_sah->qplib_ah, true); + atomic_dec(&rdev->ah_count); bnxt_qplib_clean_qp(&qp->qplib_qp); ibdev_dbg(&rdev->ibdev, "Destroy the shadow QP\n"); @@ -1006,6 +1010,7 @@ static struct bnxt_re_ah *bnxt_re_create_shadow_qp_ah "Failed to allocate HW AH for Shadow QP"); goto fail; } + atomic_inc(&rdev->ah_count); return ah; @@ -2478,7 +2483,8 @@ static int bnxt_re_build_reg_wqe(const struct ib_reg_wr *wr, wqe->frmr.l_key = wr->key; wqe->frmr.length = wr->mr->length; - wqe->frmr.pbl_pg_sz_log = (wr->mr->page_size >> PAGE_SHIFT_4K) - 1; + wqe->frmr.pbl_pg_sz_log = ilog2(PAGE_SIZE >> PAGE_SHIFT_4K); + wqe->frmr.pg_sz_log = ilog2(wr->mr->page_size >> PAGE_SHIFT_4K); wqe->frmr.va = wr->mr->iova; return 0; } @@ -3354,8 +3360,11 @@ static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp, struct ib_wc *wc, struct bnxt_qplib_cqe *cqe) { + struct bnxt_re_dev *rdev; + u16 vlan_id = 0; u8 nw_type; + rdev = qp->rdev; wc->opcode = IB_WC_RECV; wc->status = __rc_to_ib_wc_status(cqe->status); @@ -3367,9 +3376,12 @@ static void bnxt_re_process_res_ud_wc(struct bnxt_re_qp *qp, memcpy(wc->smac, cqe->smac, ETH_ALEN); wc->wc_flags |= IB_WC_WITH_SMAC; if (cqe->flags & CQ_RES_UD_FLAGS_META_FORMAT_VLAN) { - wc->vlan_id = (cqe->cfa_meta & 0xFFF); - if (wc->vlan_id < 0x1000) - wc->wc_flags |= IB_WC_WITH_VLAN; + vlan_id = (cqe->cfa_meta & 0xFFF); + } + /* Mark only if vlan_id is non zero */ + if (vlan_id && bnxt_re_check_if_vlan_valid(rdev, vlan_id)) { + wc->vlan_id = vlan_id; + wc->wc_flags |= IB_WC_WITH_VLAN; } nw_type = (cqe->flags & CQ_RES_UD_FLAGS_ROCE_IP_VER_MASK) >> CQ_RES_UD_FLAGS_ROCE_IP_VER_SFT; @@ -3798,7 +3810,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, mr->qplib_mr.va = virt_addr; page_size = ib_umem_find_best_pgsz( - umem, BNXT_RE_PAGE_SIZE_4K | BNXT_RE_PAGE_SIZE_2M, virt_addr); + umem, BNXT_RE_PAGE_SIZE_SUPPORTED, virt_addr); if (!page_size) { ibdev_err(&rdev->ibdev, "umem page size unsupported!"); rc = -EFAULT; diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.h b/drivers/infiniband/hw/bnxt_re/ib_verbs.h index b5c6e0f4f877..94326267f9bb 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.h +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.h @@ -166,7 +166,6 @@ int bnxt_re_alloc_pd(struct ib_pd *pd, struct ib_udata *udata); int bnxt_re_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata); int bnxt_re_create_ah(struct ib_ah *ah, struct rdma_ah_init_attr *init_attr, struct ib_udata *udata); -int bnxt_re_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); int bnxt_re_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr); int bnxt_re_destroy_ah(struct ib_ah *ah, u32 flags); int bnxt_re_create_srq(struct ib_srq *srq, diff --git a/drivers/infiniband/hw/bnxt_re/main.c b/drivers/infiniband/hw/bnxt_re/main.c index 66268e41b470..3d6834d3d4fb 100644 --- a/drivers/infiniband/hw/bnxt_re/main.c +++ b/drivers/infiniband/hw/bnxt_re/main.c @@ -127,6 +127,8 @@ static int bnxt_re_setup_chip_ctx(struct bnxt_re_dev *rdev, u8 wqe_mode) rdev->qplib_res.cctx = rdev->chip_ctx; rdev->rcfw.res = &rdev->qplib_res; + rdev->qplib_res.dattr = 
&rdev->dev_attr; + rdev->qplib_res.is_vf = BNXT_VF(bp); bnxt_re_set_drv_mode(rdev, wqe_mode); if (bnxt_qplib_determine_atomics(en_dev->pdev)) @@ -523,7 +525,8 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, u32 fw_stats_ctx_id) { struct bnxt_en_dev *en_dev = rdev->en_dev; - struct hwrm_stat_ctx_free_input req = {0}; + struct hwrm_stat_ctx_free_input req = {}; + struct hwrm_stat_ctx_free_output resp = {}; struct bnxt_fw_msg fw_msg; int rc = -EINVAL; @@ -537,8 +540,8 @@ static int bnxt_re_net_stats_ctx_free(struct bnxt_re_dev *rdev, bnxt_re_init_hwrm_hdr(rdev, (void *)&req, HWRM_STAT_CTX_FREE, -1, -1); req.stat_ctx_id = cpu_to_le32(fw_stats_ctx_id); - bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&req, - sizeof(req), DFLT_HWRM_CMD_TIMEOUT); + bnxt_re_fill_fw_msg(&fw_msg, (void *)&req, sizeof(req), (void *)&resp, + sizeof(resp), DFLT_HWRM_CMD_TIMEOUT); rc = en_dev->en_ops->bnxt_send_fw_msg(en_dev, BNXT_ROCE_ULP, &fw_msg); if (rc) ibdev_err(&rdev->ibdev, "Failed to free HW stats context %#x", @@ -693,7 +696,6 @@ static const struct ib_device_ops bnxt_re_dev_ops = { .get_port_immutable = bnxt_re_get_port_immutable, .map_mr_sg = bnxt_re_map_mr_sg, .mmap = bnxt_re_mmap, - .modify_ah = bnxt_re_modify_ah, .modify_qp = bnxt_re_modify_qp, .modify_srq = bnxt_re_modify_srq, .poll_cq = bnxt_re_poll_cq, @@ -727,7 +729,7 @@ static int bnxt_re_register_ib(struct bnxt_re_dev *rdev) strlen(BNXT_RE_DESC) + 5); ibdev->phys_port_cnt = 1; - bnxt_qplib_get_guid(rdev->netdev->dev_addr, (u8 *)&ibdev->node_guid); + addrconf_addr_eui48((u8 *)&ibdev->node_guid, rdev->netdev->dev_addr); ibdev->num_comp_vectors = rdev->num_msix - 1; ibdev->dev.parent = &rdev->en_dev->pdev->dev; @@ -777,6 +779,8 @@ static struct bnxt_re_dev *bnxt_re_dev_add(struct net_device *netdev, atomic_set(&rdev->srq_count, 0); atomic_set(&rdev->mr_count, 0); atomic_set(&rdev->mw_count, 0); + atomic_set(&rdev->ah_count, 0); + atomic_set(&rdev->pd_count, 0); rdev->cosq[0] = 0xFFFF; rdev->cosq[1] = 0xFFFF; @@ -889,7 +893,6 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq, struct bnxt_re_srq *srq = container_of(handle, struct bnxt_re_srq, qplib_srq); struct ib_event ib_event; - int rc = 0; ib_event.device = &srq->rdev->ibdev; ib_event.element.srq = &srq->ib_srq; @@ -903,7 +906,7 @@ static int bnxt_re_srqn_handler(struct bnxt_qplib_nq *nq, (*srq->ib_srq.event_handler)(&ib_event, srq->ib_srq.srq_context); } - return rc; + return 0; } static int bnxt_re_cqn_handler(struct bnxt_qplib_nq *nq, @@ -1725,7 +1728,7 @@ static int bnxt_re_netdev_event(struct notifier_block *notifier, } if (sch_work) { /* Allocate for the deferred task */ - re_work = kzalloc(sizeof(*re_work), GFP_ATOMIC); + re_work = kzalloc(sizeof(*re_work), GFP_KERNEL); if (re_work) { get_device(&rdev->ibdev.dev); re_work->rdev = rdev; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index d4d4959c2434..96e581ced50e 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -46,6 +46,7 @@ #include <linux/delay.h> #include <linux/prefetch.h> #include <linux/if_ether.h> +#include <rdma/ib_mad.h> #include "roce_hsi.h" @@ -707,12 +708,13 @@ int bnxt_qplib_query_srq(struct bnxt_qplib_res *res, int rc = 0; RCFW_CMD_PREP(req, QUERY_SRQ, cmd_flags); - req.srq_cid = cpu_to_le32(srq->id); /* Configure the request */ sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); if (!sbuf) return -ENOMEM; + req.resp_size = sizeof(*sb) / BNXT_QPLIB_CMDQE_UNITS; + req.srq_cid = 
cpu_to_le32(srq->id); sb = sbuf->sb; rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, (void *)&resp, (void *)sbuf, 0); @@ -1049,6 +1051,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_FORCE_COMPLETION; if (qp->wqe_mode == BNXT_QPLIB_WQE_MODE_VARIABLE) qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED; + if (_is_ext_stats_supported(res->dattr->dev_cap_flags) && !res->is_vf) + qp_flags |= CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED; + req.qp_flags = cpu_to_le32(qp_flags); /* ORRQ and IRRQ */ @@ -1228,7 +1233,7 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) struct bnxt_qplib_rcfw *rcfw = res->rcfw; struct cmdq_modify_qp req; struct creq_modify_qp_resp resp; - u16 cmd_flags = 0, pkey; + u16 cmd_flags = 0; u32 temp32[4]; u32 bmask; int rc; @@ -1251,11 +1256,9 @@ int bnxt_qplib_modify_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp) if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_ACCESS) req.access = qp->access; - if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) { - if (!bnxt_qplib_get_pkey(res, &res->pkey_tbl, - qp->pkey_index, &pkey)) - req.pkey = cpu_to_le16(pkey); - } + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_PKEY) + req.pkey = cpu_to_le16(IB_DEFAULT_PKEY_FULL); + if (bmask & CMDQ_MODIFY_QP_MODIFY_MASK_QKEY) req.qkey = cpu_to_le32(qp->qkey); @@ -2851,6 +2854,7 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, struct cq_base *hw_cqe; u32 sw_cons, raw_cons; int budget, rc = 0; + u8 type; raw_cons = cq->hwq.cons; budget = num_cqes; @@ -2869,7 +2873,8 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, */ dma_rmb(); /* From the device's respective CQE format to qplib_wc*/ - switch (hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK) { + type = hw_cqe->cqe_type_toggle & CQ_BASE_CQE_TYPE_MASK; + switch (type) { case CQ_BASE_CQE_TYPE_REQ: rc = bnxt_qplib_cq_process_req(cq, (struct cq_req *)hw_cqe, @@ -2916,8 +2921,9 @@ int bnxt_qplib_poll_cq(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, /* Error while processing the CQE, just skip to the * next one */ - dev_err(&cq->hwq.pdev->dev, - "process_cqe error rc = 0x%x\n", rc); + if (type != CQ_BASE_CQE_TYPE_TERMINAL) + dev_err(&cq->hwq.pdev->dev, + "process_cqe error rc = 0x%x\n", rc); } raw_cons++; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c index 5d384def5e5f..061b2895dd9b 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.c @@ -78,7 +78,7 @@ static int __block_for_resp(struct bnxt_qplib_rcfw *rcfw, u16 cookie) if (!test_bit(cbit, cmdq->cmdq_bitmap)) goto done; do { - mdelay(1); /* 1m sec */ + udelay(1); bnxt_qplib_service_creq(&rcfw->creq.creq_tasklet); } while (test_bit(cbit, cmdq->cmdq_bitmap) && --count); done: @@ -555,7 +555,7 @@ skip_ctx_setup: void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) { - kfree(rcfw->cmdq.cmdq_bitmap); + bitmap_free(rcfw->cmdq.cmdq_bitmap); kfree(rcfw->qp_tbl); kfree(rcfw->crsqe_tbl); bnxt_qplib_free_hwq(rcfw->res, &rcfw->cmdq.hwq); @@ -572,7 +572,6 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, struct bnxt_qplib_sg_info sginfo = {}; struct bnxt_qplib_cmdq_ctx *cmdq; struct bnxt_qplib_creq_ctx *creq; - u32 bmap_size = 0; rcfw->pdev = res->pdev; cmdq = &rcfw->cmdq; @@ -613,13 +612,10 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res, if (!rcfw->crsqe_tbl) goto fail; - bmap_size = 
BITS_TO_LONGS(rcfw->cmdq_depth) * sizeof(unsigned long); - cmdq->cmdq_bitmap = kzalloc(bmap_size, GFP_KERNEL); + cmdq->cmdq_bitmap = bitmap_zalloc(rcfw->cmdq_depth, GFP_KERNEL); if (!cmdq->cmdq_bitmap) goto fail; - cmdq->bmap_size = bmap_size; - /* Allocate one extra to hold the QP1 entries */ rcfw->qp_tbl_size = qp_tbl_sz + 1; rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node), @@ -667,8 +663,8 @@ void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw) iounmap(cmdq->cmdq_mbox.reg.bar_reg); iounmap(creq->creq_db.reg.bar_reg); - indx = find_first_bit(cmdq->cmdq_bitmap, cmdq->bmap_size); - if (indx != cmdq->bmap_size) + indx = find_first_bit(cmdq->cmdq_bitmap, rcfw->cmdq_depth); + if (indx != rcfw->cmdq_depth) dev_err(&rcfw->pdev->dev, "disabling RCFW with pending cmd-bit %lx\n", indx); @@ -848,13 +844,13 @@ struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf( { struct bnxt_qplib_rcfw_sbuf *sbuf; - sbuf = kzalloc(sizeof(*sbuf), GFP_ATOMIC); + sbuf = kzalloc(sizeof(*sbuf), GFP_KERNEL); if (!sbuf) return NULL; sbuf->size = size; sbuf->sb = dma_alloc_coherent(&rcfw->pdev->dev, sbuf->size, - &sbuf->dma_addr, GFP_ATOMIC); + &sbuf->dma_addr, GFP_KERNEL); if (!sbuf->sb) goto bail; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h index 9474c0046582..0a3d8e7da3d4 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_rcfw.h @@ -96,7 +96,7 @@ static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req) #define RCFW_MAX_COOKIE_VALUE 0x7FFF #define RCFW_CMD_IS_BLOCKING 0x8000 -#define RCFW_BLOCKED_CMD_WAIT_COUNT 0x4E20 +#define RCFW_BLOCKED_CMD_WAIT_COUNT 20000000UL /* 20 sec */ #define HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK 0x1000900020011ULL @@ -152,7 +152,6 @@ struct bnxt_qplib_cmdq_ctx { wait_queue_head_t waitq; unsigned long flags; unsigned long *cmdq_bitmap; - u32 bmap_size; u32 seq_num; }; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.c b/drivers/infiniband/hw/bnxt_re/qplib_res.c index 44282a8cdd4f..126d4f26f75a 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.c @@ -228,15 +228,16 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, npages++; } - if (npages == MAX_PBL_LVL_0_PGS) { + if (npages == MAX_PBL_LVL_0_PGS && !hwq_attr->sginfo->nopte) { /* This request is Level 0, map PTE */ rc = __alloc_pbl(res, &hwq->pbl[PBL_LVL_0], hwq_attr->sginfo); if (rc) goto fail; hwq->level = PBL_LVL_0; + goto done; } - if (npages > MAX_PBL_LVL_0_PGS) { + if (npages >= MAX_PBL_LVL_0_PGS) { if (npages > MAX_PBL_LVL_1_PGS) { u32 flag = (hwq_attr->type == HWQ_TYPE_L2_CMPL) ? 
0 : PTU_PTE_VALID; @@ -571,23 +572,6 @@ fail: return rc; } -/* GUID */ -void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid) -{ - u8 mac[ETH_ALEN]; - - /* MAC-48 to EUI-64 mapping */ - memcpy(mac, dev_addr, ETH_ALEN); - guid[0] = mac[0] ^ 2; - guid[1] = mac[1]; - guid[2] = mac[2]; - guid[3] = 0xff; - guid[4] = 0xfe; - guid[5] = mac[3]; - guid[6] = mac[4]; - guid[7] = mac[5]; -} - static void bnxt_qplib_free_sgid_tbl(struct bnxt_qplib_res *res, struct bnxt_qplib_sgid_tbl *sgid_tbl) { @@ -665,31 +649,6 @@ static void bnxt_qplib_init_sgid_tbl(struct bnxt_qplib_sgid_tbl *sgid_tbl, memset(sgid_tbl->hw_id, -1, sizeof(u16) * sgid_tbl->max); } -static void bnxt_qplib_free_pkey_tbl(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl) -{ - if (!pkey_tbl->tbl) - dev_dbg(&res->pdev->dev, "PKEY tbl not present\n"); - else - kfree(pkey_tbl->tbl); - - pkey_tbl->tbl = NULL; - pkey_tbl->max = 0; - pkey_tbl->active = 0; -} - -static int bnxt_qplib_alloc_pkey_tbl(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, - u16 max) -{ - pkey_tbl->tbl = kcalloc(max, sizeof(u16), GFP_KERNEL); - if (!pkey_tbl->tbl) - return -ENOMEM; - - pkey_tbl->max = max; - return 0; -}; - /* PDs */ int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pdt, struct bnxt_qplib_pd *pd) { @@ -859,24 +818,6 @@ unmap_io: return -ENOMEM; } -/* PKEYs */ -static void bnxt_qplib_cleanup_pkey_tbl(struct bnxt_qplib_pkey_tbl *pkey_tbl) -{ - memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max); - pkey_tbl->active = 0; -} - -static void bnxt_qplib_init_pkey_tbl(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl) -{ - u16 pkey = 0xFFFF; - - memset(pkey_tbl->tbl, 0, sizeof(u16) * pkey_tbl->max); - - /* pkey default = 0xFFFF */ - bnxt_qplib_add_pkey(res, pkey_tbl, &pkey, false); -} - /* Stats */ static void bnxt_qplib_free_stats_ctx(struct pci_dev *pdev, struct bnxt_qplib_stats *stats) @@ -907,21 +848,18 @@ static int bnxt_qplib_alloc_stats_ctx(struct pci_dev *pdev, void bnxt_qplib_cleanup_res(struct bnxt_qplib_res *res) { - bnxt_qplib_cleanup_pkey_tbl(&res->pkey_tbl); bnxt_qplib_cleanup_sgid_tbl(res, &res->sgid_tbl); } int bnxt_qplib_init_res(struct bnxt_qplib_res *res) { bnxt_qplib_init_sgid_tbl(&res->sgid_tbl, res->netdev); - bnxt_qplib_init_pkey_tbl(res, &res->pkey_tbl); return 0; } void bnxt_qplib_free_res(struct bnxt_qplib_res *res) { - bnxt_qplib_free_pkey_tbl(res, &res->pkey_tbl); bnxt_qplib_free_sgid_tbl(res, &res->sgid_tbl); bnxt_qplib_free_pd_tbl(&res->pd_tbl); bnxt_qplib_free_dpi_tbl(res, &res->dpi_tbl); @@ -940,10 +878,6 @@ int bnxt_qplib_alloc_res(struct bnxt_qplib_res *res, struct pci_dev *pdev, if (rc) goto fail; - rc = bnxt_qplib_alloc_pkey_tbl(res, &res->pkey_tbl, dev_attr->max_pkey); - if (rc) - goto fail; - rc = bnxt_qplib_alloc_pd_tbl(res, &res->pd_tbl, dev_attr->max_pd); if (rc) goto fail; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_res.h b/drivers/infiniband/hw/bnxt_re/qplib_res.h index 91031502e8f5..982e2c96dac2 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_res.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_res.h @@ -185,12 +185,6 @@ struct bnxt_qplib_sgid_tbl { u8 *vlan; }; -struct bnxt_qplib_pkey_tbl { - u16 *tbl; - u16 max; - u16 active; -}; - struct bnxt_qplib_dpi { u32 dpi; void __iomem *dbr; @@ -253,14 +247,14 @@ struct bnxt_qplib_ctx { struct bnxt_qplib_res { struct pci_dev *pdev; struct bnxt_qplib_chip_ctx *cctx; + struct bnxt_qplib_dev_attr *dattr; struct net_device *netdev; - struct bnxt_qplib_rcfw *rcfw; struct bnxt_qplib_pd_tbl pd_tbl; struct bnxt_qplib_sgid_tbl 
sgid_tbl; - struct bnxt_qplib_pkey_tbl pkey_tbl; struct bnxt_qplib_dpi_tbl dpi_tbl; bool prio; + bool is_vf; }; static inline bool bnxt_qplib_is_chip_gen_p5(struct bnxt_qplib_chip_ctx *cctx) @@ -345,7 +339,6 @@ void bnxt_qplib_free_hwq(struct bnxt_qplib_res *res, struct bnxt_qplib_hwq *hwq); int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq, struct bnxt_qplib_hwq_attr *hwq_attr); -void bnxt_qplib_get_guid(u8 *dev_addr, u8 *guid); int bnxt_qplib_alloc_pd(struct bnxt_qplib_pd_tbl *pd_tbl, struct bnxt_qplib_pd *pd); int bnxt_qplib_dealloc_pd(struct bnxt_qplib_res *res, @@ -450,4 +443,10 @@ static inline void bnxt_qplib_ring_nq_db(struct bnxt_qplib_db_info *info, else bnxt_qplib_ring_db32(info, arm); } + +static inline bool _is_ext_stats_supported(u16 dev_cap_flags) +{ + return dev_cap_flags & + CREQ_QUERY_FUNC_RESP_SB_EXT_STATS; +} #endif /* __BNXT_QPLIB_RES_H__ */ diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.c b/drivers/infiniband/hw/bnxt_re/qplib_sp.c index 3d9259632eb3..b802981b7171 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_sp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.c @@ -146,21 +146,12 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, attr->max_srq = le16_to_cpu(sb->max_srq); attr->max_srq_wqes = le32_to_cpu(sb->max_srq_wr) - 1; attr->max_srq_sges = sb->max_srq_sge; - attr->max_pkey = le32_to_cpu(sb->max_pkeys); - /* - * Some versions of FW reports more than 0xFFFF. - * Restrict it for now to 0xFFFF to avoid - * reporting trucated value - */ - if (attr->max_pkey > 0xFFFF) { - /* ib_port_attr::pkey_tbl_len is u16 */ - attr->max_pkey = 0xFFFF; - } - + attr->max_pkey = 1; attr->max_inline_data = le32_to_cpu(sb->max_inline_data); attr->l2_db_size = (sb->l2_db_space_size + 1) * (0x01 << RCFW_DBR_BASE_PAGE_SHIFT); attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED; + attr->dev_cap_flags = le16_to_cpu(sb->dev_cap_flags); bnxt_qplib_query_version(rcfw, attr->fw_ver); @@ -286,8 +277,8 @@ int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, } int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, - struct bnxt_qplib_gid *gid, u8 *smac, u16 vlan_id, - bool update, u32 *index) + struct bnxt_qplib_gid *gid, const u8 *smac, + u16 vlan_id, bool update, u32 *index) { struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl, struct bnxt_qplib_res, @@ -378,7 +369,7 @@ int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, struct bnxt_qplib_gid *gid, u16 gid_idx, - u8 *smac) + const u8 *smac) { struct bnxt_qplib_res *res = to_bnxt_qplib(sgid_tbl, struct bnxt_qplib_res, @@ -413,93 +404,6 @@ int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, return rc; } -/* pkeys */ -int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index, - u16 *pkey) -{ - if (index == 0xFFFF) { - *pkey = 0xFFFF; - return 0; - } - if (index >= pkey_tbl->max) { - dev_err(&res->pdev->dev, - "Index %d exceeded PKEY table max (%d)\n", - index, pkey_tbl->max); - return -EINVAL; - } - memcpy(pkey, &pkey_tbl->tbl[index], sizeof(*pkey)); - return 0; -} - -int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update) -{ - int i, rc = 0; - - if (!pkey_tbl) { - dev_err(&res->pdev->dev, "PKEY table not allocated\n"); - return -EINVAL; - } - - /* Do we need a pkey_lock here? 
*/ - if (!pkey_tbl->active) { - dev_err(&res->pdev->dev, "PKEY table has no active entries\n"); - return -ENOMEM; - } - for (i = 0; i < pkey_tbl->max; i++) { - if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey))) - break; - } - if (i == pkey_tbl->max) { - dev_err(&res->pdev->dev, - "PKEY 0x%04x not found in the pkey table\n", *pkey); - return -ENOMEM; - } - memset(&pkey_tbl->tbl[i], 0, sizeof(*pkey)); - pkey_tbl->active--; - - /* unlock */ - return rc; -} - -int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update) -{ - int i, free_idx, rc = 0; - - if (!pkey_tbl) { - dev_err(&res->pdev->dev, "PKEY table not allocated\n"); - return -EINVAL; - } - - /* Do we need a pkey_lock here? */ - if (pkey_tbl->active == pkey_tbl->max) { - dev_err(&res->pdev->dev, "PKEY table is full\n"); - return -ENOMEM; - } - free_idx = pkey_tbl->max; - for (i = 0; i < pkey_tbl->max; i++) { - if (!memcmp(&pkey_tbl->tbl[i], pkey, sizeof(*pkey))) - return -EALREADY; - else if (!pkey_tbl->tbl[i] && free_idx == pkey_tbl->max) - free_idx = i; - } - if (free_idx == pkey_tbl->max) { - dev_err(&res->pdev->dev, - "PKEY table is FULL but count is not MAX??\n"); - return -ENOMEM; - } - /* Add PKEY to the pkey_tbl */ - memcpy(&pkey_tbl->tbl[free_idx], pkey, sizeof(*pkey)); - pkey_tbl->active++; - - /* unlock */ - return rc; -} - /* AH */ int bnxt_qplib_create_ah(struct bnxt_qplib_res *res, struct bnxt_qplib_ah *ah, bool block) @@ -869,3 +773,53 @@ bail: bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); return rc; } + +int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid, + struct bnxt_qplib_ext_stat *estat) +{ + struct creq_query_roce_stats_ext_resp resp = {}; + struct creq_query_roce_stats_ext_resp_sb *sb; + struct cmdq_query_roce_stats_ext req = {}; + struct bnxt_qplib_rcfw_sbuf *sbuf; + u16 cmd_flags = 0; + int rc; + + sbuf = bnxt_qplib_rcfw_alloc_sbuf(rcfw, sizeof(*sb)); + if (!sbuf) { + dev_err(&rcfw->pdev->dev, + "SP: QUERY_ROCE_STATS_EXT alloc sb failed"); + return -ENOMEM; + } + + RCFW_CMD_PREP(req, QUERY_ROCE_STATS_EXT, cmd_flags); + + req.resp_size = ALIGN(sizeof(*sb), BNXT_QPLIB_CMDQE_UNITS); + req.resp_addr = cpu_to_le64(sbuf->dma_addr); + req.function_id = cpu_to_le32(fid); + req.flags = cpu_to_le16(CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID); + + rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req, + (void *)&resp, (void *)sbuf, 0); + if (rc) + goto bail; + + sb = sbuf->sb; + estat->tx_atomic_req = le64_to_cpu(sb->tx_atomic_req_pkts); + estat->tx_read_req = le64_to_cpu(sb->tx_read_req_pkts); + estat->tx_read_res = le64_to_cpu(sb->tx_read_res_pkts); + estat->tx_write_req = le64_to_cpu(sb->tx_write_req_pkts); + estat->tx_send_req = le64_to_cpu(sb->tx_send_req_pkts); + estat->rx_atomic_req = le64_to_cpu(sb->rx_atomic_req_pkts); + estat->rx_read_req = le64_to_cpu(sb->rx_read_req_pkts); + estat->rx_read_res = le64_to_cpu(sb->rx_read_res_pkts); + estat->rx_write_req = le64_to_cpu(sb->rx_write_req_pkts); + estat->rx_send_req = le64_to_cpu(sb->rx_send_req_pkts); + estat->rx_roce_good_pkts = le64_to_cpu(sb->rx_roce_good_pkts); + estat->rx_roce_good_bytes = le64_to_cpu(sb->rx_roce_good_bytes); + estat->rx_out_of_buffer = le64_to_cpu(sb->rx_out_of_buffer_pkts); + estat->rx_out_of_sequence = le64_to_cpu(sb->rx_out_of_sequence_pkts); + +bail: + bnxt_qplib_rcfw_free_sbuf(rcfw, sbuf); + return rc; +} diff --git a/drivers/infiniband/hw/bnxt_re/qplib_sp.h b/drivers/infiniband/hw/bnxt_re/qplib_sp.h index 260104783691..5939e8fc8353 100644 --- 
a/drivers/infiniband/hw/bnxt_re/qplib_sp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_sp.h @@ -71,6 +71,7 @@ struct bnxt_qplib_dev_attr { u32 l2_db_size; u8 tqm_alloc_reqs[MAX_TQM_ALLOC_REQ]; bool is_atomic; + u16 dev_cap_flags; }; struct bnxt_qplib_pd { @@ -219,25 +220,41 @@ struct bnxt_qplib_roce_stats { /* port 3 active qps */ }; +struct bnxt_qplib_ext_stat { + u64 tx_atomic_req; + u64 tx_read_req; + u64 tx_read_res; + u64 tx_write_req; + u64 tx_send_req; + u64 tx_roce_pkts; + u64 tx_roce_bytes; + u64 rx_atomic_req; + u64 rx_read_req; + u64 rx_read_res; + u64 rx_write_req; + u64 rx_send_req; + u64 rx_roce_pkts; + u64 rx_roce_bytes; + u64 rx_roce_good_pkts; + u64 rx_roce_good_bytes; + u64 rx_out_of_buffer; + u64 rx_out_of_sequence; + u64 tx_cnp; + u64 rx_cnp; + u64 rx_ecn_marked; +}; + int bnxt_qplib_get_sgid(struct bnxt_qplib_res *res, struct bnxt_qplib_sgid_tbl *sgid_tbl, int index, struct bnxt_qplib_gid *gid); int bnxt_qplib_del_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, struct bnxt_qplib_gid *gid, u16 vlan_id, bool update); int bnxt_qplib_add_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, - struct bnxt_qplib_gid *gid, u8 *mac, u16 vlan_id, + struct bnxt_qplib_gid *gid, const u8 *mac, u16 vlan_id, bool update, u32 *index); int bnxt_qplib_update_sgid(struct bnxt_qplib_sgid_tbl *sgid_tbl, - struct bnxt_qplib_gid *gid, u16 gid_idx, u8 *smac); -int bnxt_qplib_get_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 index, - u16 *pkey); -int bnxt_qplib_del_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update); -int bnxt_qplib_add_pkey(struct bnxt_qplib_res *res, - struct bnxt_qplib_pkey_tbl *pkey_tbl, u16 *pkey, - bool update); + struct bnxt_qplib_gid *gid, u16 gid_idx, + const u8 *smac); int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_dev_attr *attr, bool vf); int bnxt_qplib_set_func_resources(struct bnxt_qplib_res *res, @@ -263,4 +280,7 @@ int bnxt_qplib_free_fast_reg_page_list(struct bnxt_qplib_res *res, int bnxt_qplib_map_tc2cos(struct bnxt_qplib_res *res, u16 *cids); int bnxt_qplib_get_roce_stats(struct bnxt_qplib_rcfw *rcfw, struct bnxt_qplib_roce_stats *stats); +int bnxt_qplib_qext_stat(struct bnxt_qplib_rcfw *rcfw, u32 fid, + struct bnxt_qplib_ext_stat *estat); + #endif /* __BNXT_QPLIB_SP_H__*/ diff --git a/drivers/infiniband/hw/bnxt_re/roce_hsi.h b/drivers/infiniband/hw/bnxt_re/roce_hsi.h index 3e40e0d76efd..ecb719098b75 100644 --- a/drivers/infiniband/hw/bnxt_re/roce_hsi.h +++ b/drivers/infiniband/hw/bnxt_re/roce_hsi.h @@ -1102,6 +1102,7 @@ struct cmdq_base { #define CMDQ_BASE_OPCODE_MODIFY_CC 0x8cUL #define CMDQ_BASE_OPCODE_QUERY_CC 0x8dUL #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS 0x8eUL + #define CMDQ_BASE_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL u8 cmd_size; __le16 flags; __le16 cookie; @@ -1127,6 +1128,10 @@ struct cmdq_create_qp { #define CMDQ_CREATE_QP_QP_FLAGS_RESERVED_LKEY_ENABLE 0x4UL #define CMDQ_CREATE_QP_QP_FLAGS_FR_PMR_ENABLED 0x8UL #define CMDQ_CREATE_QP_QP_FLAGS_VARIABLE_SIZED_WQE_ENABLED 0x10UL + #define CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED 0x80UL + #define CMDQ_CREATE_QP_QP_FLAGS_LAST \ + CMDQ_CREATE_QP_QP_FLAGS_EXT_STATS_ENABLED + u8 type; #define CMDQ_CREATE_QP_TYPE_RC 0x2UL #define CMDQ_CREATE_QP_TYPE_UD 0x4UL @@ -2848,6 +2853,7 @@ struct creq_query_func_resp_sb { __le16 max_qp_wr; __le16 dev_cap_flags; #define CREQ_QUERY_FUNC_RESP_SB_DEV_CAP_FLAGS_RESIZE_QP 0x1UL + #define CREQ_QUERY_FUNC_RESP_SB_EXT_STATS 0x10UL __le32 max_cq; __le32 max_cqe; __le32 max_pd; @@ -3087,6 
+3093,85 @@ struct creq_query_roce_stats_resp_sb { __le64 active_qp_count_p3; }; +/* cmdq_query_roce_stats_ext (size:192b/24B) */ +struct cmdq_query_roce_stats_ext { + u8 opcode; + #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS 0x92UL + #define CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_LAST \ + CMDQ_QUERY_ROCE_STATS_EXT_OPCODE_QUERY_ROCE_STATS + u8 cmd_size; + __le16 flags; + #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_COLLECTION_ID 0x1UL + #define CMDQ_QUERY_ROCE_STATS_EXT_FLAGS_FUNCTION_ID 0x2UL + __le16 cookie; + u8 resp_size; + u8 collection_id; + __le64 resp_addr; + __le32 function_id; + #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_MASK 0xffUL + #define CMDQ_QUERY_ROCE_STATS_EXT_PF_NUM_SFT 0 + #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_MASK 0xffff00UL + #define CMDQ_QUERY_ROCE_STATS_EXT_VF_NUM_SFT 8 + #define CMDQ_QUERY_ROCE_STATS_EXT_VF_VALID 0x1000000UL + __le32 reserved32; +}; + +/* creq_query_roce_stats_ext_resp (size:128b/16B) */ +struct creq_query_roce_stats_ext_resp { + u8 type; + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_MASK 0x3fUL + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_SFT 0 + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT 0x38UL + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_LAST \ + CREQ_QUERY_ROCE_STATS_EXT_RESP_TYPE_QP_EVENT + u8 status; + __le16 cookie; + __le32 size; + u8 v; + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_V 0x1UL + u8 event; + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_QUERY_ROCE_STATS_EXT 0x92UL + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_LAST \ + CREQ_QUERY_ROCE_STATS_EXT_RESP_EVENT_QUERY_ROCE_STATS_EXT + u8 reserved48[6]; +}; + +/* creq_query_roce_stats_ext_resp_sb (size:1536b/192B) */ +struct creq_query_roce_stats_ext_resp_sb { + u8 opcode; + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT 0x92UL + #define CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_LAST \ + CREQ_QUERY_ROCE_STATS_EXT_RESP_SB_OPCODE_QUERY_ROCE_STATS_EXT + u8 status; + __le16 cookie; + __le16 flags; + u8 resp_size; + u8 rsvd; + __le64 tx_atomic_req_pkts; + __le64 tx_read_req_pkts; + __le64 tx_read_res_pkts; + __le64 tx_write_req_pkts; + __le64 tx_send_req_pkts; + __le64 tx_roce_pkts; + __le64 tx_roce_bytes; + __le64 rx_atomic_req_pkts; + __le64 rx_read_req_pkts; + __le64 rx_read_res_pkts; + __le64 rx_write_req_pkts; + __le64 rx_send_req_pkts; + __le64 rx_roce_pkts; + __le64 rx_roce_bytes; + __le64 rx_roce_good_pkts; + __le64 rx_roce_good_bytes; + __le64 rx_out_of_buffer_pkts; + __le64 rx_out_of_sequence_pkts; + __le64 tx_cnp_pkts; + __le64 rx_cnp_pkts; + __le64 rx_ecn_marked_pkts; + __le64 tx_cnp_bytes; + __le64 rx_cnp_bytes; +}; + /* QP error notification event (16 bytes) */ struct creq_qp_error_notification { u8 type; diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c index 291471d12197..c16017f6e8db 100644 --- a/drivers/infiniband/hw/cxgb4/cm.c +++ b/drivers/infiniband/hw/cxgb4/cm.c @@ -2471,7 +2471,8 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, skb_get(skb); rpl = cplhdr(skb); if (!is_t4(adapter_type)) { - skb_trim(skb, roundup(sizeof(*rpl5), 16)); + BUILD_BUG_ON(sizeof(*rpl5) != roundup(sizeof(*rpl5), 16)); + skb_trim(skb, sizeof(*rpl5)); rpl5 = (void *)rpl; INIT_TP_WR(rpl5, ep->hwtid); } else { @@ -2487,7 +2488,7 @@ static int accept_cr(struct c4iw_ep *ep, struct sk_buff *skb, opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE); opt2 |= T5_ISS_F; rpl5 = (void *)rpl; - memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16)); + memset_after(rpl5, 0, iss); if (peer2peer) isn += 4; rpl5->iss = cpu_to_be32(isn); @@ 
-4464,6 +4465,5 @@ int __init c4iw_cm_init(void) void c4iw_cm_term(void) { WARN_ON(!list_empty(&timeout_list)); - flush_workqueue(workq); destroy_workqueue(workq); } diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c index 541dbcf22d0e..80970a1738f8 100644 --- a/drivers/infiniband/hw/cxgb4/device.c +++ b/drivers/infiniband/hw/cxgb4/device.c @@ -1562,7 +1562,6 @@ static void __exit c4iw_exit_module(void) kfree(ctx); } mutex_unlock(&dev_mutex); - flush_workqueue(reg_workq); destroy_workqueue(reg_workq); cxgb4_unregister_uld(CXGB4_ULD_RDMA); c4iw_cm_term(); diff --git a/drivers/infiniband/hw/cxgb4/id_table.c b/drivers/infiniband/hw/cxgb4/id_table.c index 724d23297b35..f64e7e02b129 100644 --- a/drivers/infiniband/hw/cxgb4/id_table.c +++ b/drivers/infiniband/hw/cxgb4/id_table.c @@ -59,7 +59,7 @@ u32 c4iw_id_alloc(struct c4iw_id_table *alloc) alloc->last = obj + 1; if (alloc->last >= alloc->max) alloc->last = 0; - set_bit(obj, alloc->table); + __set_bit(obj, alloc->table); obj += alloc->start; } else obj = -1; @@ -75,37 +75,32 @@ void c4iw_id_free(struct c4iw_id_table *alloc, u32 obj) obj -= alloc->start; spin_lock_irqsave(&alloc->lock, flags); - clear_bit(obj, alloc->table); + __clear_bit(obj, alloc->table); spin_unlock_irqrestore(&alloc->lock, flags); } int c4iw_id_table_alloc(struct c4iw_id_table *alloc, u32 start, u32 num, u32 reserved, u32 flags) { - int i; - alloc->start = start; alloc->flags = flags; if (flags & C4IW_ID_TABLE_F_RANDOM) alloc->last = prandom_u32() % RANDOM_SKIP; else alloc->last = 0; - alloc->max = num; + alloc->max = num; spin_lock_init(&alloc->lock); - alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long), - GFP_KERNEL); + alloc->table = bitmap_zalloc(num, GFP_KERNEL); if (!alloc->table) return -ENOMEM; - bitmap_zero(alloc->table, num); if (!(alloc->flags & C4IW_ID_TABLE_F_EMPTY)) - for (i = 0; i < reserved; ++i) - set_bit(i, alloc->table); + bitmap_set(alloc->table, 0, reserved); return 0; } void c4iw_id_table_free(struct c4iw_id_table *alloc) { - kfree(alloc->table); + bitmap_free(alloc->table); } diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c index e7337662aff8..89f36a3a9af0 100644 --- a/drivers/infiniband/hw/cxgb4/provider.c +++ b/drivers/infiniband/hw/cxgb4/provider.c @@ -41,6 +41,7 @@ #include <linux/ethtool.h> #include <linux/rtnetlink.h> #include <linux/inetdevice.h> +#include <net/addrconf.h> #include <linux/io.h> #include <asm/irq.h> @@ -264,7 +265,8 @@ static int c4iw_query_device(struct ib_device *ibdev, struct ib_device_attr *pro return -EINVAL; dev = to_c4iw_dev(ibdev); - memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); + addrconf_addr_eui48((u8 *)&props->sys_image_guid, + dev->rdev.lldi.ports[0]->dev_addr); props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type); props->fw_ver = dev->rdev.lldi.fw_vers; props->device_cap_flags = dev->device_cap_flags; @@ -366,23 +368,23 @@ enum counters { NR_COUNTERS }; -static const char * const names[] = { - [IP4INSEGS] = "ip4InSegs", - [IP4OUTSEGS] = "ip4OutSegs", - [IP4RETRANSSEGS] = "ip4RetransSegs", - [IP4OUTRSTS] = "ip4OutRsts", - [IP6INSEGS] = "ip6InSegs", - [IP6OUTSEGS] = "ip6OutSegs", - [IP6RETRANSSEGS] = "ip6RetransSegs", - [IP6OUTRSTS] = "ip6OutRsts" +static const struct rdma_stat_desc cxgb4_descs[] = { + [IP4INSEGS].name = "ip4InSegs", + [IP4OUTSEGS].name = "ip4OutSegs", + [IP4RETRANSSEGS].name = "ip4RetransSegs", + [IP4OUTRSTS].name = "ip4OutRsts", + [IP6INSEGS].name = "ip6InSegs", + 
[IP6OUTSEGS].name = "ip6OutSegs", + [IP6RETRANSSEGS].name = "ip6RetransSegs", + [IP6OUTRSTS].name = "ip6OutRsts" }; static struct rdma_hw_stats *c4iw_alloc_device_stats(struct ib_device *ibdev) { - BUILD_BUG_ON(ARRAY_SIZE(names) != NR_COUNTERS); + BUILD_BUG_ON(ARRAY_SIZE(cxgb4_descs) != NR_COUNTERS); /* FIXME: these look like port stats */ - return rdma_alloc_hw_stats_struct(names, NR_COUNTERS, + return rdma_alloc_hw_stats_struct(cxgb4_descs, NR_COUNTERS, RDMA_HW_STATS_DEFAULT_LIFESPAN); } @@ -525,8 +527,8 @@ void c4iw_register_device(struct work_struct *work) struct c4iw_dev *dev = ctx->dev; pr_debug("c4iw_dev %p\n", dev); - memset(&dev->ibdev.node_guid, 0, sizeof(dev->ibdev.node_guid)); - memcpy(&dev->ibdev.node_guid, dev->rdev.lldi.ports[0]->dev_addr, 6); + addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid, + dev->rdev.lldi.ports[0]->dev_addr); dev->device_cap_flags = IB_DEVICE_LOCAL_DMA_LKEY | IB_DEVICE_MEM_WINDOW; if (fastreg_support) dev->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS; diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index d20b4ef2c853..ffbd9a89981e 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -2460,6 +2460,7 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, memset(attr, 0, sizeof(*attr)); memset(init_attr, 0, sizeof(*init_attr)); attr->qp_state = to_ib_qp_state(qhp->attr.state); + attr->cur_qp_state = to_ib_qp_state(qhp->attr.state); init_attr->cap.max_send_wr = qhp->attr.sq_num_entries; init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries; init_attr->cap.max_send_sge = qhp->attr.sq_max_sges; diff --git a/drivers/infiniband/hw/efa/efa.h b/drivers/infiniband/hw/efa/efa.h index 87b1dadeb7fe..7352a1f5d811 100644 --- a/drivers/infiniband/hw/efa/efa.h +++ b/drivers/infiniband/hw/efa/efa.h @@ -20,14 +20,14 @@ #define EFA_IRQNAME_SIZE 40 -/* 1 for AENQ + ADMIN */ -#define EFA_NUM_MSIX_VEC 1 #define EFA_MGMNT_MSIX_VEC_IDX 0 +#define EFA_COMP_EQS_VEC_BASE 1 struct efa_irq { irq_handler_t handler; void *data; u32 irqn; + u32 vector; cpumask_t affinity_hint_mask; char name[EFA_IRQNAME_SIZE]; }; @@ -61,6 +61,13 @@ struct efa_dev { struct efa_irq admin_irq; struct efa_stats stats; + + /* Array of completion EQs */ + struct efa_eq *eqs; + unsigned int neqs; + + /* Only stores CQs with interrupts enabled */ + struct xarray cqs_xa; }; struct efa_ucontext { @@ -84,8 +91,11 @@ struct efa_cq { dma_addr_t dma_addr; void *cpu_addr; struct rdma_user_mmap_entry *mmap_entry; + struct rdma_user_mmap_entry *db_mmap_entry; size_t size; u16 cq_idx; + /* NULL when no interrupts requested */ + struct efa_eq *eq; }; struct efa_qp { @@ -116,6 +126,11 @@ struct efa_ah { u8 id[EFA_GID_SIZE]; }; +struct efa_eq { + struct efa_com_eq eeq; + struct efa_irq irq; +}; + int efa_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *udata); @@ -139,6 +154,10 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, u64 virt_addr, int access_flags, struct ib_udata *udata); +struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, + u64 length, u64 virt_addr, + int fd, int access_flags, + struct ib_udata *udata); int efa_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); int efa_get_port_immutable(struct ib_device *ibdev, u32 port_num, struct ib_port_immutable *immutable); diff --git a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h index 
fa38b34eddb8..0b0b93b529f3 100644 --- a/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h +++ b/drivers/infiniband/hw/efa/efa_admin_cmds_defs.h @@ -28,7 +28,9 @@ enum efa_admin_aq_opcode { EFA_ADMIN_DEALLOC_PD = 15, EFA_ADMIN_ALLOC_UAR = 16, EFA_ADMIN_DEALLOC_UAR = 17, - EFA_ADMIN_MAX_OPCODE = 17, + EFA_ADMIN_CREATE_EQ = 18, + EFA_ADMIN_DESTROY_EQ = 19, + EFA_ADMIN_MAX_OPCODE = 19, }; enum efa_admin_aq_feature_id { @@ -38,6 +40,7 @@ enum efa_admin_aq_feature_id { EFA_ADMIN_QUEUE_ATTR = 4, EFA_ADMIN_HW_HINTS = 5, EFA_ADMIN_HOST_INFO = 6, + EFA_ADMIN_EVENT_QUEUE_ATTR = 7, }; /* QP transport type */ @@ -430,8 +433,8 @@ struct efa_admin_create_cq_cmd { /* * 4:0 : reserved5 - MBZ * 5 : interrupt_mode_enabled - if set, cq operates - * in interrupt mode (i.e. CQ events and MSI-X are - * generated), otherwise - polling + * in interrupt mode (i.e. CQ events and EQ elements + * are generated), otherwise - polling * 6 : virt - If set, ring base address is virtual * (IOVA returned by MR registration) * 7 : reserved6 - MBZ @@ -448,8 +451,11 @@ struct efa_admin_create_cq_cmd { /* completion queue depth in # of entries. must be power of 2 */ u16 cq_depth; - /* msix vector assigned to this cq */ - u32 msix_vector_idx; + /* EQ number assigned to this cq */ + u16 eqn; + + /* MBZ */ + u16 reserved; /* * CQ ring base address, virtual or physical depending on 'virt' @@ -480,6 +486,15 @@ struct efa_admin_create_cq_resp { /* actual cq depth in number of entries */ u16 cq_actual_depth; + + /* CQ doorbell address, as offset to PCIe DB BAR */ + u32 db_offset; + + /* + * 0 : db_valid - If set, doorbell offset is valid. + * Always set when interrupts are requested. + */ + u32 flags; }; struct efa_admin_destroy_cq_cmd { @@ -669,6 +684,17 @@ struct efa_admin_feature_queue_attr_desc { u16 max_tx_batch; }; +struct efa_admin_event_queue_attr_desc { + /* The maximum number of event queues supported */ + u32 max_eq; + + /* Maximum number of EQEs per Event Queue */ + u32 max_eq_depth; + + /* Supported events bitmask */ + u32 event_bitmask; +}; + struct efa_admin_feature_aenq_desc { /* bitmask for AENQ groups the device can report */ u32 supported_groups; @@ -727,6 +753,8 @@ struct efa_admin_get_feature_resp { struct efa_admin_feature_queue_attr_desc queue_attr; + struct efa_admin_event_queue_attr_desc event_queue_attr; + struct efa_admin_hw_hints hw_hints; } u; }; @@ -810,6 +838,60 @@ struct efa_admin_dealloc_uar_resp { struct efa_admin_acq_common_desc acq_common_desc; }; +struct efa_admin_create_eq_cmd { + struct efa_admin_aq_common_desc aq_common_descriptor; + + /* Size of the EQ in entries, must be power of 2 */ + u16 depth; + + /* MSI-X table entry index */ + u8 msix_vec; + + /* + * 4:0 : entry_size_words - size of EQ entry in + * 32-bit words + * 7:5 : reserved - MBZ + */ + u8 caps; + + /* EQ ring base address */ + struct efa_common_mem_addr ba; + + /* + * Enabled events on this EQ + * 0 : completion_events - Enable completion events + * 31:1 : reserved - MBZ + */ + u32 event_bitmask; + + /* MBZ */ + u32 reserved; +}; + +struct efa_admin_create_eq_resp { + struct efa_admin_acq_common_desc acq_common_desc; + + /* EQ number */ + u16 eqn; + + /* MBZ */ + u16 reserved; +}; + +struct efa_admin_destroy_eq_cmd { + struct efa_admin_aq_common_desc aq_common_descriptor; + + /* EQ number */ + u16 eqn; + + /* MBZ */ + u16 reserved; +}; + +struct efa_admin_destroy_eq_resp { + struct efa_admin_acq_common_desc acq_common_desc; +}; + /* asynchronous event notification groups */ enum efa_admin_aenq_group { EFA_ADMIN_FATAL_ERROR = 1, @@ 
-899,10 +981,18 @@ struct efa_admin_host_info { #define EFA_ADMIN_CREATE_CQ_CMD_VIRT_MASK BIT(6) #define EFA_ADMIN_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0) +/* create_cq_resp */ +#define EFA_ADMIN_CREATE_CQ_RESP_DB_VALID_MASK BIT(0) + /* feature_device_attr_desc */ #define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RDMA_READ_MASK BIT(0) #define EFA_ADMIN_FEATURE_DEVICE_ATTR_DESC_RNR_RETRY_MASK BIT(1) +/* create_eq_cmd */ +#define EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS_MASK GENMASK(4, 0) +#define EFA_ADMIN_CREATE_EQ_CMD_VIRT_MASK BIT(6) +#define EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS_MASK BIT(0) + /* host_info */ #define EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE_MASK GENMASK(7, 0) #define EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR_MASK GENMASK(15, 8) diff --git a/drivers/infiniband/hw/efa/efa_admin_defs.h b/drivers/infiniband/hw/efa/efa_admin_defs.h index 78ff9389ae25..83f20c38a840 100644 --- a/drivers/infiniband/hw/efa/efa_admin_defs.h +++ b/drivers/infiniband/hw/efa/efa_admin_defs.h @@ -118,6 +118,43 @@ struct efa_admin_aenq_entry { u32 inline_data_w4[12]; }; +enum efa_admin_eqe_event_type { + EFA_ADMIN_EQE_EVENT_TYPE_COMPLETION = 0, +}; + +/* Completion event */ +struct efa_admin_comp_event { + /* CQ number */ + u16 cqn; + + /* MBZ */ + u16 reserved; + + /* MBZ */ + u32 reserved2; +}; + +/* Event Queue Element */ +struct efa_admin_eqe { + /* + * 0 : phase + * 8:1 : event_type - Event type + * 31:9 : reserved - MBZ + */ + u32 common; + + /* MBZ */ + u32 reserved; + + union { + /* Event data */ + u32 event_data[2]; + + /* Completion Event */ + struct efa_admin_comp_event comp_event; + } u; +}; + /* aq_common_desc */ #define EFA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK GENMASK(11, 0) #define EFA_ADMIN_AQ_COMMON_DESC_PHASE_MASK BIT(0) @@ -131,4 +168,8 @@ struct efa_admin_aenq_entry { /* aenq_common_desc */ #define EFA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK BIT(0) +/* eqe */ +#define EFA_ADMIN_EQE_PHASE_MASK BIT(0) +#define EFA_ADMIN_EQE_EVENT_TYPE_MASK GENMASK(8, 1) + #endif /* _EFA_ADMIN_H_ */ diff --git a/drivers/infiniband/hw/efa/efa_com.c b/drivers/infiniband/hw/efa/efa_com.c index 0d523ad736c7..16a24a05fc2a 100644 --- a/drivers/infiniband/hw/efa/efa_com.c +++ b/drivers/infiniband/hw/efa/efa_com.c @@ -56,11 +56,19 @@ static const char *efa_com_cmd_str(u8 cmd) EFA_CMD_STR_CASE(DEALLOC_PD); EFA_CMD_STR_CASE(ALLOC_UAR); EFA_CMD_STR_CASE(DEALLOC_UAR); + EFA_CMD_STR_CASE(CREATE_EQ); + EFA_CMD_STR_CASE(DESTROY_EQ); default: return "unknown command opcode"; } #undef EFA_CMD_STR_CASE } +void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low) +{ + *addr_low = lower_32_bits(addr); + *addr_high = upper_32_bits(addr); +} + static u32 efa_com_reg_read32(struct efa_com_dev *edev, u16 offset) { struct efa_com_mmio_read *mmio_read = &edev->mmio_read; @@ -1081,3 +1089,159 @@ int efa_com_dev_reset(struct efa_com_dev *edev, return 0; } + +static int efa_com_create_eq(struct efa_com_dev *edev, + struct efa_com_create_eq_params *params, + struct efa_com_create_eq_result *result) +{ + struct efa_com_admin_queue *aq = &edev->aq; + struct efa_admin_create_eq_resp resp = {}; + struct efa_admin_create_eq_cmd cmd = {}; + int err; + + cmd.aq_common_descriptor.opcode = EFA_ADMIN_CREATE_EQ; + EFA_SET(&cmd.caps, EFA_ADMIN_CREATE_EQ_CMD_ENTRY_SIZE_WORDS, + params->entry_size_in_bytes / 4); + cmd.depth = params->depth; + cmd.event_bitmask = params->event_bitmask; + cmd.msix_vec = params->msix_vec; + + efa_com_set_dma_addr(params->dma_addr, &cmd.ba.mem_addr_high, + &cmd.ba.mem_addr_low); + + err = 
efa_com_cmd_exec(aq, + (struct efa_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct efa_admin_acq_entry *)&resp, + sizeof(resp)); + if (err) { + ibdev_err_ratelimited(edev->efa_dev, + "Failed to create eq[%d]\n", err); + return err; + } + + result->eqn = resp.eqn; + + return 0; +} + +static void efa_com_destroy_eq(struct efa_com_dev *edev, + struct efa_com_destroy_eq_params *params) +{ + struct efa_com_admin_queue *aq = &edev->aq; + struct efa_admin_destroy_eq_resp resp = {}; + struct efa_admin_destroy_eq_cmd cmd = {}; + int err; + + cmd.aq_common_descriptor.opcode = EFA_ADMIN_DESTROY_EQ; + cmd.eqn = params->eqn; + + err = efa_com_cmd_exec(aq, + (struct efa_admin_aq_entry *)&cmd, + sizeof(cmd), + (struct efa_admin_acq_entry *)&resp, + sizeof(resp)); + if (err) + ibdev_err_ratelimited(edev->efa_dev, + "Failed to destroy EQ-%u [%d]\n", cmd.eqn, + err); +} + +static void efa_com_arm_eq(struct efa_com_dev *edev, struct efa_com_eq *eeq) +{ + u32 val = 0; + + EFA_SET(&val, EFA_REGS_EQ_DB_EQN, eeq->eqn); + EFA_SET(&val, EFA_REGS_EQ_DB_ARM, 1); + + writel(val, edev->reg_bar + EFA_REGS_EQ_DB_OFF); +} + +void efa_com_eq_comp_intr_handler(struct efa_com_dev *edev, + struct efa_com_eq *eeq) +{ + struct efa_admin_eqe *eqe; + u32 processed = 0; + u8 phase; + u32 ci; + + ci = eeq->cc & (eeq->depth - 1); + phase = eeq->phase; + eqe = &eeq->eqes[ci]; + + /* Go over all the events */ + while ((READ_ONCE(eqe->common) & EFA_ADMIN_EQE_PHASE_MASK) == phase) { + /* + * Do not read the rest of the completion entry before the + * phase bit was validated + */ + dma_rmb(); + + eeq->cb(eeq, eqe); + + /* Get next event entry */ + ci++; + processed++; + + if (ci == eeq->depth) { + ci = 0; + phase = !phase; + } + + eqe = &eeq->eqes[ci]; + } + + eeq->cc += processed; + eeq->phase = phase; + efa_com_arm_eq(eeq->edev, eeq); +} + +void efa_com_eq_destroy(struct efa_com_dev *edev, struct efa_com_eq *eeq) +{ + struct efa_com_destroy_eq_params params = { + .eqn = eeq->eqn, + }; + + efa_com_destroy_eq(edev, ¶ms); + dma_free_coherent(edev->dmadev, eeq->depth * sizeof(*eeq->eqes), + eeq->eqes, eeq->dma_addr); +} + +int efa_com_eq_init(struct efa_com_dev *edev, struct efa_com_eq *eeq, + efa_eqe_handler cb, u16 depth, u8 msix_vec) +{ + struct efa_com_create_eq_params params = {}; + struct efa_com_create_eq_result result = {}; + int err; + + params.depth = depth; + params.entry_size_in_bytes = sizeof(*eeq->eqes); + EFA_SET(¶ms.event_bitmask, + EFA_ADMIN_CREATE_EQ_CMD_COMPLETION_EVENTS, 1); + params.msix_vec = msix_vec; + + eeq->eqes = dma_alloc_coherent(edev->dmadev, + params.depth * sizeof(*eeq->eqes), + ¶ms.dma_addr, GFP_KERNEL); + if (!eeq->eqes) + return -ENOMEM; + + err = efa_com_create_eq(edev, ¶ms, &result); + if (err) + goto err_free_coherent; + + eeq->eqn = result.eqn; + eeq->edev = edev; + eeq->dma_addr = params.dma_addr; + eeq->phase = 1; + eeq->depth = params.depth; + eeq->cb = cb; + efa_com_arm_eq(edev, eeq); + + return 0; + +err_free_coherent: + dma_free_coherent(edev->dmadev, params.depth * sizeof(*eeq->eqes), + eeq->eqes, params.dma_addr); + return err; +} diff --git a/drivers/infiniband/hw/efa/efa_com.h b/drivers/infiniband/hw/efa/efa_com.h index 5e4c88877ddb..77282234ce68 100644 --- a/drivers/infiniband/hw/efa/efa_com.h +++ b/drivers/infiniband/hw/efa/efa_com.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ /* - * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved. 
*/ #ifndef _EFA_COM_H_ @@ -80,6 +80,9 @@ struct efa_com_admin_queue { }; struct efa_aenq_handlers; +struct efa_com_eq; +typedef void (*efa_eqe_handler)(struct efa_com_eq *eeq, + struct efa_admin_eqe *eqe); struct efa_com_aenq { struct efa_admin_aenq_entry *entries; @@ -112,6 +115,33 @@ struct efa_com_dev { struct efa_com_mmio_read mmio_read; }; +struct efa_com_eq { + struct efa_com_dev *edev; + struct efa_admin_eqe *eqes; + dma_addr_t dma_addr; + u32 cc; /* Consumer counter */ + u16 eqn; + u16 depth; + u8 phase; + efa_eqe_handler cb; +}; + +struct efa_com_create_eq_params { + dma_addr_t dma_addr; + u32 event_bitmask; + u16 depth; + u8 entry_size_in_bytes; + u8 msix_vec; +}; + +struct efa_com_create_eq_result { + u16 eqn; +}; + +struct efa_com_destroy_eq_params { + u16 eqn; +}; + typedef void (*efa_aenq_handler)(void *data, struct efa_admin_aenq_entry *aenq_e); @@ -121,9 +151,13 @@ struct efa_aenq_handlers { efa_aenq_handler unimplemented_handler; }; +void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low); int efa_com_admin_init(struct efa_com_dev *edev, struct efa_aenq_handlers *aenq_handlers); void efa_com_admin_destroy(struct efa_com_dev *edev); +int efa_com_eq_init(struct efa_com_dev *edev, struct efa_com_eq *eeq, + efa_eqe_handler cb, u16 depth, u8 msix_vec); +void efa_com_eq_destroy(struct efa_com_dev *edev, struct efa_com_eq *eeq); int efa_com_dev_reset(struct efa_com_dev *edev, enum efa_regs_reset_reason_types reset_reason); void efa_com_set_admin_polling_mode(struct efa_com_dev *edev, bool polling); @@ -140,5 +174,7 @@ int efa_com_cmd_exec(struct efa_com_admin_queue *aq, struct efa_admin_acq_entry *comp, size_t comp_size); void efa_com_aenq_intr_handler(struct efa_com_dev *edev, void *data); +void efa_com_eq_comp_intr_handler(struct efa_com_dev *edev, + struct efa_com_eq *eeq); #endif /* _EFA_COM_H_ */ diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.c b/drivers/infiniband/hw/efa/efa_com_cmd.c index f752ef64159c..fb405da4e1db 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.c +++ b/drivers/infiniband/hw/efa/efa_com_cmd.c @@ -1,17 +1,11 @@ // SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause /* - * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved. 
*/ #include "efa_com.h" #include "efa_com_cmd.h" -void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low) -{ - *addr_low = lower_32_bits(addr); - *addr_high = upper_32_bits(addr); -} - int efa_com_create_qp(struct efa_com_dev *edev, struct efa_com_create_qp_params *params, struct efa_com_create_qp_result *res) @@ -157,7 +151,7 @@ int efa_com_create_cq(struct efa_com_dev *edev, struct efa_com_create_cq_params *params, struct efa_com_create_cq_result *result) { - struct efa_admin_create_cq_resp cmd_completion; + struct efa_admin_create_cq_resp cmd_completion = {}; struct efa_admin_create_cq_cmd create_cmd = {}; struct efa_com_admin_queue *aq = &edev->aq; int err; @@ -169,6 +163,11 @@ int efa_com_create_cq(struct efa_com_dev *edev, create_cmd.cq_depth = params->cq_depth; create_cmd.num_sub_cqs = params->num_sub_cqs; create_cmd.uar = params->uarn; + if (params->interrupt_mode_enabled) { + EFA_SET(&create_cmd.cq_caps_1, + EFA_ADMIN_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED, 1); + create_cmd.eqn = params->eqn; + } efa_com_set_dma_addr(params->dma_addr, &create_cmd.cq_ba.mem_addr_high, @@ -187,6 +186,9 @@ int efa_com_create_cq(struct efa_com_dev *edev, result->cq_idx = cmd_completion.cq_idx; result->actual_depth = params->cq_depth; + result->db_off = cmd_completion.db_offset; + result->db_valid = EFA_GET(&cmd_completion.flags, + EFA_ADMIN_CREATE_CQ_RESP_DB_VALID); return 0; } @@ -497,6 +499,23 @@ int efa_com_get_device_attr(struct efa_com_dev *edev, sizeof(resp.u.network_attr.addr)); result->mtu = resp.u.network_attr.mtu; + if (efa_com_check_supported_feature_id(edev, + EFA_ADMIN_EVENT_QUEUE_ATTR)) { + err = efa_com_get_feature(edev, &resp, + EFA_ADMIN_EVENT_QUEUE_ATTR); + if (err) { + ibdev_err_ratelimited( + edev->efa_dev, + "Failed to get event queue attributes %d\n", + err); + return err; + } + + result->max_eq = resp.u.event_queue_attr.max_eq; + result->max_eq_depth = resp.u.event_queue_attr.max_eq_depth; + result->event_bitmask = resp.u.event_queue_attr.event_bitmask; + } + return 0; } diff --git a/drivers/infiniband/hw/efa/efa_com_cmd.h b/drivers/infiniband/hw/efa/efa_com_cmd.h index eea4ebfbe6ec..c33010bbf9e8 100644 --- a/drivers/infiniband/hw/efa/efa_com_cmd.h +++ b/drivers/infiniband/hw/efa/efa_com_cmd.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ /* - * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved. 
*/ #ifndef _EFA_COM_CMD_H_ @@ -73,7 +73,9 @@ struct efa_com_create_cq_params { u16 cq_depth; u16 num_sub_cqs; u16 uarn; + u16 eqn; u8 entry_size_in_bytes; + bool interrupt_mode_enabled; }; struct efa_com_create_cq_result { @@ -81,6 +83,8 @@ struct efa_com_create_cq_result { u16 cq_idx; /* actual cq depth in # of entries */ u16 actual_depth; + u32 db_off; + bool db_valid; }; struct efa_com_destroy_cq_params { @@ -125,6 +129,9 @@ struct efa_com_get_device_attr_result { u32 max_llq_size; u32 max_rdma_size; u32 device_caps; + u32 max_eq; + u32 max_eq_depth; + u32 event_bitmask; /* EQ events bitmask */ u16 sub_cqs_per_cq; u16 max_sq_sge; u16 max_rq_sge; @@ -260,7 +267,6 @@ union efa_com_get_stats_result { struct efa_com_rdma_read_stats rdma_read_stats; }; -void efa_com_set_dma_addr(dma_addr_t addr, u32 *addr_high, u32 *addr_low); int efa_com_create_qp(struct efa_com_dev *edev, struct efa_com_create_qp_params *params, struct efa_com_create_qp_result *res); diff --git a/drivers/infiniband/hw/efa/efa_main.c b/drivers/infiniband/hw/efa/efa_main.c index 417dea5f90cf..94b94cca4870 100644 --- a/drivers/infiniband/hw/efa/efa_main.c +++ b/drivers/infiniband/hw/efa/efa_main.c @@ -67,6 +67,47 @@ static void efa_release_bars(struct efa_dev *dev, int bars_mask) pci_release_selected_regions(pdev, release_bars); } +static void efa_process_comp_eqe(struct efa_dev *dev, struct efa_admin_eqe *eqe) +{ + u16 cqn = eqe->u.comp_event.cqn; + struct efa_cq *cq; + + /* Safe to load as we're in irq and removal calls synchronize_irq() */ + cq = xa_load(&dev->cqs_xa, cqn); + if (unlikely(!cq)) { + ibdev_err_ratelimited(&dev->ibdev, + "Completion event on non-existent CQ[%u]", + cqn); + return; + } + + cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); +} + +static void efa_process_eqe(struct efa_com_eq *eeq, struct efa_admin_eqe *eqe) +{ + struct efa_dev *dev = container_of(eeq->edev, struct efa_dev, edev); + + if (likely(EFA_GET(&eqe->common, EFA_ADMIN_EQE_EVENT_TYPE) == + EFA_ADMIN_EQE_EVENT_TYPE_COMPLETION)) + efa_process_comp_eqe(dev, eqe); + else + ibdev_err_ratelimited(&dev->ibdev, + "Unknown event type received %lu", + EFA_GET(&eqe->common, + EFA_ADMIN_EQE_EVENT_TYPE)); +} + +static irqreturn_t efa_intr_msix_comp(int irq, void *data) +{ + struct efa_eq *eq = data; + struct efa_com_dev *edev = eq->eeq.edev; + + efa_com_eq_comp_intr_handler(edev, &eq->eeq); + + return IRQ_HANDLED; +} + static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data) { struct efa_dev *dev = data; @@ -77,26 +118,43 @@ static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data) return IRQ_HANDLED; } -static int efa_request_mgmnt_irq(struct efa_dev *dev) +static int efa_request_irq(struct efa_dev *dev, struct efa_irq *irq) { - struct efa_irq *irq; int err; - irq = &dev->admin_irq; err = request_irq(irq->irqn, irq->handler, 0, irq->name, irq->data); if (err) { - dev_err(&dev->pdev->dev, "Failed to request admin irq (%d)\n", - err); + dev_err(&dev->pdev->dev, "Failed to request irq %s (%d)\n", + irq->name, err); return err; } - dev_dbg(&dev->pdev->dev, "Set affinity hint of mgmnt irq to %*pbl (irq vector: %d)\n", - nr_cpumask_bits, &irq->affinity_hint_mask, irq->irqn); irq_set_affinity_hint(irq->irqn, &irq->affinity_hint_mask); return 0; } +static void efa_setup_comp_irq(struct efa_dev *dev, struct efa_eq *eq, + int vector) +{ + u32 cpu; + + cpu = vector - EFA_COMP_EQS_VEC_BASE; + snprintf(eq->irq.name, EFA_IRQNAME_SIZE, "efa-comp%d@pci:%s", cpu, + pci_name(dev->pdev)); + eq->irq.handler = efa_intr_msix_comp; + eq->irq.data = eq; + 
eq->irq.vector = vector; + eq->irq.irqn = pci_irq_vector(dev->pdev, vector); + cpumask_set_cpu(cpu, &eq->irq.affinity_hint_mask); +} + +static void efa_free_irq(struct efa_dev *dev, struct efa_irq *irq) +{ + irq_set_affinity_hint(irq->irqn, NULL); + free_irq(irq->irqn, irq->data); +} + static void efa_setup_mgmnt_irq(struct efa_dev *dev) { u32 cpu; @@ -105,8 +163,9 @@ static void efa_setup_mgmnt_irq(struct efa_dev *dev) "efa-mgmnt@pci:%s", pci_name(dev->pdev)); dev->admin_irq.handler = efa_intr_msix_mgmnt; dev->admin_irq.data = dev; - dev->admin_irq.irqn = - pci_irq_vector(dev->pdev, dev->admin_msix_vector_idx); + dev->admin_irq.vector = dev->admin_msix_vector_idx; + dev->admin_irq.irqn = pci_irq_vector(dev->pdev, + dev->admin_msix_vector_idx); cpu = cpumask_first(cpu_online_mask); cpumask_set_cpu(cpu, &dev->admin_irq.affinity_hint_mask); @@ -115,20 +174,11 @@ static void efa_setup_mgmnt_irq(struct efa_dev *dev) dev->admin_irq.name); } -static void efa_free_mgmnt_irq(struct efa_dev *dev) -{ - struct efa_irq *irq; - - irq = &dev->admin_irq; - irq_set_affinity_hint(irq->irqn, NULL); - free_irq(irq->irqn, irq->data); -} - static int efa_set_mgmnt_irq(struct efa_dev *dev) { efa_setup_mgmnt_irq(dev); - return efa_request_mgmnt_irq(dev); + return efa_request_irq(dev, &dev->admin_irq); } static int efa_request_doorbell_bar(struct efa_dev *dev) @@ -234,6 +284,72 @@ static void efa_set_host_info(struct efa_dev *dev) dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma); } +static void efa_destroy_eq(struct efa_dev *dev, struct efa_eq *eq) +{ + efa_com_eq_destroy(&dev->edev, &eq->eeq); + efa_free_irq(dev, &eq->irq); +} + +static int efa_create_eq(struct efa_dev *dev, struct efa_eq *eq, u8 msix_vec) +{ + int err; + + efa_setup_comp_irq(dev, eq, msix_vec); + err = efa_request_irq(dev, &eq->irq); + if (err) + return err; + + err = efa_com_eq_init(&dev->edev, &eq->eeq, efa_process_eqe, + dev->dev_attr.max_eq_depth, msix_vec); + if (err) + goto err_free_comp_irq; + + return 0; + +err_free_comp_irq: + efa_free_irq(dev, &eq->irq); + return err; +} + +static int efa_create_eqs(struct efa_dev *dev) +{ + unsigned int neqs = dev->dev_attr.max_eq; + int err; + int i; + + neqs = min_t(unsigned int, neqs, num_online_cpus()); + dev->neqs = neqs; + dev->eqs = kcalloc(neqs, sizeof(*dev->eqs), GFP_KERNEL); + if (!dev->eqs) + return -ENOMEM; + + for (i = 0; i < neqs; i++) { + err = efa_create_eq(dev, &dev->eqs[i], + i + EFA_COMP_EQS_VEC_BASE); + if (err) + goto err_destroy_eqs; + } + + return 0; + +err_destroy_eqs: + for (i--; i >= 0; i--) + efa_destroy_eq(dev, &dev->eqs[i]); + kfree(dev->eqs); + + return err; +} + +static void efa_destroy_eqs(struct efa_dev *dev) +{ + int i; + + for (i = 0; i < dev->neqs; i++) + efa_destroy_eq(dev, &dev->eqs[i]); + + kfree(dev->eqs); +} + static const struct ib_device_ops efa_dev_ops = { .owner = THIS_MODULE, .driver_id = RDMA_DRIVER_EFA, @@ -264,6 +380,7 @@ static const struct ib_device_ops efa_dev_ops = { .query_port = efa_query_port, .query_qp = efa_query_qp, .reg_user_mr = efa_reg_mr, + .reg_user_mr_dmabuf = efa_reg_user_mr_dmabuf, INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq), @@ -300,23 +417,29 @@ static int efa_ib_device_add(struct efa_dev *dev) if (err) goto err_release_doorbell_bar; + err = efa_create_eqs(dev); + if (err) + goto err_release_doorbell_bar; + efa_set_host_info(dev); dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED; dev->ibdev.phys_port_cnt = 1; - dev->ibdev.num_comp_vectors = 1; + dev->ibdev.num_comp_vectors = dev->neqs ?: 1; 
dev->ibdev.dev.parent = &pdev->dev; ib_set_device_ops(&dev->ibdev, &efa_dev_ops); err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev); if (err) - goto err_release_doorbell_bar; + goto err_destroy_eqs; ibdev_info(&dev->ibdev, "IB device registered\n"); return 0; +err_destroy_eqs: + efa_destroy_eqs(dev); err_release_doorbell_bar: efa_release_doorbell_bar(dev); return err; @@ -324,9 +447,10 @@ err_release_doorbell_bar: static void efa_ib_device_remove(struct efa_dev *dev) { - efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL); ibdev_info(&dev->ibdev, "Unregister ib device\n"); ib_unregister_device(&dev->ibdev); + efa_destroy_eqs(dev); + efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL); efa_release_doorbell_bar(dev); } @@ -339,8 +463,12 @@ static int efa_enable_msix(struct efa_dev *dev) { int msix_vecs, irq_num; - /* Reserve the max msix vectors we might need */ - msix_vecs = EFA_NUM_MSIX_VEC; + /* + * Reserve the max msix vectors we might need, one vector is reserved + * for admin. + */ + msix_vecs = min_t(int, pci_msix_vec_count(dev->pdev), + num_online_cpus() + 1); dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n", msix_vecs); @@ -421,6 +549,7 @@ static struct efa_dev *efa_probe_device(struct pci_dev *pdev) edev->efa_dev = dev; edev->dmadev = &pdev->dev; dev->pdev = pdev; + xa_init(&dev->cqs_xa); bars = pci_select_bars(pdev, IORESOURCE_MEM) & EFA_BASE_BAR_MASK; err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME); @@ -476,7 +605,7 @@ static struct efa_dev *efa_probe_device(struct pci_dev *pdev) return dev; err_free_mgmnt_irq: - efa_free_mgmnt_irq(dev); + efa_free_irq(dev, &dev->admin_irq); err_disable_msix: efa_disable_msix(dev); err_reg_read_destroy: @@ -499,11 +628,12 @@ static void efa_remove_device(struct pci_dev *pdev) edev = &dev->edev; efa_com_admin_destroy(edev); - efa_free_mgmnt_irq(dev); + efa_free_irq(dev, &dev->admin_irq); efa_disable_msix(dev); efa_com_mmio_reg_read_destroy(edev); devm_iounmap(&pdev->dev, edev->reg_bar); efa_release_bars(dev, EFA_BASE_BAR_MASK); + xa_destroy(&dev->cqs_xa); ib_dealloc_device(&dev->ibdev); pci_disable_device(pdev); } diff --git a/drivers/infiniband/hw/efa/efa_regs_defs.h b/drivers/infiniband/hw/efa/efa_regs_defs.h index 4017982fe13b..714ae6258800 100644 --- a/drivers/infiniband/hw/efa/efa_regs_defs.h +++ b/drivers/infiniband/hw/efa/efa_regs_defs.h @@ -1,6 +1,6 @@ /* SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause */ /* - * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved. + * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved. */ #ifndef _EFA_REGS_H_ @@ -42,6 +42,7 @@ enum efa_regs_reset_reason_types { #define EFA_REGS_MMIO_REG_READ_OFF 0x5c #define EFA_REGS_MMIO_RESP_LO_OFF 0x60 #define EFA_REGS_MMIO_RESP_HI_OFF 0x64 +#define EFA_REGS_EQ_DB_OFF 0x68 /* version register */ #define EFA_REGS_VERSION_MINOR_VERSION_MASK 0xff @@ -93,4 +94,8 @@ enum efa_regs_reset_reason_types { #define EFA_REGS_MMIO_REG_READ_REQ_ID_MASK 0xffff #define EFA_REGS_MMIO_REG_READ_REG_OFF_MASK 0xffff0000 +/* eq_db register */ +#define EFA_REGS_EQ_DB_EQN_MASK 0xffff +#define EFA_REGS_EQ_DB_ARM_MASK 0x80000000 + #endif /* _EFA_REGS_H_ */ diff --git a/drivers/infiniband/hw/efa/efa_verbs.c b/drivers/infiniband/hw/efa/efa_verbs.c index e5f9d90aad5e..ecfe70eb5efb 100644 --- a/drivers/infiniband/hw/efa/efa_verbs.c +++ b/drivers/infiniband/hw/efa/efa_verbs.c @@ -3,6 +3,8 @@ * Copyright 2018-2021 Amazon.com, Inc. or its affiliates. All rights reserved. 
*/ +#include <linux/dma-buf.h> +#include <linux/dma-resv.h> #include <linux/vmalloc.h> #include <linux/log2.h> @@ -60,13 +62,14 @@ struct efa_user_mmap_entry { op(EFA_RDMA_READ_RESP_BYTES, "rdma_read_resp_bytes") \ #define EFA_STATS_ENUM(ename, name) ename, -#define EFA_STATS_STR(ename, name) [ename] = name, +#define EFA_STATS_STR(ename, nam) \ + [ename].name = nam, enum efa_hw_device_stats { EFA_DEFINE_DEVICE_STATS(EFA_STATS_ENUM) }; -static const char *const efa_device_stats_names[] = { +static const struct rdma_stat_desc efa_device_stats_descs[] = { EFA_DEFINE_DEVICE_STATS(EFA_STATS_STR) }; @@ -74,7 +77,7 @@ enum efa_hw_port_stats { EFA_DEFINE_PORT_STATS(EFA_STATS_ENUM) }; -static const char *const efa_port_stats_names[] = { +static const struct rdma_stat_desc efa_port_stats_descs[] = { EFA_DEFINE_PORT_STATS(EFA_STATS_STR) }; @@ -245,6 +248,9 @@ int efa_query_device(struct ib_device *ibdev, if (EFA_DEV_CAP(dev, RNR_RETRY)) resp.device_caps |= EFA_QUERY_DEVICE_CAPS_RNR_RETRY; + if (dev->neqs) + resp.device_caps |= EFA_QUERY_DEVICE_CAPS_CQ_NOTIFICATIONS; + err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); if (err) { @@ -984,6 +990,12 @@ static int efa_destroy_cq_idx(struct efa_dev *dev, int cq_idx) return efa_com_destroy_cq(&dev->edev, ¶ms); } +static void efa_cq_user_mmap_entries_remove(struct efa_cq *cq) +{ + rdma_user_mmap_entry_remove(cq->db_mmap_entry); + rdma_user_mmap_entry_remove(cq->mmap_entry); +} + int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) { struct efa_dev *dev = to_edev(ibcq->device); @@ -993,15 +1005,25 @@ int efa_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) "Destroy cq[%d] virt[0x%p] freed: size[%lu], dma[%pad]\n", cq->cq_idx, cq->cpu_addr, cq->size, &cq->dma_addr); - rdma_user_mmap_entry_remove(cq->mmap_entry); + efa_cq_user_mmap_entries_remove(cq); efa_destroy_cq_idx(dev, cq->cq_idx); + if (cq->eq) { + xa_erase(&dev->cqs_xa, cq->cq_idx); + synchronize_irq(cq->eq->irq.irqn); + } efa_free_mapped(dev, cq->cpu_addr, cq->dma_addr, cq->size, DMA_FROM_DEVICE); return 0; } +static struct efa_eq *efa_vec2eq(struct efa_dev *dev, int vec) +{ + return &dev->eqs[vec]; +} + static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq, - struct efa_ibv_create_cq_resp *resp) + struct efa_ibv_create_cq_resp *resp, + bool db_valid) { resp->q_mmap_size = cq->size; cq->mmap_entry = efa_user_mmap_entry_insert(&cq->ucontext->ibucontext, @@ -1011,6 +1033,21 @@ static int cq_mmap_entries_setup(struct efa_dev *dev, struct efa_cq *cq, if (!cq->mmap_entry) return -ENOMEM; + if (db_valid) { + cq->db_mmap_entry = + efa_user_mmap_entry_insert(&cq->ucontext->ibucontext, + dev->db_bar_addr + resp->db_off, + PAGE_SIZE, EFA_MMAP_IO_NC, + &resp->db_mmap_key); + if (!cq->db_mmap_entry) { + rdma_user_mmap_entry_remove(cq->mmap_entry); + return -ENOMEM; + } + + resp->db_off &= ~PAGE_MASK; + resp->comp_mask |= EFA_CREATE_CQ_RESP_DB_OFF; + } + return 0; } @@ -1019,8 +1056,8 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, { struct efa_ucontext *ucontext = rdma_udata_to_drv_context( udata, struct efa_ucontext, ibucontext); + struct efa_com_create_cq_params params = {}; struct efa_ibv_create_cq_resp resp = {}; - struct efa_com_create_cq_params params; struct efa_com_create_cq_result result; struct ib_device *ibdev = ibcq->device; struct efa_dev *dev = to_edev(ibdev); @@ -1065,7 +1102,7 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, goto err_out; } - if (cmd.comp_mask || 
!is_reserved_cleared(cmd.reserved_50)) { + if (cmd.comp_mask || !is_reserved_cleared(cmd.reserved_58)) { ibdev_dbg(ibdev, "Incompatible ABI params, unknown fields in udata\n"); err = -EINVAL; @@ -1101,29 +1138,45 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, params.dma_addr = cq->dma_addr; params.entry_size_in_bytes = cmd.cq_entry_size; params.num_sub_cqs = cmd.num_sub_cqs; + if (cmd.flags & EFA_CREATE_CQ_WITH_COMPLETION_CHANNEL) { + cq->eq = efa_vec2eq(dev, attr->comp_vector); + params.eqn = cq->eq->eeq.eqn; + params.interrupt_mode_enabled = true; + } + err = efa_com_create_cq(&dev->edev, ¶ms, &result); if (err) goto err_free_mapped; + resp.db_off = result.db_off; resp.cq_idx = result.cq_idx; cq->cq_idx = result.cq_idx; cq->ibcq.cqe = result.actual_depth; WARN_ON_ONCE(entries != result.actual_depth); - err = cq_mmap_entries_setup(dev, cq, &resp); + err = cq_mmap_entries_setup(dev, cq, &resp, result.db_valid); if (err) { ibdev_dbg(ibdev, "Could not setup cq[%u] mmap entries\n", cq->cq_idx); goto err_destroy_cq; } + if (cq->eq) { + err = xa_err(xa_store(&dev->cqs_xa, cq->cq_idx, cq, GFP_KERNEL)); + if (err) { + ibdev_dbg(ibdev, "Failed to store cq[%u] in xarray\n", + cq->cq_idx); + goto err_remove_mmap; + } + } + if (udata->outlen) { err = ib_copy_to_udata(udata, &resp, min(sizeof(resp), udata->outlen)); if (err) { ibdev_dbg(ibdev, "Failed to copy udata for create_cq\n"); - goto err_remove_mmap; + goto err_xa_erase; } } @@ -1132,8 +1185,11 @@ int efa_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, return 0; +err_xa_erase: + if (cq->eq) + xa_erase(&dev->cqs_xa, cq->cq_idx); err_remove_mmap: - rdma_user_mmap_entry_remove(cq->mmap_entry); + efa_cq_user_mmap_entries_remove(cq); err_destroy_cq: efa_destroy_cq_idx(dev, cq->cq_idx); err_free_mapped: @@ -1490,26 +1546,18 @@ static int efa_create_pbl(struct efa_dev *dev, return 0; } -struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, - u64 virt_addr, int access_flags, - struct ib_udata *udata) +static struct efa_mr *efa_alloc_mr(struct ib_pd *ibpd, int access_flags, + struct ib_udata *udata) { struct efa_dev *dev = to_edev(ibpd->device); - struct efa_com_reg_mr_params params = {}; - struct efa_com_reg_mr_result result = {}; - struct pbl_context pbl; int supp_access_flags; - unsigned int pg_sz; struct efa_mr *mr; - int inline_size; - int err; if (udata && udata->inlen && !ib_is_udata_cleared(udata, 0, sizeof(udata->inlen))) { ibdev_dbg(&dev->ibdev, "Incompatible ABI params, udata not cleared\n"); - err = -EINVAL; - goto err_out; + return ERR_PTR(-EINVAL); } supp_access_flags = @@ -1521,23 +1569,26 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, ibdev_dbg(&dev->ibdev, "Unsupported access flags[%#x], supported[%#x]\n", access_flags, supp_access_flags); - err = -EOPNOTSUPP; - goto err_out; + return ERR_PTR(-EOPNOTSUPP); } mr = kzalloc(sizeof(*mr), GFP_KERNEL); - if (!mr) { - err = -ENOMEM; - goto err_out; - } + if (!mr) + return ERR_PTR(-ENOMEM); - mr->umem = ib_umem_get(ibpd->device, start, length, access_flags); - if (IS_ERR(mr->umem)) { - err = PTR_ERR(mr->umem); - ibdev_dbg(&dev->ibdev, - "Failed to pin and map user space memory[%d]\n", err); - goto err_free; - } + return mr; +} + +static int efa_register_mr(struct ib_pd *ibpd, struct efa_mr *mr, u64 start, + u64 length, u64 virt_addr, int access_flags) +{ + struct efa_dev *dev = to_edev(ibpd->device); + struct efa_com_reg_mr_params params = {}; + struct efa_com_reg_mr_result result = {}; + struct pbl_context 
pbl; + unsigned int pg_sz; + int inline_size; + int err; params.pd = to_epd(ibpd)->pdn; params.iova = virt_addr; @@ -1548,10 +1599,9 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, dev->dev_attr.page_size_cap, virt_addr); if (!pg_sz) { - err = -EOPNOTSUPP; ibdev_dbg(&dev->ibdev, "Failed to find a suitable page size in page_size_cap %#llx\n", dev->dev_attr.page_size_cap); - goto err_unmap; + return -EOPNOTSUPP; } params.page_shift = order_base_2(pg_sz); @@ -1565,21 +1615,21 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, if (params.page_num <= inline_size) { err = efa_create_inline_pbl(dev, mr, ¶ms); if (err) - goto err_unmap; + return err; err = efa_com_register_mr(&dev->edev, ¶ms, &result); if (err) - goto err_unmap; + return err; } else { err = efa_create_pbl(dev, &pbl, mr, ¶ms); if (err) - goto err_unmap; + return err; err = efa_com_register_mr(&dev->edev, ¶ms, &result); pbl_destroy(dev, &pbl); if (err) - goto err_unmap; + return err; } mr->ibmr.lkey = result.l_key; @@ -1587,9 +1637,78 @@ struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, mr->ibmr.length = length; ibdev_dbg(&dev->ibdev, "Registered mr[%d]\n", mr->ibmr.lkey); + return 0; +} + +struct ib_mr *efa_reg_user_mr_dmabuf(struct ib_pd *ibpd, u64 start, + u64 length, u64 virt_addr, + int fd, int access_flags, + struct ib_udata *udata) +{ + struct efa_dev *dev = to_edev(ibpd->device); + struct ib_umem_dmabuf *umem_dmabuf; + struct efa_mr *mr; + int err; + + mr = efa_alloc_mr(ibpd, access_flags, udata); + if (IS_ERR(mr)) { + err = PTR_ERR(mr); + goto err_out; + } + + umem_dmabuf = ib_umem_dmabuf_get_pinned(ibpd->device, start, length, fd, + access_flags); + if (IS_ERR(umem_dmabuf)) { + err = PTR_ERR(umem_dmabuf); + ibdev_dbg(&dev->ibdev, "Failed to get dmabuf umem[%d]\n", err); + goto err_free; + } + + mr->umem = &umem_dmabuf->umem; + err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags); + if (err) + goto err_release; + + return &mr->ibmr; + +err_release: + ib_umem_release(mr->umem); +err_free: + kfree(mr); +err_out: + atomic64_inc(&dev->stats.reg_mr_err); + return ERR_PTR(err); +} + +struct ib_mr *efa_reg_mr(struct ib_pd *ibpd, u64 start, u64 length, + u64 virt_addr, int access_flags, + struct ib_udata *udata) +{ + struct efa_dev *dev = to_edev(ibpd->device); + struct efa_mr *mr; + int err; + + mr = efa_alloc_mr(ibpd, access_flags, udata); + if (IS_ERR(mr)) { + err = PTR_ERR(mr); + goto err_out; + } + + mr->umem = ib_umem_get(ibpd->device, start, length, access_flags); + if (IS_ERR(mr->umem)) { + err = PTR_ERR(mr->umem); + ibdev_dbg(&dev->ibdev, + "Failed to pin and map user space memory[%d]\n", err); + goto err_free; + } + + err = efa_register_mr(ibpd, mr, start, length, virt_addr, access_flags); + if (err) + goto err_release; + return &mr->ibmr; -err_unmap: +err_release: ib_umem_release(mr->umem); err_free: kfree(mr); @@ -1906,15 +2025,15 @@ int efa_destroy_ah(struct ib_ah *ibah, u32 flags) struct rdma_hw_stats *efa_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) { - return rdma_alloc_hw_stats_struct(efa_port_stats_names, - ARRAY_SIZE(efa_port_stats_names), + return rdma_alloc_hw_stats_struct(efa_port_stats_descs, + ARRAY_SIZE(efa_port_stats_descs), RDMA_HW_STATS_DEFAULT_LIFESPAN); } struct rdma_hw_stats *efa_alloc_hw_device_stats(struct ib_device *ibdev) { - return rdma_alloc_hw_stats_struct(efa_device_stats_names, - ARRAY_SIZE(efa_device_stats_names), + return rdma_alloc_hw_stats_struct(efa_device_stats_descs, + 
ARRAY_SIZE(efa_device_stats_descs), RDMA_HW_STATS_DEFAULT_LIFESPAN); } @@ -1939,7 +2058,7 @@ static int efa_fill_device_stats(struct efa_dev *dev, stats->value[EFA_CREATE_AH_ERR] = atomic64_read(&s->create_ah_err); stats->value[EFA_MMAP_ERR] = atomic64_read(&s->mmap_err); - return ARRAY_SIZE(efa_device_stats_names); + return ARRAY_SIZE(efa_device_stats_descs); } static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats, @@ -1988,7 +2107,7 @@ static int efa_fill_port_stats(struct efa_dev *dev, struct rdma_hw_stats *stats, stats->value[EFA_RDMA_READ_WR_ERR] = rrs->read_wr_err; stats->value[EFA_RDMA_READ_RESP_BYTES] = rrs->read_resp_bytes; - return ARRAY_SIZE(efa_port_stats_names); + return ARRAY_SIZE(efa_port_stats_descs); } int efa_get_hw_stats(struct ib_device *ibdev, struct rdma_hw_stats *stats, diff --git a/drivers/infiniband/hw/hfi1/Kconfig b/drivers/infiniband/hw/hfi1/Kconfig index 519866b30a13..6eb739052121 100644 --- a/drivers/infiniband/hw/hfi1/Kconfig +++ b/drivers/infiniband/hw/hfi1/Kconfig @@ -1,12 +1,12 @@ # SPDX-License-Identifier: GPL-2.0-only config INFINIBAND_HFI1 - tristate "Intel OPA Gen1 support" + tristate "Cornelis OPX Gen1 support" depends on X86_64 && INFINIBAND_RDMAVT && I2C select MMU_NOTIFIER select CRC32 select I2C_ALGOBIT help - This is a low-level driver for Intel OPA Gen1 adapter. + This is a low-level driver for Cornelis OPX Gen1 adapter. config HFI1_DEBUG_SDMA_ORDER bool "HFI1 SDMA Order debug" depends on INFINIBAND_HFI1 diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 37273dc0c03c..f1245c94ae26 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2020 Intel Corporation. + * Copyright(c) 2021 Cornelis Networks. */ /* @@ -8414,6 +8415,8 @@ static void receive_interrupt_common(struct hfi1_ctxtdata *rcd) */ static void __hfi1_rcd_eoi_intr(struct hfi1_ctxtdata *rcd) { + if (!rcd->rcvhdrq) + return; clear_recv_intr(rcd); if (check_packet_present(rcd)) force_recv_intr(rcd); @@ -14918,7 +14921,7 @@ static int obtain_boardname(struct hfi1_devdata *dd) { /* generic board description */ const char generic[] = - "Intel Omni-Path Host Fabric Interface Adapter 100 Series"; + "Cornelis Omni-Path Host Fabric Interface Adapter 100 Series"; unsigned long size; int ret; diff --git a/drivers/infiniband/hw/hfi1/driver.c b/drivers/infiniband/hw/hfi1/driver.c index de411884386b..e2c634af40e9 100644 --- a/drivers/infiniband/hw/hfi1/driver.c +++ b/drivers/infiniband/hw/hfi1/driver.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015-2020 Intel Corporation. + * Copyright(c) 2021 Cornelis Networks. */ #include <linux/spinlock.h> @@ -56,7 +57,7 @@ module_param_cb(cap_mask, &cap_ops, &hfi1_cap_mask, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(cap_mask, "Bit mask of enabled/disabled HW features"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_DESCRIPTION("Intel Omni-Path Architecture driver"); +MODULE_DESCRIPTION("Cornelis Omni-Path Express driver"); /* * MAX_PKT_RCV is the max # if packets processed per receive interrupt. @@ -1011,6 +1012,8 @@ int handle_receive_interrupt(struct hfi1_ctxtdata *rcd, int thread) struct hfi1_packet packet; int skip_pkt = 0; + if (!rcd->rcvhdrq) + return RCV_PKT_OK; /* Control context will always use the slow path interrupt handler */ needset = (rcd->ctxt == HFI1_CTRL_CTXT) ? 
0 : 1; diff --git a/drivers/infiniband/hw/hfi1/efivar.c b/drivers/infiniband/hw/hfi1/efivar.c index f275dd1abed8..e8ed05516bf2 100644 --- a/drivers/infiniband/hw/hfi1/efivar.c +++ b/drivers/infiniband/hw/hfi1/efivar.c @@ -3,7 +3,9 @@ * Copyright(c) 2015, 2016 Intel Corporation. */ -#include <linux/ctype.h> +#include <linux/string.h> +#include <linux/string_helpers.h> + #include "efivar.h" /* GUID for HFI1 variables in EFI */ @@ -112,7 +114,6 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind, char prefix_name[64]; char name[64]; int result; - int i; /* create a common prefix */ snprintf(prefix_name, sizeof(prefix_name), "%04x:%02x:%02x.%x", @@ -128,10 +129,7 @@ int read_hfi1_efi_var(struct hfi1_devdata *dd, const char *kind, * variable. */ if (result) { - /* Converting to uppercase */ - for (i = 0; prefix_name[i]; i++) - if (isalpha(prefix_name[i])) - prefix_name[i] = toupper(prefix_name[i]); + string_upper(prefix_name, prefix_name); snprintf(name, sizeof(name), "%s-%s", prefix_name, kind); result = read_efi_var(name, size, return_data); } diff --git a/drivers/infiniband/hw/hfi1/init.c b/drivers/infiniband/hw/hfi1/init.c index e3679d076eaa..4436ed41547c 100644 --- a/drivers/infiniband/hw/hfi1/init.c +++ b/drivers/infiniband/hw/hfi1/init.c @@ -1,6 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause /* * Copyright(c) 2015 - 2020 Intel Corporation. + * Copyright(c) 2021 Cornelis Networks. */ #include <linux/pci.h> @@ -112,7 +113,6 @@ static int hfi1_create_kctxt(struct hfi1_devdata *dd, rcd->fast_handler = get_dma_rtail_setting(rcd) ? handle_receive_interrupt_dma_rtail : handle_receive_interrupt_nodma_rtail; - rcd->slow_handler = handle_receive_interrupt; hfi1_set_seq_cnt(rcd, 1); @@ -333,6 +333,8 @@ int hfi1_create_ctxtdata(struct hfi1_pportdata *ppd, int numa, rcd->numa_id = numa; rcd->rcv_array_groups = dd->rcv_entries.ngroups; rcd->rhf_rcv_function_map = normal_rhf_rcv_functions; + rcd->slow_handler = handle_receive_interrupt; + rcd->do_interrupt = rcd->slow_handler; rcd->msix_intr = CCE_NUM_MSIX_VECTORS; mutex_init(&rcd->exp_mutex); @@ -873,18 +875,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit) if (ret) goto done; - /* allocate dummy tail memory for all receive contexts */ - dd->rcvhdrtail_dummy_kvaddr = dma_alloc_coherent(&dd->pcidev->dev, - sizeof(u64), - &dd->rcvhdrtail_dummy_dma, - GFP_KERNEL); - - if (!dd->rcvhdrtail_dummy_kvaddr) { - dd_dev_err(dd, "cannot allocate dummy tail memory\n"); - ret = -ENOMEM; - goto done; - } - /* dd->rcd can be NULL if early initialization failed */ for (i = 0; dd->rcd && i < dd->first_dyn_alloc_ctxt; ++i) { /* @@ -897,8 +887,6 @@ int hfi1_init(struct hfi1_devdata *dd, int reinit) if (!rcd) continue; - rcd->do_interrupt = &handle_receive_interrupt; - lastfail = hfi1_create_rcvhdrq(dd, rcd); if (!lastfail) lastfail = hfi1_setup_eagerbufs(rcd); @@ -1119,7 +1107,7 @@ void hfi1_free_ctxtdata(struct hfi1_devdata *dd, struct hfi1_ctxtdata *rcd) rcd->egrbufs.rcvtids = NULL; for (e = 0; e < rcd->egrbufs.alloced; e++) { - if (rcd->egrbufs.buffers[e].dma) + if (rcd->egrbufs.buffers[e].addr) dma_free_coherent(&dd->pcidev->dev, rcd->egrbufs.buffers[e].len, rcd->egrbufs.buffers[e].addr, @@ -1200,6 +1188,11 @@ void hfi1_free_devdata(struct hfi1_devdata *dd) dd->tx_opstats = NULL; kfree(dd->comp_vect); dd->comp_vect = NULL; + if (dd->rcvhdrtail_dummy_kvaddr) + dma_free_coherent(&dd->pcidev->dev, sizeof(u64), + (void *)dd->rcvhdrtail_dummy_kvaddr, + dd->rcvhdrtail_dummy_dma); + dd->rcvhdrtail_dummy_kvaddr = NULL; sdma_clean(dd, 
dd->num_sdma); rvt_dealloc_device(&dd->verbs_dev.rdi); } @@ -1297,6 +1290,15 @@ static struct hfi1_devdata *hfi1_alloc_devdata(struct pci_dev *pdev, goto bail; } + /* allocate dummy tail memory for all receive contexts */ + dd->rcvhdrtail_dummy_kvaddr = + dma_alloc_coherent(&dd->pcidev->dev, sizeof(u64), + &dd->rcvhdrtail_dummy_dma, GFP_KERNEL); + if (!dd->rcvhdrtail_dummy_kvaddr) { + ret = -ENOMEM; + goto bail; + } + atomic_set(&dd->ipoib_rsm_usr_num, 0); return dd; @@ -1342,7 +1344,7 @@ static void remove_one(struct pci_dev *); static int init_one(struct pci_dev *, const struct pci_device_id *); static void shutdown_one(struct pci_dev *); -#define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: " +#define DRIVER_LOAD_MSG "Cornelis " DRIVER_NAME " loaded: " #define PFX DRIVER_NAME ": " const struct pci_device_id hfi1_pci_tbl[] = { @@ -1504,13 +1506,6 @@ static void cleanup_device_data(struct hfi1_devdata *dd) free_credit_return(dd); - if (dd->rcvhdrtail_dummy_kvaddr) { - dma_free_coherent(&dd->pcidev->dev, sizeof(u64), - (void *)dd->rcvhdrtail_dummy_kvaddr, - dd->rcvhdrtail_dummy_dma); - dd->rcvhdrtail_dummy_kvaddr = NULL; - } - /* * Free any resources still in use (usually just kernel contexts) * at unload; we do for ctxtcnt, because that's what we allocate. diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h index 2cff38b105ac..aec60d4888eb 100644 --- a/drivers/infiniband/hw/hfi1/ipoib.h +++ b/drivers/infiniband/hw/hfi1/ipoib.h @@ -44,22 +44,52 @@ union hfi1_ipoib_flow { }; /** + * struct ipoib_txreq - IPOIB transmit descriptor + * @txreq: sdma transmit request + * @sdma_hdr: 9b ib headers + * @sdma_status: status returned by sdma engine + * @complete: non-zero implies complete + * @priv: ipoib netdev private data + * @txq: txq on which skb was output + * @skb: skb to send + */ +struct ipoib_txreq { + struct sdma_txreq txreq; + struct hfi1_sdma_header *sdma_hdr; + int sdma_status; + int complete; + struct hfi1_ipoib_dev_priv *priv; + struct hfi1_ipoib_txq *txq; + struct sk_buff *skb; +}; + +/** * struct hfi1_ipoib_circ_buf - List of items to be processed - * @items: ring of items - * @head: ring head - * @tail: ring tail + * @items: ring of items each a power of two size * @max_items: max items + 1 that the ring can contain - * @producer_lock: producer sync lock - * @consumer_lock: consumer sync lock + * @shift: log2 of size for getting txreq + * @sent_txreqs: count of txreqs posted to sdma + * @tail: ring tail + * @stops: count of stops of queue + * @ring_full: ring has been filled + * @no_desc: descriptor shortage seen + * @complete_txreqs: count of txreqs completed by sdma + * @head: ring head */ -struct ipoib_txreq; struct hfi1_ipoib_circ_buf { - struct ipoib_txreq **items; - unsigned long head; - unsigned long tail; - unsigned long max_items; - spinlock_t producer_lock; /* head sync lock */ - spinlock_t consumer_lock; /* tail sync lock */ + void *items; + u32 max_items; + u32 shift; + /* consumer cache line */ + u64 ____cacheline_aligned_in_smp sent_txreqs; + u32 avail; + u32 tail; + atomic_t stops; + atomic_t ring_full; + atomic_t no_desc; + /* producer cache line */ + u64 ____cacheline_aligned_in_smp complete_txreqs; + u32 head; }; /** @@ -68,33 +98,24 @@ struct hfi1_ipoib_circ_buf { * @sde: sdma engine * @tx_list: tx request list * @sent_txreqs: count of txreqs posted to sdma - * @stops: count of stops of queue - * @ring_full: ring has been filled - * @no_desc: descriptor shortage seen * @flow: tracks when list needs to be flushed for a flow 
change * @q_idx: ipoib Tx queue index * @pkts_sent: indicator packets have been sent from this queue * @wait: iowait structure - * @complete_txreqs: count of txreqs completed by sdma * @napi: pointer to tx napi interface * @tx_ring: ring of ipoib txreqs to be reaped by napi callback */ struct hfi1_ipoib_txq { + struct napi_struct napi; struct hfi1_ipoib_dev_priv *priv; struct sdma_engine *sde; struct list_head tx_list; - u64 sent_txreqs; - atomic_t stops; - atomic_t ring_full; - atomic_t no_desc; union hfi1_ipoib_flow flow; u8 q_idx; bool pkts_sent; struct iowait wait; - atomic64_t ____cacheline_aligned_in_smp complete_txreqs; - struct napi_struct *napi; - struct hfi1_ipoib_circ_buf tx_ring; + struct hfi1_ipoib_circ_buf ____cacheline_aligned_in_smp tx_ring; }; struct hfi1_ipoib_dev_priv { @@ -102,15 +123,12 @@ struct hfi1_ipoib_dev_priv { struct net_device *netdev; struct ib_device *device; struct hfi1_ipoib_txq *txqs; - struct kmem_cache *txreq_cache; - struct napi_struct *tx_napis; + const struct net_device_ops *netdev_ops; + struct rvt_qp *qp; + u32 qkey; u16 pkey; u16 pkey_index; - u32 qkey; u8 port_num; - - const struct net_device_ops *netdev_ops; - struct rvt_qp *qp; }; /* hfi1 ipoib rdma netdev's private data structure */ diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c index e594a961f513..5d814afdf7f3 100644 --- a/drivers/infiniband/hw/hfi1/ipoib_main.c +++ b/drivers/infiniband/hw/hfi1/ipoib_main.c @@ -11,7 +11,7 @@ #include "ipoib.h" #include "hfi.h" -static u32 qpn_from_mac(u8 *mac_arr) +static u32 qpn_from_mac(const u8 *mac_arr) { return (u32)mac_arr[1] << 16 | mac_arr[2] << 8 | mac_arr[3]; } @@ -22,26 +22,35 @@ static int hfi1_ipoib_dev_init(struct net_device *dev) int ret; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; ret = priv->netdev_ops->ndo_init(dev); if (ret) - return ret; + goto out_ret; ret = hfi1_netdev_add_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr), dev); if (ret < 0) { priv->netdev_ops->ndo_uninit(dev); - return ret; + goto out_ret; } return 0; +out_ret: + free_percpu(dev->tstats); + dev->tstats = NULL; + return ret; } static void hfi1_ipoib_dev_uninit(struct net_device *dev) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); + free_percpu(dev->tstats); + dev->tstats = NULL; + hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr)); priv->netdev_ops->ndo_uninit(dev); @@ -166,12 +175,7 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev) hfi1_ipoib_rxq_deinit(priv->netdev); free_percpu(dev->tstats); -} - -static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev) -{ - hfi1_ipoib_netdev_dtor(dev); - free_netdev(dev); + dev->tstats = NULL; } static void hfi1_ipoib_set_id(struct net_device *dev, int id) @@ -211,24 +215,23 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device, priv->port_num = port_num; priv->netdev_ops = netdev->netdev_ops; - netdev->netdev_ops = &hfi1_ipoib_netdev_ops; - ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey); rc = hfi1_ipoib_txreq_init(priv); if (rc) { dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc); - hfi1_ipoib_free_rdma_netdev(netdev); return rc; } rc = hfi1_ipoib_rxq_init(netdev); if (rc) { dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc); - hfi1_ipoib_free_rdma_netdev(netdev); + hfi1_ipoib_txreq_deinit(priv); return rc; } + netdev->netdev_ops = &hfi1_ipoib_netdev_ops; + netdev->priv_destructor = hfi1_ipoib_netdev_dtor; netdev->needs_free_netdev = true; 
diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c index 15b0cb0f363f..d6bbdb8fcb50 100644 --- a/drivers/infiniband/hw/hfi1/ipoib_tx.c +++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c @@ -22,24 +22,6 @@ #define CIRC_NEXT(val, size) CIRC_ADD(val, 1, size) #define CIRC_PREV(val, size) CIRC_ADD(val, -1, size) -/** - * struct ipoib_txreq - IPOIB transmit descriptor - * @txreq: sdma transmit request - * @sdma_hdr: 9b ib headers - * @sdma_status: status returned by sdma engine - * @priv: ipoib netdev private data - * @txq: txq on which skb was output - * @skb: skb to send - */ -struct ipoib_txreq { - struct sdma_txreq txreq; - struct hfi1_sdma_header sdma_hdr; - int sdma_status; - struct hfi1_ipoib_dev_priv *priv; - struct hfi1_ipoib_txq *txq; - struct sk_buff *skb; -}; - struct ipoib_txparms { struct hfi1_devdata *dd; struct rdma_ah_attr *ah_attr; @@ -51,28 +33,34 @@ struct ipoib_txparms { u8 entropy; }; -static u64 hfi1_ipoib_txreqs(const u64 sent, const u64 completed) +static struct ipoib_txreq * +hfi1_txreq_from_idx(struct hfi1_ipoib_circ_buf *r, u32 idx) +{ + return (struct ipoib_txreq *)(r->items + (idx << r->shift)); +} + +static u32 hfi1_ipoib_txreqs(const u64 sent, const u64 completed) { return sent - completed; } static u64 hfi1_ipoib_used(struct hfi1_ipoib_txq *txq) { - return hfi1_ipoib_txreqs(txq->sent_txreqs, - atomic64_read(&txq->complete_txreqs)); + return hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs, + txq->tx_ring.complete_txreqs); } static void hfi1_ipoib_stop_txq(struct hfi1_ipoib_txq *txq) { trace_hfi1_txq_stop(txq); - if (atomic_inc_return(&txq->stops) == 1) + if (atomic_inc_return(&txq->tx_ring.stops) == 1) netif_stop_subqueue(txq->priv->netdev, txq->q_idx); } static void hfi1_ipoib_wake_txq(struct hfi1_ipoib_txq *txq) { trace_hfi1_txq_wake(txq); - if (atomic_dec_and_test(&txq->stops)) + if (atomic_dec_and_test(&txq->tx_ring.stops)) netif_wake_subqueue(txq->priv->netdev, txq->q_idx); } @@ -90,9 +78,9 @@ static uint hfi1_ipoib_ring_lwat(struct hfi1_ipoib_txq *txq) static void hfi1_ipoib_check_queue_depth(struct hfi1_ipoib_txq *txq) { - ++txq->sent_txreqs; + ++txq->tx_ring.sent_txreqs; if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq) && - !atomic_xchg(&txq->ring_full, 1)) { + !atomic_xchg(&txq->tx_ring.ring_full, 1)) { trace_hfi1_txq_full(txq); hfi1_ipoib_stop_txq(txq); } @@ -117,7 +105,7 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq) * to protect against ring overflow. 
*/ if (hfi1_ipoib_used(txq) < hfi1_ipoib_ring_lwat(txq) && - atomic_xchg(&txq->ring_full, 0)) { + atomic_xchg(&txq->tx_ring.ring_full, 0)) { trace_hfi1_txq_xmit_unstopped(txq); hfi1_ipoib_wake_txq(txq); } @@ -125,7 +113,7 @@ static void hfi1_ipoib_check_queue_stopped(struct hfi1_ipoib_txq *txq) static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget) { - struct hfi1_ipoib_dev_priv *priv = tx->priv; + struct hfi1_ipoib_dev_priv *priv = tx->txq->priv; if (likely(!tx->sdma_status)) { dev_sw_netstats_tx_add(priv->netdev, 1, tx->skb->len); @@ -134,102 +122,78 @@ static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget) dd_dev_warn(priv->dd, "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n", __func__, tx->sdma_status, - le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx, + le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx, tx->txq->sde->this_idx); } napi_consume_skb(tx->skb, budget); + tx->skb = NULL; sdma_txclean(priv->dd, &tx->txreq); - kmem_cache_free(priv->txreq_cache, tx); } -static int hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq, int budget) +static void hfi1_ipoib_drain_tx_ring(struct hfi1_ipoib_txq *txq) { struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; - unsigned long head; - unsigned long tail; - unsigned int max_tx; - int work_done; - int tx_count; - - spin_lock_bh(&tx_ring->consumer_lock); - - /* Read index before reading contents at that index. */ - head = smp_load_acquire(&tx_ring->head); - tail = tx_ring->tail; - max_tx = tx_ring->max_items; - - work_done = min_t(int, CIRC_CNT(head, tail, max_tx), budget); + int i; + struct ipoib_txreq *tx; - for (tx_count = work_done; tx_count; tx_count--) { - hfi1_ipoib_free_tx(tx_ring->items[tail], budget); - tail = CIRC_NEXT(tail, max_tx); + for (i = 0; i < tx_ring->max_items; i++) { + tx = hfi1_txreq_from_idx(tx_ring, i); + tx->complete = 0; + dev_kfree_skb_any(tx->skb); + tx->skb = NULL; + sdma_txclean(txq->priv->dd, &tx->txreq); } + tx_ring->head = 0; + tx_ring->tail = 0; + tx_ring->complete_txreqs = 0; + tx_ring->sent_txreqs = 0; + tx_ring->avail = hfi1_ipoib_ring_hwat(txq); +} - atomic64_add(work_done, &txq->complete_txreqs); +static int hfi1_ipoib_poll_tx_ring(struct napi_struct *napi, int budget) +{ + struct hfi1_ipoib_txq *txq = + container_of(napi, struct hfi1_ipoib_txq, napi); + struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; + u32 head = tx_ring->head; + u32 max_tx = tx_ring->max_items; + int work_done; + struct ipoib_txreq *tx = hfi1_txreq_from_idx(tx_ring, head); - /* Finished freeing tx items so store the tail value. */ - smp_store_release(&tx_ring->tail, tail); + trace_hfi1_txq_poll(txq); + for (work_done = 0; work_done < budget; work_done++) { + /* See hfi1_ipoib_sdma_complete() */ + if (!smp_load_acquire(&tx->complete)) + break; + tx->complete = 0; + trace_hfi1_tx_produce(tx, head); + hfi1_ipoib_free_tx(tx, budget); + head = CIRC_NEXT(head, max_tx); + tx = hfi1_txreq_from_idx(tx_ring, head); + } + tx_ring->complete_txreqs += work_done; - spin_unlock_bh(&tx_ring->consumer_lock); + /* Finished freeing tx items so store the head value. 
*/ + smp_store_release(&tx_ring->head, head); hfi1_ipoib_check_queue_stopped(txq); - return work_done; -} - -static int hfi1_ipoib_process_tx_ring(struct napi_struct *napi, int budget) -{ - struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(napi->dev); - struct hfi1_ipoib_txq *txq = &priv->txqs[napi - priv->tx_napis]; - - int work_done = hfi1_ipoib_drain_tx_ring(txq, budget); - if (work_done < budget) napi_complete_done(napi, work_done); return work_done; } -static void hfi1_ipoib_add_tx(struct ipoib_txreq *tx) -{ - struct hfi1_ipoib_circ_buf *tx_ring = &tx->txq->tx_ring; - unsigned long head; - unsigned long tail; - size_t max_tx; - - spin_lock(&tx_ring->producer_lock); - - head = tx_ring->head; - tail = READ_ONCE(tx_ring->tail); - max_tx = tx_ring->max_items; - - if (likely(CIRC_SPACE(head, tail, max_tx))) { - tx_ring->items[head] = tx; - - /* Finish storing txreq before incrementing head. */ - smp_store_release(&tx_ring->head, CIRC_ADD(head, 1, max_tx)); - napi_schedule_irqoff(tx->txq->napi); - } else { - struct hfi1_ipoib_txq *txq = tx->txq; - struct hfi1_ipoib_dev_priv *priv = tx->priv; - - /* Ring was full */ - hfi1_ipoib_free_tx(tx, 0); - atomic64_inc(&txq->complete_txreqs); - dd_dev_dbg(priv->dd, "txq %d full.\n", txq->q_idx); - } - - spin_unlock(&tx_ring->producer_lock); -} - static void hfi1_ipoib_sdma_complete(struct sdma_txreq *txreq, int status) { struct ipoib_txreq *tx = container_of(txreq, struct ipoib_txreq, txreq); + trace_hfi1_txq_complete(tx->txq); tx->sdma_status = status; - - hfi1_ipoib_add_tx(tx); + /* see hfi1_ipoib_poll_tx_ring */ + smp_store_release(&tx->complete, 1); + napi_schedule_irqoff(&tx->txq->napi); } static int hfi1_ipoib_build_ulp_payload(struct ipoib_txreq *tx, @@ -267,7 +231,7 @@ static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx, { struct hfi1_devdata *dd = txp->dd; struct sdma_txreq *txreq = &tx->txreq; - struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr; + struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr; u16 pkt_bytes = sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len; int ret; @@ -291,8 +255,8 @@ static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx, static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx, struct ipoib_txparms *txp) { - struct hfi1_ipoib_dev_priv *priv = tx->priv; - struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr; + struct hfi1_ipoib_dev_priv *priv = tx->txq->priv; + struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr; struct sk_buff *skb = tx->skb; struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp); struct rdma_ah_attr *ah_attr = txp->ah_attr; @@ -362,7 +326,7 @@ static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx, ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[1] = cpu_to_be32(txp->dqpn); - ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->sent_txreqs)); + ohdr->bth[2] = cpu_to_be32(mask_psn((u32)txp->txq->tx_ring.sent_txreqs)); /* Build the deth */ ohdr->u.ud.deth[0] = cpu_to_be32(priv->qkey); @@ -385,19 +349,32 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev, struct ipoib_txparms *txp) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); + struct hfi1_ipoib_txq *txq = txp->txq; struct ipoib_txreq *tx; + struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; + u32 tail = tx_ring->tail; int ret; - tx = kmem_cache_alloc_node(priv->txreq_cache, - GFP_ATOMIC, - priv->dd->node); - if (unlikely(!tx)) - return ERR_PTR(-ENOMEM); + if (unlikely(!tx_ring->avail)) { + u32 head; + + if (hfi1_ipoib_used(txq) >= hfi1_ipoib_ring_hwat(txq)) + /* This shouldn't 
happen with a stopped queue */ + return ERR_PTR(-ENOMEM); + /* See hfi1_ipoib_poll_tx_ring() */ + head = smp_load_acquire(&tx_ring->head); + tx_ring->avail = + min_t(u32, hfi1_ipoib_ring_hwat(txq), + CIRC_CNT(head, tail, tx_ring->max_items)); + } else { + tx_ring->avail--; + } + tx = hfi1_txreq_from_idx(tx_ring, tail); + trace_hfi1_txq_alloc_tx(txq); /* so that we can test if the sdma descriptors are there */ tx->txreq.num_desc = 0; - tx->priv = priv; - tx->txq = txp->txq; + tx->txq = txq; tx->skb = skb; INIT_LIST_HEAD(&tx->txreq.list); @@ -405,21 +382,20 @@ static struct ipoib_txreq *hfi1_ipoib_send_dma_common(struct net_device *dev, ret = hfi1_ipoib_build_tx_desc(tx, txp); if (likely(!ret)) { - if (txp->txq->flow.as_int != txp->flow.as_int) { - txp->txq->flow.tx_queue = txp->flow.tx_queue; - txp->txq->flow.sc5 = txp->flow.sc5; - txp->txq->sde = + if (txq->flow.as_int != txp->flow.as_int) { + txq->flow.tx_queue = txp->flow.tx_queue; + txq->flow.sc5 = txp->flow.sc5; + txq->sde = sdma_select_engine_sc(priv->dd, txp->flow.tx_queue, txp->flow.sc5); - trace_hfi1_flow_switch(txp->txq); + trace_hfi1_flow_switch(txq); } return tx; } sdma_txclean(priv->dd, &tx->txreq); - kmem_cache_free(priv->txreq_cache, tx); return ERR_PTR(ret); } @@ -480,8 +456,8 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev, struct sk_buff *skb, struct ipoib_txparms *txp) { - struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); struct hfi1_ipoib_txq *txq = txp->txq; + struct hfi1_ipoib_circ_buf *tx_ring; struct ipoib_txreq *tx; int ret; @@ -499,11 +475,15 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev, return NETDEV_TX_OK; } + tx_ring = &txq->tx_ring; + trace_hfi1_tx_consume(tx, tx_ring->tail); + /* consume tx */ + smp_store_release(&tx_ring->tail, CIRC_NEXT(tx_ring->tail, tx_ring->max_items)); ret = hfi1_ipoib_submit_tx(txq, tx); if (likely(!ret)) { tx_ok: - trace_sdma_output_ibhdr(tx->priv->dd, - &tx->sdma_hdr.hdr, + trace_sdma_output_ibhdr(txq->priv->dd, + &tx->sdma_hdr->hdr, ib_is_sc5(txp->flow.sc5)); hfi1_ipoib_check_queue_depth(txq); return NETDEV_TX_OK; @@ -514,9 +494,10 @@ tx_ok: if (ret == -EBUSY || ret == -ECOMM) goto tx_ok; - sdma_txclean(priv->dd, &tx->txreq); - dev_kfree_skb_any(skb); - kmem_cache_free(priv->txreq_cache, tx); + /* mark complete and kick napi tx */ + smp_store_release(&tx->complete, 1); + napi_schedule(&tx->txq->napi); + ++dev->stats.tx_carrier_errors; return NETDEV_TX_OK; @@ -527,6 +508,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev, struct ipoib_txparms *txp) { struct hfi1_ipoib_txq *txq = txp->txq; + struct hfi1_ipoib_circ_buf *tx_ring; struct ipoib_txreq *tx; /* Has the flow change ? 
*/ @@ -556,12 +538,16 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev, return NETDEV_TX_OK; } + tx_ring = &txq->tx_ring; + trace_hfi1_tx_consume(tx, tx_ring->tail); + /* consume tx */ + smp_store_release(&tx_ring->tail, CIRC_NEXT(tx_ring->tail, tx_ring->max_items)); list_add_tail(&tx->txreq.list, &txq->tx_list); hfi1_ipoib_check_queue_depth(txq); - trace_sdma_output_ibhdr(tx->priv->dd, - &tx->sdma_hdr.hdr, + trace_sdma_output_ibhdr(txq->priv->dd, + &tx->sdma_hdr->hdr, ib_is_sc5(txp->flow.sc5)); if (!netdev_xmit_more()) @@ -646,7 +632,7 @@ static int hfi1_ipoib_sdma_sleep(struct sdma_engine *sde, if (list_empty(&txq->wait.list)) { struct hfi1_ibport *ibp = &sde->ppd->ibport_data; - if (!atomic_xchg(&txq->no_desc, 1)) { + if (!atomic_xchg(&txq->tx_ring.no_desc, 1)) { trace_hfi1_txq_queued(txq); hfi1_ipoib_stop_txq(txq); } @@ -689,49 +675,36 @@ static void hfi1_ipoib_flush_txq(struct work_struct *work) if (likely(dev->reg_state == NETREG_REGISTERED) && likely(!hfi1_ipoib_flush_tx_list(dev, txq))) - if (atomic_xchg(&txq->no_desc, 0)) + if (atomic_xchg(&txq->tx_ring.no_desc, 0)) hfi1_ipoib_wake_txq(txq); } int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) { struct net_device *dev = priv->netdev; - char buf[HFI1_IPOIB_TXREQ_NAME_LEN]; - unsigned long tx_ring_size; - int i; + u32 tx_ring_size, tx_item_size; + struct hfi1_ipoib_circ_buf *tx_ring; + int i, j; /* * Ring holds 1 less than tx_ring_size * Round up to next power of 2 in order to hold at least tx_queue_len */ - tx_ring_size = roundup_pow_of_two((unsigned long)dev->tx_queue_len + 1); - - snprintf(buf, sizeof(buf), "hfi1_%u_ipoib_txreq_cache", priv->dd->unit); - priv->txreq_cache = kmem_cache_create(buf, - sizeof(struct ipoib_txreq), - 0, - 0, - NULL); - if (!priv->txreq_cache) - return -ENOMEM; - - priv->tx_napis = kcalloc_node(dev->num_tx_queues, - sizeof(struct napi_struct), - GFP_KERNEL, - priv->dd->node); - if (!priv->tx_napis) - goto free_txreq_cache; + tx_ring_size = roundup_pow_of_two(dev->tx_queue_len + 1); + tx_item_size = roundup_pow_of_two(sizeof(struct ipoib_txreq)); priv->txqs = kcalloc_node(dev->num_tx_queues, sizeof(struct hfi1_ipoib_txq), GFP_KERNEL, priv->dd->node); if (!priv->txqs) - goto free_tx_napis; + return -ENOMEM; for (i = 0; i < dev->num_tx_queues; i++) { struct hfi1_ipoib_txq *txq = &priv->txqs[i]; + struct ipoib_txreq *tx; + tx_ring = &txq->tx_ring; iowait_init(&txq->wait, 0, hfi1_ipoib_flush_txq, @@ -743,10 +716,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) txq->priv = priv; txq->sde = NULL; INIT_LIST_HEAD(&txq->tx_list); - atomic64_set(&txq->complete_txreqs, 0); - atomic_set(&txq->stops, 0); - atomic_set(&txq->ring_full, 0); - atomic_set(&txq->no_desc, 0); + atomic_set(&txq->tx_ring.stops, 0); + atomic_set(&txq->tx_ring.ring_full, 0); + atomic_set(&txq->tx_ring.no_desc, 0); txq->q_idx = i; txq->flow.tx_queue = 0xff; txq->flow.sc5 = 0xff; @@ -756,19 +728,22 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) priv->dd->node); txq->tx_ring.items = - kcalloc_node(tx_ring_size, - sizeof(struct ipoib_txreq *), - GFP_KERNEL, priv->dd->node); + kvzalloc_node(array_size(tx_ring_size, tx_item_size), + GFP_KERNEL, priv->dd->node); if (!txq->tx_ring.items) goto free_txqs; - spin_lock_init(&txq->tx_ring.producer_lock); - spin_lock_init(&txq->tx_ring.consumer_lock); txq->tx_ring.max_items = tx_ring_size; - - txq->napi = &priv->tx_napis[i]; - netif_tx_napi_add(dev, txq->napi, - hfi1_ipoib_process_tx_ring, + txq->tx_ring.shift = ilog2(tx_item_size); + txq->tx_ring.avail = 
hfi1_ipoib_ring_hwat(txq); + tx_ring = &txq->tx_ring; + for (j = 0; j < tx_ring_size; j++) + hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr = + kzalloc_node(sizeof(*tx->sdma_hdr), + GFP_KERNEL, priv->dd->node); + + netif_tx_napi_add(dev, &txq->napi, + hfi1_ipoib_poll_tx_ring, NAPI_POLL_WEIGHT); } @@ -778,20 +753,15 @@ free_txqs: for (i--; i >= 0; i--) { struct hfi1_ipoib_txq *txq = &priv->txqs[i]; - netif_napi_del(txq->napi); - kfree(txq->tx_ring.items); + netif_napi_del(&txq->napi); + tx_ring = &txq->tx_ring; + for (j = 0; j < tx_ring_size; j++) + kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr); + kvfree(tx_ring->items); } kfree(priv->txqs); priv->txqs = NULL; - -free_tx_napis: - kfree(priv->tx_napis); - priv->tx_napis = NULL; - -free_txreq_cache: - kmem_cache_destroy(priv->txreq_cache); - priv->txreq_cache = NULL; return -ENOMEM; } @@ -799,7 +769,6 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq) { struct sdma_txreq *txreq; struct sdma_txreq *txreq_tmp; - atomic64_t *complete_txreqs = &txq->complete_txreqs; list_for_each_entry_safe(txreq, txreq_tmp, &txq->tx_list, list) { struct ipoib_txreq *tx = @@ -808,41 +777,38 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq) list_del(&txreq->list); sdma_txclean(txq->priv->dd, &tx->txreq); dev_kfree_skb_any(tx->skb); - kmem_cache_free(txq->priv->txreq_cache, tx); - atomic64_inc(complete_txreqs); + tx->skb = NULL; + txq->tx_ring.complete_txreqs++; } if (hfi1_ipoib_used(txq)) dd_dev_warn(txq->priv->dd, - "txq %d not empty found %llu requests\n", + "txq %d not empty found %u requests\n", txq->q_idx, - hfi1_ipoib_txreqs(txq->sent_txreqs, - atomic64_read(complete_txreqs))); + hfi1_ipoib_txreqs(txq->tx_ring.sent_txreqs, + txq->tx_ring.complete_txreqs)); } void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv) { - int i; + int i, j; for (i = 0; i < priv->netdev->num_tx_queues; i++) { struct hfi1_ipoib_txq *txq = &priv->txqs[i]; + struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; iowait_cancel_work(&txq->wait); iowait_sdma_drain(&txq->wait); hfi1_ipoib_drain_tx_list(txq); - netif_napi_del(txq->napi); - (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items); - kfree(txq->tx_ring.items); + netif_napi_del(&txq->napi); + hfi1_ipoib_drain_tx_ring(txq); + for (j = 0; j < tx_ring->max_items; j++) + kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr); + kvfree(tx_ring->items); } kfree(priv->txqs); priv->txqs = NULL; - - kfree(priv->tx_napis); - priv->tx_napis = NULL; - - kmem_cache_destroy(priv->txreq_cache); - priv->txreq_cache = NULL; } void hfi1_ipoib_napi_tx_enable(struct net_device *dev) @@ -853,7 +819,7 @@ void hfi1_ipoib_napi_tx_enable(struct net_device *dev) for (i = 0; i < dev->num_tx_queues; i++) { struct hfi1_ipoib_txq *txq = &priv->txqs[i]; - napi_enable(txq->napi); + napi_enable(&txq->napi); } } @@ -865,8 +831,8 @@ void hfi1_ipoib_napi_tx_disable(struct net_device *dev) for (i = 0; i < dev->num_tx_queues; i++) { struct hfi1_ipoib_txq *txq = &priv->txqs[i]; - napi_disable(txq->napi); - (void)hfi1_ipoib_drain_tx_ring(txq, txq->tx_ring.max_items); + napi_disable(&txq->napi); + hfi1_ipoib_drain_tx_ring(txq); } } @@ -874,23 +840,23 @@ void hfi1_ipoib_tx_timeout(struct net_device *dev, unsigned int q) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); struct hfi1_ipoib_txq *txq = &priv->txqs[q]; - u64 completed = atomic64_read(&txq->complete_txreqs); dd_dev_info(priv->dd, "timeout txq %p q %u stopped %u stops %d no_desc %d ring_full %d\n", txq, q, __netif_subqueue_stopped(dev, txq->q_idx), - 
atomic_read(&txq->stops), - atomic_read(&txq->no_desc), - atomic_read(&txq->ring_full)); + atomic_read(&txq->tx_ring.stops), + atomic_read(&txq->tx_ring.no_desc), + atomic_read(&txq->tx_ring.ring_full)); dd_dev_info(priv->dd, "sde %p engine %u\n", txq->sde, txq->sde ? txq->sde->this_idx : 0); dd_dev_info(priv->dd, "flow %x\n", txq->flow.as_int); dd_dev_info(priv->dd, "sent %llu completed %llu used %llu\n", - txq->sent_txreqs, completed, hfi1_ipoib_used(txq)); - dd_dev_info(priv->dd, "tx_queue_len %u max_items %lu\n", + txq->tx_ring.sent_txreqs, txq->tx_ring.complete_txreqs, + hfi1_ipoib_used(txq)); + dd_dev_info(priv->dd, "tx_queue_len %u max_items %u\n", dev->tx_queue_len, txq->tx_ring.max_items); - dd_dev_info(priv->dd, "head %lu tail %lu\n", + dd_dev_info(priv->dd, "head %u tail %u\n", txq->tx_ring.head, txq->tx_ring.tail); dd_dev_info(priv->dd, "wait queued %u\n", !list_empty(&txq->wait.list)); diff --git a/drivers/infiniband/hw/hfi1/sdma.c b/drivers/infiniband/hw/hfi1/sdma.c index 2b6c24b7b586..f07d328689d3 100644 --- a/drivers/infiniband/hw/hfi1/sdma.c +++ b/drivers/infiniband/hw/hfi1/sdma.c @@ -838,8 +838,8 @@ struct sdma_engine *sdma_select_user_engine(struct hfi1_devdata *dd, if (current->nr_cpus_allowed != 1) goto out; - cpu_id = smp_processor_id(); rcu_read_lock(); + cpu_id = smp_processor_id(); rht_node = rhashtable_lookup(dd->sdma_rht, &cpu_id, sdma_rht_params); diff --git a/drivers/infiniband/hw/hfi1/trace_tx.h b/drivers/infiniband/hw/hfi1/trace_tx.h index 7318aa6150b5..ed1b9e1e4b17 100644 --- a/drivers/infiniband/hw/hfi1/trace_tx.h +++ b/drivers/infiniband/hw/hfi1/trace_tx.h @@ -917,20 +917,22 @@ DECLARE_EVENT_CLASS(/* AIP */ __entry->tail = txq->tx_ring.tail; __entry->idx = txq->q_idx; __entry->used = - txq->sent_txreqs - - atomic64_read(&txq->complete_txreqs); + txq->tx_ring.sent_txreqs - + txq->tx_ring.complete_txreqs; __entry->flow = txq->flow.as_int; - __entry->stops = atomic_read(&txq->stops); - __entry->no_desc = atomic_read(&txq->no_desc); + __entry->stops = atomic_read(&txq->tx_ring.stops); + __entry->no_desc = atomic_read(&txq->tx_ring.no_desc); __entry->stopped = __netif_subqueue_stopped(txq->priv->netdev, txq->q_idx); ), TP_printk(/* print */ - "[%s] txq %llx idx %u sde %llx head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u", + "[%s] txq %llx idx %u sde %llx:%u cpu %d head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u", __get_str(dev), (unsigned long long)__entry->txq, __entry->idx, (unsigned long long)__entry->sde, + __entry->sde ? __entry->sde->this_idx : 0, + __entry->sde ? __entry->sde->cpu : 0, __entry->head, __entry->tail, __entry->flow, @@ -995,6 +997,65 @@ DEFINE_EVENT(/* xmit_unstopped */ TP_ARGS(txq) ); +DECLARE_EVENT_CLASS(/* AIP */ + hfi1_ipoib_tx_template, + TP_PROTO(struct ipoib_txreq *tx, u32 idx), + TP_ARGS(tx, idx), + TP_STRUCT__entry(/* entry */ + DD_DEV_ENTRY(tx->txq->priv->dd) + __field(struct ipoib_txreq *, tx) + __field(struct hfi1_ipoib_txq *, txq) + __field(struct sk_buff *, skb) + __field(ulong, idx) + ), + TP_fast_assign(/* assign */ + DD_DEV_ASSIGN(tx->txq->priv->dd); + __entry->tx = tx; + __entry->skb = tx->skb; + __entry->txq = tx->txq; + __entry->idx = idx; + ), + TP_printk(/* print */ + "[%s] tx %llx txq %llx,%u skb %llx idx %lu", + __get_str(dev), + (unsigned long long)__entry->tx, + (unsigned long long)__entry->txq, + __entry->txq ? 
__entry->txq->q_idx : 0, + (unsigned long long)__entry->skb, + __entry->idx + ) +); + +DEFINE_EVENT(/* produce */ + hfi1_ipoib_tx_template, hfi1_tx_produce, + TP_PROTO(struct ipoib_txreq *tx, u32 idx), + TP_ARGS(tx, idx) +); + +DEFINE_EVENT(/* consume */ + hfi1_ipoib_tx_template, hfi1_tx_consume, + TP_PROTO(struct ipoib_txreq *tx, u32 idx), + TP_ARGS(tx, idx) +); + +DEFINE_EVENT(/* alloc_tx */ + hfi1_ipoib_txq_template, hfi1_txq_alloc_tx, + TP_PROTO(struct hfi1_ipoib_txq *txq), + TP_ARGS(txq) +); + +DEFINE_EVENT(/* poll */ + hfi1_ipoib_txq_template, hfi1_txq_poll, + TP_PROTO(struct hfi1_ipoib_txq *txq), + TP_ARGS(txq) +); + +DEFINE_EVENT(/* complete */ + hfi1_ipoib_txq_template, hfi1_txq_complete, + TP_PROTO(struct hfi1_ipoib_txq *txq), + TP_ARGS(txq) +); + #endif /* __HFI1_TRACE_TX_H */ #undef TRACE_INCLUDE_PATH diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c index 0c86e9d354f8..186d30291260 100644 --- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c +++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c @@ -692,8 +692,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd, * Allocate the node first so we can handle a potential * failure before we've programmed anything. */ - node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages), - GFP_KERNEL); + node = kzalloc(struct_size(node, pages, npages), GFP_KERNEL); if (!node) return -ENOMEM; @@ -713,7 +712,7 @@ static int set_rcvarray_entry(struct hfi1_filedata *fd, node->dma_addr = phys; node->grp = grp; node->freed = false; - memcpy(node->pages, pages, sizeof(struct page *) * npages); + memcpy(node->pages, pages, flex_array_size(node, pages, npages)); if (fd->use_mn) { ret = mmu_interval_notifier_insert( diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c index 5b11c8282744..a71c5a36ceba 100644 --- a/drivers/infiniband/hw/hfi1/user_sdma.c +++ b/drivers/infiniband/hw/hfi1/user_sdma.c @@ -161,9 +161,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt, if (!pq->reqs) goto pq_reqs_nomem; - pq->req_in_use = kcalloc(BITS_TO_LONGS(hfi1_sdma_comp_ring_size), - sizeof(*pq->req_in_use), - GFP_KERNEL); + pq->req_in_use = bitmap_zalloc(hfi1_sdma_comp_ring_size, GFP_KERNEL); if (!pq->req_in_use) goto pq_reqs_no_in_use; @@ -210,7 +208,7 @@ cq_comps_nomem: cq_nomem: kmem_cache_destroy(pq->txreq_cache); pq_txreq_nomem: - kfree(pq->req_in_use); + bitmap_free(pq->req_in_use); pq_reqs_no_in_use: kfree(pq->reqs); pq_reqs_nomem: @@ -257,7 +255,7 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd, pq->wait, !atomic_read(&pq->n_reqs)); kfree(pq->reqs); - kfree(pq->req_in_use); + bitmap_free(pq->req_in_use); kmem_cache_destroy(pq->txreq_cache); flush_pq_iowait(pq); kfree(pq); diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c index 26bea51869bf..dc9211f3a009 100644 --- a/drivers/infiniband/hw/hfi1/verbs.c +++ b/drivers/infiniband/hw/hfi1/verbs.c @@ -1602,8 +1602,8 @@ static const char * const driver_cntr_names[] = { }; static DEFINE_MUTEX(cntr_names_lock); /* protects the *_cntr_names bufers */ -static const char **dev_cntr_names; -static const char **port_cntr_names; +static struct rdma_stat_desc *dev_cntr_descs; +static struct rdma_stat_desc *port_cntr_descs; int num_driver_cntrs = ARRAY_SIZE(driver_cntr_names); static int num_dev_cntrs; static int num_port_cntrs; @@ -1614,13 +1614,12 @@ static int cntr_names_initialized; * strings. Optionally some entries can be reserved in the array to hold extra * external strings. 
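The init_cntr_names() rework just below keeps the driver's single-allocation trick while converting from char * pointers to struct rdma_stat_desc: one buffer holds the descriptor array followed by a copy of the '\n'-separated name blob, and each descriptor's .name points into that tail, so a single kfree() releases everything. A minimal stand-alone sketch of the same layout, using a simplified stand-in for the descriptor type (the demo_* names are illustrative, not the driver's):

#include <linux/slab.h>
#include <linux/string.h>

struct demo_stat_desc {		/* stand-in for struct rdma_stat_desc */
	const char *name;
};

/* names_in: '\n'-terminated names, n: how many of them */
static struct demo_stat_desc *demo_build_descs(const char *names_in,
					       size_t names_len, int n)
{
	struct demo_stat_desc *descs;
	char *strings, *p;
	int i;

	/* one buffer: n descriptors, then the copied name strings */
	descs = kzalloc(n * sizeof(*descs) + names_len, GFP_KERNEL);
	if (!descs)
		return NULL;

	strings = (char *)descs + n * sizeof(*descs);
	memcpy(strings, names_in, names_len);

	/* each descriptor name points into the shared string tail */
	for (i = 0, p = strings; i < n; i++) {
		descs[i].name = p;
		p = strchr(p, '\n');
		*p++ = '\0';
	}

	return descs;	/* kfree(descs) frees the names as well */
}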
*/ -static int init_cntr_names(const char *names_in, - const size_t names_len, - int num_extra_names, - int *num_cntrs, - const char ***cntr_names) +static int init_cntr_names(const char *names_in, const size_t names_len, + int num_extra_names, int *num_cntrs, + struct rdma_stat_desc **cntr_descs) { - char *names_out, *p, **q; + struct rdma_stat_desc *q; + char *names_out, *p; int i, n; n = 0; @@ -1628,26 +1627,27 @@ static int init_cntr_names(const char *names_in, if (names_in[i] == '\n') n++; - names_out = kmalloc((n + num_extra_names) * sizeof(char *) + names_len, - GFP_KERNEL); + names_out = + kzalloc((n + num_extra_names) * sizeof(*q) + names_len, + GFP_KERNEL); if (!names_out) { *num_cntrs = 0; - *cntr_names = NULL; + *cntr_descs = NULL; return -ENOMEM; } - p = names_out + (n + num_extra_names) * sizeof(char *); + p = names_out + (n + num_extra_names) * sizeof(*q); memcpy(p, names_in, names_len); - q = (char **)names_out; + q = (struct rdma_stat_desc *)names_out; for (i = 0; i < n; i++) { - q[i] = p; + q[i].name = p; p = strchr(p, '\n'); *p++ = '\0'; } *num_cntrs = n; - *cntr_names = (const char **)names_out; + *cntr_descs = (struct rdma_stat_desc *)names_out; return 0; } @@ -1661,18 +1661,18 @@ static int init_counters(struct ib_device *ibdev) goto out_unlock; err = init_cntr_names(dd->cntrnames, dd->cntrnameslen, num_driver_cntrs, - &num_dev_cntrs, &dev_cntr_names); + &num_dev_cntrs, &dev_cntr_descs); if (err) goto out_unlock; for (i = 0; i < num_driver_cntrs; i++) - dev_cntr_names[num_dev_cntrs + i] = driver_cntr_names[i]; + dev_cntr_descs[num_dev_cntrs + i].name = driver_cntr_names[i]; err = init_cntr_names(dd->portcntrnames, dd->portcntrnameslen, 0, - &num_port_cntrs, &port_cntr_names); + &num_port_cntrs, &port_cntr_descs); if (err) { - kfree(dev_cntr_names); - dev_cntr_names = NULL; + kfree(dev_cntr_descs); + dev_cntr_descs = NULL; goto out_unlock; } cntr_names_initialized = 1; @@ -1686,7 +1686,7 @@ static struct rdma_hw_stats *hfi1_alloc_hw_device_stats(struct ib_device *ibdev) { if (init_counters(ibdev)) return NULL; - return rdma_alloc_hw_stats_struct(dev_cntr_names, + return rdma_alloc_hw_stats_struct(dev_cntr_descs, num_dev_cntrs + num_driver_cntrs, RDMA_HW_STATS_DEFAULT_LIFESPAN); } @@ -1696,7 +1696,7 @@ static struct rdma_hw_stats *hfi_alloc_hw_port_stats(struct ib_device *ibdev, { if (init_counters(ibdev)) return NULL; - return rdma_alloc_hw_stats_struct(port_cntr_names, num_port_cntrs, + return rdma_alloc_hw_stats_struct(port_cntr_descs, num_port_cntrs, RDMA_HW_STATS_DEFAULT_LIFESPAN); } @@ -1921,10 +1921,10 @@ void hfi1_unregister_ib_device(struct hfi1_devdata *dd) verbs_txreq_exit(dev); mutex_lock(&cntr_names_lock); - kfree(dev_cntr_names); - kfree(port_cntr_names); - dev_cntr_names = NULL; - port_cntr_names = NULL; + kfree(dev_cntr_descs); + kfree(port_cntr_descs); + dev_cntr_descs = NULL; + port_cntr_descs = NULL; cntr_names_initialized = 0; mutex_unlock(&cntr_names_lock); } diff --git a/drivers/infiniband/hw/hns/Kconfig b/drivers/infiniband/hw/hns/Kconfig index 18d10ebf900b..ab3fbba70789 100644 --- a/drivers/infiniband/hw/hns/Kconfig +++ b/drivers/infiniband/hw/hns/Kconfig @@ -5,22 +5,9 @@ config INFINIBAND_HNS depends on ARM64 || (COMPILE_TEST && 64BIT) depends on (HNS_DSAF && HNS_ENET) || HNS3 help - This is a RoCE/RDMA driver for the Hisilicon RoCE engine. The engine - is used in Hisilicon Hip06 and more further ICT SoC based on - platform device. + This is a RoCE/RDMA driver for the Hisilicon RoCE engine. 
- To compile HIP06 or HIP08 driver as module, choose M here. - -config INFINIBAND_HNS_HIP06 - bool "Hisilicon Hip06 Family RoCE support" - depends on INFINIBAND_HNS && HNS && HNS_DSAF && HNS_ENET - depends on INFINIBAND_HNS=m || (HNS_DSAF=y && HNS_ENET=y) - help - RoCE driver support for Hisilicon RoCE engine in Hisilicon Hip06 and - Hip07 SoC. These RoCE engines are platform devices. - - To compile this driver, choose Y here: if INFINIBAND_HNS is m, this - module will be called hns-roce-hw-v1 + To compile HIP08 driver as module, choose M here. config INFINIBAND_HNS_HIP08 bool "Hisilicon Hip08 Family RoCE support" diff --git a/drivers/infiniband/hw/hns/Makefile b/drivers/infiniband/hw/hns/Makefile index e105945b94a1..9f04f25d9631 100644 --- a/drivers/infiniband/hw/hns/Makefile +++ b/drivers/infiniband/hw/hns/Makefile @@ -9,11 +9,6 @@ hns-roce-objs := hns_roce_main.o hns_roce_cmd.o hns_roce_pd.o \ hns_roce_ah.o hns_roce_hem.o hns_roce_mr.o hns_roce_qp.o \ hns_roce_cq.o hns_roce_alloc.o hns_roce_db.o hns_roce_srq.o hns_roce_restrack.o -ifdef CONFIG_INFINIBAND_HNS_HIP06 -hns-roce-hw-v1-objs := hns_roce_hw_v1.o $(hns-roce-objs) -obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v1.o -endif - ifdef CONFIG_INFINIBAND_HNS_HIP08 hns-roce-hw-v2-objs := hns_roce_hw_v2.o hns_roce_hw_v2_dfx.o $(hns-roce-objs) obj-$(CONFIG_INFINIBAND_HNS) += hns-roce-hw-v2.o diff --git a/drivers/infiniband/hw/hns/hns_roce_ah.c b/drivers/infiniband/hw/hns/hns_roce_ah.c index cc258edec331..492b122d0521 100644 --- a/drivers/infiniband/hw/hns/hns_roce_ah.c +++ b/drivers/infiniband/hw/hns/hns_roce_ah.c @@ -30,7 +30,6 @@ * SOFTWARE. */ -#include <linux/platform_device.h> #include <linux/pci.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> @@ -61,7 +60,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, struct hns_roce_ah *ah = to_hr_ah(ibah); int ret = 0; - if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 && udata) + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 && udata) return -EOPNOTSUPP; ah->av.port = rdma_ah_get_port_num(ah_attr); @@ -80,7 +79,7 @@ int hns_roce_create_ah(struct ib_ah *ibah, struct rdma_ah_init_attr *init_attr, memcpy(ah->av.mac, ah_attr->roce.dmac, ETH_ALEN); /* HIP08 needs to record vlan info in Address Vector */ - if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08) { + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { ret = rdma_read_gid_l2_fields(ah_attr->grh.sgid_attr, &ah->av.vlan_id, NULL); if (ret) diff --git a/drivers/infiniband/hw/hns/hns_roce_alloc.c b/drivers/infiniband/hw/hns/hns_roce_alloc.c index d4fa0fd52294..11a78ceae568 100644 --- a/drivers/infiniband/hw/hns/hns_roce_alloc.c +++ b/drivers/infiniband/hw/hns/hns_roce_alloc.c @@ -31,10 +31,9 @@ * SOFTWARE. 
*/ -#include <linux/platform_device.h> #include <linux/vmalloc.h> -#include "hns_roce_device.h" #include <rdma/ib_umem.h> +#include "hns_roce_device.h" void hns_roce_buf_free(struct hns_roce_dev *hr_dev, struct hns_roce_buf *buf) { diff --git a/drivers/infiniband/hw/hns/hns_roce_cmd.c b/drivers/infiniband/hw/hns/hns_roce_cmd.c index 84f3f2b5f097..4b693d542ace 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cmd.c +++ b/drivers/infiniband/hw/hns/hns_roce_cmd.c @@ -31,7 +31,6 @@ */ #include <linux/dmapool.h> -#include <linux/platform_device.h> #include "hns_roce_common.h" #include "hns_roce_device.h" #include "hns_roce_cmd.h" @@ -61,7 +60,7 @@ static int __hns_roce_cmd_mbox_poll(struct hns_roce_dev *hr_dev, u64 in_param, CMD_POLL_TOKEN, 0); if (ret) { dev_err_ratelimited(hr_dev->dev, - "failed to post mailbox %x in poll mode, ret = %d.\n", + "failed to post mailbox 0x%x in poll mode, ret = %d.\n", op, ret); return ret; } @@ -91,7 +90,7 @@ void hns_roce_cmd_event(struct hns_roce_dev *hr_dev, u16 token, u8 status, if (unlikely(token != context->token)) { dev_err_ratelimited(hr_dev->dev, - "[cmd] invalid ae token %x,context token is %x!\n", + "[cmd] invalid ae token 0x%x, context token is 0x%x.\n", token, context->token); return; } @@ -130,14 +129,14 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, context->token, 1); if (ret) { dev_err_ratelimited(dev, - "failed to post mailbox %x in event mode, ret = %d.\n", + "failed to post mailbox 0x%x in event mode, ret = %d.\n", op, ret); goto out; } if (!wait_for_completion_timeout(&context->done, msecs_to_jiffies(timeout))) { - dev_err_ratelimited(dev, "[cmd] token %x mailbox %x timeout.\n", + dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x timeout.\n", context->token, op); ret = -EBUSY; goto out; @@ -145,7 +144,7 @@ static int __hns_roce_cmd_mbox_wait(struct hns_roce_dev *hr_dev, u64 in_param, ret = context->result; if (ret) - dev_err_ratelimited(dev, "[cmd] token %x mailbox %x error %d\n", + dev_err_ratelimited(dev, "[cmd] token 0x%x mailbox 0x%x error %d.\n", context->token, op, ret); out: diff --git a/drivers/infiniband/hw/hns/hns_roce_common.h b/drivers/infiniband/hw/hns/hns_roce_common.h index b73e55de83ac..465d1f914b6c 100644 --- a/drivers/infiniband/hw/hns/hns_roce_common.h +++ b/drivers/infiniband/hw/hns/hns_roce_common.h @@ -104,208 +104,6 @@ #define hr_reg_read(ptr, field) _hr_reg_read(ptr, field) -#define ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S 3 -#define ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S 4 - -#define ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S 5 - -#define ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S 6 - -#define ROCEE_GLB_CFG_ROCEE_PORT_ST_S 10 -#define ROCEE_GLB_CFG_ROCEE_PORT_ST_M \ - (((1UL << 6) - 1) << ROCEE_GLB_CFG_ROCEE_PORT_ST_S) - -#define ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S 16 - -#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S 0 -#define ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M \ - (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S) - -#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S 24 -#define ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M \ - (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S) - -#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S 0 -#define ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M \ - (((1UL << 24) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S) - -#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S 24 -#define ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M \ - (((1UL << 4) - 1) << ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S) - -#define 
ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S 0 -#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M \ - (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S) - -#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S 16 -#define ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M \ - (((1UL << 16) - 1) << ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S) - -#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S 0 -#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M \ - (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S) - -#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S 16 -#define ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M \ - (((1UL << 16) - 1) << ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S) - -#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_S 0 -#define ROCEE_RAQ_WL_ROCEE_RAQ_WL_M \ - (((1UL << 8) - 1) << ROCEE_RAQ_WL_ROCEE_RAQ_WL_S) - -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S 0 -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M \ - (((1UL << 15) - 1) << \ - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S) - -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S 16 -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M \ - (((1UL << 4) - 1) << \ - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S) - -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S 20 - -#define ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE 21 - -#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S 0 -#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M \ - (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S) - -#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S 5 -#define ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M \ - (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S) - -#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S 0 -#define ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M \ - (((1UL << 5) - 1) << ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S) - -#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S 5 -#define ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M \ - (((1UL << 5) - 1) << ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S) - -#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S 0 -#define ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M \ - (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S) - -#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S 8 -#define ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M \ - (((1UL << 5) - 1) << ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S) - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S 0 -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M \ - (((1UL << 19) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S) - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_S 19 - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S 20 -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M \ - (((1UL << 2) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S) - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S 22 -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M \ - (((1UL << 5) - 1) << ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S) - -#define ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S 31 - -#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S 0 -#define ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M \ - (((1UL << 3) - 1) << ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S) - -#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S 0 -#define ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M \ - (((1UL << 15) - 1) << ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S) - -#define ROCEE_MB6_ROCEE_MB_CMD_S 0 -#define ROCEE_MB6_ROCEE_MB_CMD_M \ - (((1UL << 8) - 1) << ROCEE_MB6_ROCEE_MB_CMD_S) - -#define ROCEE_MB6_ROCEE_MB_CMD_MDF_S 8 -#define ROCEE_MB6_ROCEE_MB_CMD_MDF_M \ - (((1UL << 4) - 1) << ROCEE_MB6_ROCEE_MB_CMD_MDF_S) - -#define ROCEE_MB6_ROCEE_MB_EVENT_S 14 - -#define ROCEE_MB6_ROCEE_MB_HW_RUN_S 15 - -#define ROCEE_MB6_ROCEE_MB_TOKEN_S 16 -#define ROCEE_MB6_ROCEE_MB_TOKEN_M \ - 
(((1UL << 16) - 1) << ROCEE_MB6_ROCEE_MB_TOKEN_S) - -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S 0 -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M \ - (((1UL << 24) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S) - -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S 24 -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M \ - (((1UL << 4) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S) - -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S 28 -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M \ - (((1UL << 3) - 1) << ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S) - -#define ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S 31 - -#define ROCEE_SMAC_H_ROCEE_SMAC_H_S 0 -#define ROCEE_SMAC_H_ROCEE_SMAC_H_M \ - (((1UL << 16) - 1) << ROCEE_SMAC_H_ROCEE_SMAC_H_S) - -#define ROCEE_SMAC_H_ROCEE_PORT_MTU_S 16 -#define ROCEE_SMAC_H_ROCEE_PORT_MTU_M \ - (((1UL << 4) - 1) << ROCEE_SMAC_H_ROCEE_PORT_MTU_S) - -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S 0 -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M \ - (((1UL << 2) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S) - -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S 8 -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M \ - (((1UL << 4) - 1) << ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S) - -#define ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S 17 - -#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S 0 -#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M \ - (((1UL << 5) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S) - -#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S 16 -#define ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M \ - (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S) - -#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S 0 -#define ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M \ - (((1UL << 16) - 1) << ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S) - -#define ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S 16 -#define ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S 1 -#define ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S 0 - -#define ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S 0 -#define ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S 1 - -#define ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S 0 - -#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S 0 -#define ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_M \ - (((1UL << 28) - 1) << ROCEE_SDB_ISSUE_PTR_SDB_ISSUE_PTR_S) - -#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S 0 -#define ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_M \ - (((1UL << 28) - 1) << ROCEE_SDB_SEND_PTR_SDB_SEND_PTR_S) - -#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_S 0 -#define ROCEE_SDB_INV_CNT_SDB_INV_CNT_M \ - (((1UL << 16) - 1) << ROCEE_SDB_INV_CNT_SDB_INV_CNT_S) - -#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S 0 -#define ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_M \ - (((1UL << 16) - 1) << ROCEE_SDB_RETRY_CNT_SDB_RETRY_CT_S) - -#define ROCEE_SDB_CNT_CMP_BITS 16 - -#define ROCEE_TSP_BP_ST_QH_FIFO_ENTRY_S 20 - -#define ROCEE_CNT_CLR_CE_CNT_CLR_CE_S 0 - /*************ROCEE_REG DEFINITION****************/ #define ROCEE_VENDOR_ID_REG 0x0 #define ROCEE_VENDOR_PART_ID_REG 0x4 diff --git a/drivers/infiniband/hw/hns/hns_roce_cq.c b/drivers/infiniband/hw/hns/hns_roce_cq.c index d763f097599f..55057dcbb2dc 100644 --- a/drivers/infiniband/hw/hns/hns_roce_cq.c +++ b/drivers/infiniband/hw/hns/hns_roce_cq.c @@ -30,7 +30,6 @@ * SOFTWARE. 
*/ -#include <linux/platform_device.h> #include <rdma/ib_umem.h> #include <rdma/uverbs_ioctl.h> #include "hns_roce_device.h" @@ -406,15 +405,6 @@ int hns_roce_create_cq(struct ib_cq *ib_cq, const struct ib_cq_init_attr *attr, goto err_cqn; } - /* - * For the QP created by kernel space, tptr value should be initialized - * to zero; For the QP created by user space, it will cause synchronous - * problems if tptr is set to zero here, so we initialize it in user - * space. - */ - if (!udata && hr_cq->tptr_addr) - *hr_cq->tptr_addr = 0; - if (udata) { resp.cqn = hr_cq->cqn; ret = ib_copy_to_udata(udata, &resp, @@ -441,9 +431,6 @@ int hns_roce_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) struct hns_roce_dev *hr_dev = to_hr_dev(ib_cq->device); struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); - if (hr_dev->hw->destroy_cq) - hr_dev->hw->destroy_cq(ib_cq, udata); - free_cqc(hr_dev, hr_cq); free_cqn(hr_dev, hr_cq->cqn); free_cq_db(hr_dev, hr_cq, udata); diff --git a/drivers/infiniband/hw/hns/hns_roce_db.c b/drivers/infiniband/hw/hns/hns_roce_db.c index 751470c7a2ce..5c4c0480832b 100644 --- a/drivers/infiniband/hw/hns/hns_roce_db.c +++ b/drivers/infiniband/hw/hns/hns_roce_db.c @@ -4,7 +4,6 @@ * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. */ -#include <linux/platform_device.h> #include <rdma/ib_umem.h> #include "hns_roce_device.h" diff --git a/drivers/infiniband/hw/hns/hns_roce_device.h b/drivers/infiniband/hw/hns/hns_roce_device.h index 9467c39e3d28..1e0bae136997 100644 --- a/drivers/infiniband/hw/hns/hns_roce_device.h +++ b/drivers/infiniband/hw/hns/hns_roce_device.h @@ -36,36 +36,18 @@ #include <rdma/ib_verbs.h> #include <rdma/hns-abi.h> -#define DRV_NAME "hns_roce" - #define PCI_REVISION_ID_HIP08 0x21 #define PCI_REVISION_ID_HIP09 0x30 -#define HNS_ROCE_HW_VER1 ('h' << 24 | 'i' << 16 | '0' << 8 | '6') - #define HNS_ROCE_MAX_MSG_LEN 0x80000000 #define HNS_ROCE_IB_MIN_SQ_STRIDE 6 #define BA_BYTE_LEN 8 -/* Hardware specification only for v1 engine */ #define HNS_ROCE_MIN_CQE_NUM 0x40 -#define HNS_ROCE_MIN_WQE_NUM 0x20 #define HNS_ROCE_MIN_SRQ_WQE_NUM 1 -/* Hardware specification only for v1 engine */ -#define HNS_ROCE_MAX_INNER_MTPT_NUM 0x7 -#define HNS_ROCE_MAX_MTPT_PBL_NUM 0x100000 - -#define HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS 20 -#define HNS_ROCE_MAX_FREE_CQ_WAIT_CNT \ - (5000 / HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS) -#define HNS_ROCE_CQE_WCMD_EMPTY_BIT 0x2 -#define HNS_ROCE_MIN_CQE_CNT 16 - -#define HNS_ROCE_RESERVED_SGE 1 - #define HNS_ROCE_MAX_IRQ_NUM 128 #define HNS_ROCE_SGE_IN_WQE 2 @@ -102,18 +84,12 @@ #define HNS_ROCE_FRMR_MAX_PA 512 #define PKEY_ID 0xffff -#define GUID_LEN 8 #define NODE_DESC_SIZE 64 #define DB_REG_OFFSET 0x1000 /* Configure to HW for PAGE_SIZE larger than 4KB */ #define PG_SHIFT_OFFSET (PAGE_SHIFT - 12) -#define PAGES_SHIFT_8 8 -#define PAGES_SHIFT_16 16 -#define PAGES_SHIFT_24 24 -#define PAGES_SHIFT_32 32 - #define HNS_ROCE_IDX_QUE_ENTRY_SZ 4 #define SRQ_DB_REG 0x230 @@ -122,11 +98,6 @@ #define CQ_BANKID_SHIFT 2 -/* The chip implementation of the consumer index is calculated - * according to twice the actual EQ depth - */ -#define EQ_DEPTH_COEFF 2 - enum { SERV_TYPE_RC, SERV_TYPE_UC, @@ -182,6 +153,7 @@ enum { HNS_ROCE_CAP_FLAG_FRMR = BIT(8), HNS_ROCE_CAP_FLAG_QP_FLOW_CTRL = BIT(9), HNS_ROCE_CAP_FLAG_ATOMIC = BIT(10), + HNS_ROCE_CAP_FLAG_DIRECT_WQE = BIT(12), HNS_ROCE_CAP_FLAG_SDI_MODE = BIT(14), HNS_ROCE_CAP_FLAG_STASH = BIT(17), }; @@ -225,11 +197,23 @@ struct hns_roce_uar { unsigned long logic_idx; }; +enum hns_roce_mmap_type { + 
HNS_ROCE_MMAP_TYPE_DB = 1, + HNS_ROCE_MMAP_TYPE_DWQE, +}; + +struct hns_user_mmap_entry { + struct rdma_user_mmap_entry rdma_entry; + enum hns_roce_mmap_type mmap_type; + u64 address; +}; + struct hns_roce_ucontext { struct ib_ucontext ibucontext; struct hns_roce_uar uar; struct list_head page_list; struct mutex page_mutex; + struct hns_user_mmap_entry *db_mmap_entry; }; struct hns_roce_pd { @@ -332,19 +316,16 @@ struct hns_roce_mw { u32 pbl_buf_pg_sz; }; -/* Only support 4K page size for mr register */ -#define MR_SIZE_4K 0 - struct hns_roce_mr { struct ib_mr ibmr; u64 iova; /* MR's virtual original addr */ u64 size; /* Address range of MR */ u32 key; /* Key of MR */ u32 pd; /* PD num of MR */ - u32 access; /* Access permission of MR */ + u32 access; /* Access permission of MR */ int enabled; /* MR's active status */ - int type; /* MR's register type */ - u32 pbl_hop_num; /* multi-hop number */ + int type; /* MR's register type */ + u32 pbl_hop_num; /* multi-hop number */ struct hns_roce_mtr pbl_mtr; u32 npages; dma_addr_t *page_list; @@ -361,17 +342,17 @@ struct hns_roce_wq { u32 wqe_cnt; /* WQE num */ u32 max_gs; u32 rsv_sge; - int offset; - int wqe_shift; /* WQE size */ + u32 offset; + u32 wqe_shift; /* WQE size */ u32 head; u32 tail; void __iomem *db_reg; }; struct hns_roce_sge { - unsigned int sge_cnt; /* SGE num */ - int offset; - int sge_shift; /* SGE size */ + unsigned int sge_cnt; /* SGE num */ + u32 offset; + u32 sge_shift; /* SGE size */ }; struct hns_roce_buf_list { @@ -440,7 +421,6 @@ struct hns_roce_cq { u32 cons_index; u32 *set_ci_db; void __iomem *db_reg; - u16 *tptr_addr; int arm_sn; int cqe_size; unsigned long cqn; @@ -455,7 +435,7 @@ struct hns_roce_cq { struct hns_roce_idx_que { struct hns_roce_mtr mtr; - int entry_shift; + u32 entry_shift; unsigned long *bitmap; u32 head; u32 tail; @@ -467,7 +447,7 @@ struct hns_roce_srq { u32 wqe_cnt; int max_gs; u32 rsv_sge; - int wqe_shift; + u32 wqe_shift; u32 cqn; u32 xrcdn; void __iomem *db_reg; @@ -526,10 +506,6 @@ struct hns_roce_srq_table { struct hns_roce_hem_table table; }; -struct hns_roce_raq_table { - struct hns_roce_buf_list *e_raq_buf; -}; - struct hns_roce_av { u8 port; u8 gid_index; @@ -614,10 +590,6 @@ struct hns_roce_work { u32 queue_num; }; -enum { - HNS_ROCE_QP_CAP_DIRECT_WQE = BIT(5), -}; - struct hns_roce_qp { struct ib_qp ibqp; struct hns_roce_wq rq; @@ -637,9 +609,7 @@ struct hns_roce_qp { u8 sl; u8 resp_depth; u8 state; - u32 access_flags; u32 atomic_rd_en; - u32 pkey_index; u32 qkey; void (*event)(struct hns_roce_qp *qp, enum hns_roce_event event_type); @@ -659,9 +629,10 @@ struct hns_roce_qp { unsigned long flush_flag; struct hns_roce_work flush_work; struct hns_roce_rinl_buf rq_inl_buf; - struct list_head node; /* all qps are on a list */ - struct list_head rq_node; /* all recv qps are on a list */ - struct list_head sq_node; /* all send qps are on a list */ + struct list_head node; /* all qps are on a list */ + struct list_head rq_node; /* all recv qps are on a list */ + struct list_head sq_node; /* all send qps are on a list */ + struct hns_user_mmap_entry *dwqe_mmap_entry; }; struct hns_roce_ib_iboe { @@ -671,11 +642,6 @@ struct hns_roce_ib_iboe { u8 phy_port[HNS_ROCE_MAX_PORTS]; }; -enum { - HNS_ROCE_EQ_STAT_INVALID = 0, - HNS_ROCE_EQ_STAT_VALID = 2, -}; - struct hns_roce_ceqe { __le32 comp; __le32 rsv[15]; @@ -707,12 +673,9 @@ struct hns_roce_eq { int type_flag; /* Aeq:1 ceq:0 */ int eqn; u32 entries; - u32 log_entries; int eqe_size; int irq; - int log_page_size; u32 cons_index; - struct 
hns_roce_buf_list *buf_list; int over_ignore; int coalesce; int arm_st; @@ -727,7 +690,6 @@ struct hns_roce_eq { struct hns_roce_eq_table { struct hns_roce_eq *eq; - void __iomem **eqc_base; /* only for hw v1 */ }; enum cong_type { @@ -754,7 +716,7 @@ struct hns_roce_caps { u32 reserved_qps; int num_qpc_timer; int num_cqc_timer; - int num_srqs; + u32 num_srqs; u32 max_wqes; u32 max_srq_wrs; u32 max_srq_sges; @@ -768,7 +730,7 @@ struct hns_roce_caps { u32 min_cqes; u32 min_wqes; u32 reserved_cqs; - int reserved_srqs; + u32 reserved_srqs; int num_aeq_vectors; int num_comp_vectors; int num_other_vectors; @@ -842,7 +804,7 @@ struct hns_roce_caps { u32 cqc_timer_ba_pg_sz; u32 cqc_timer_buf_pg_sz; u32 cqc_timer_hop_num; - u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */ + u32 cqe_ba_pg_sz; /* page_size = 4K*(2^cqe_ba_pg_sz) */ u32 cqe_buf_pg_sz; u32 cqe_hop_num; u32 srqwqe_ba_pg_sz; @@ -861,7 +823,7 @@ struct hns_roce_caps { u32 gmv_hop_num; u32 sl_num; u32 llm_buf_pg_sz; - u32 chunk_sz; /* chunk size in non multihop mode */ + u32 chunk_sz; /* chunk size in non multihop mode */ u64 flags; u16 default_ceq_max_cnt; u16 default_ceq_period; @@ -884,7 +846,6 @@ enum hns_roce_device_state { }; struct hns_roce_hw { - int (*reset)(struct hns_roce_dev *hr_dev, bool enable); int (*cmq_init)(struct hns_roce_dev *hr_dev); void (*cmq_exit)(struct hns_roce_dev *hr_dev); int (*hw_profile)(struct hns_roce_dev *hr_dev); @@ -896,13 +857,12 @@ struct hns_roce_hw { int (*poll_mbox_done)(struct hns_roce_dev *hr_dev, unsigned int timeout); bool (*chk_mbox_avail)(struct hns_roce_dev *hr_dev, bool *is_busy); - int (*set_gid)(struct hns_roce_dev *hr_dev, u32 port, int gid_index, + int (*set_gid)(struct hns_roce_dev *hr_dev, int gid_index, const union ib_gid *gid, const struct ib_gid_attr *attr); - int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, u8 *addr); - void (*set_mtu)(struct hns_roce_dev *hr_dev, u8 phy_port, - enum ib_mtu mtu); + int (*set_mac)(struct hns_roce_dev *hr_dev, u8 phy_port, + const u8 *addr); int (*write_mtpt)(struct hns_roce_dev *hr_dev, void *mb_buf, - struct hns_roce_mr *mr, unsigned long mtpt_idx); + struct hns_roce_mr *mr); int (*rereg_write_mtpt)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, int flags, void *mb_buf); @@ -922,9 +882,6 @@ struct hns_roce_hw { enum ib_qp_state new_state); int (*qp_flow_control_init)(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp); - int (*dereg_mr)(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr, - struct ib_udata *udata); - int (*destroy_cq)(struct ib_cq *ibcq, struct ib_udata *udata); int (*init_eq)(struct hns_roce_dev *hr_dev); void (*cleanup_eq)(struct hns_roce_dev *hr_dev); int (*write_srqc)(struct hns_roce_srq *srq, void *mb_buf); @@ -934,13 +891,11 @@ struct hns_roce_hw { struct hns_roce_dev { struct ib_device ib_dev; - struct platform_device *pdev; struct pci_dev *pci_dev; struct device *dev; struct hns_roce_uar priv_uar; const char *irq_names[HNS_ROCE_MAX_IRQ_NUM]; spinlock_t sm_lock; - spinlock_t bt_cmd_lock; bool active; bool is_reset; bool dis_db; @@ -987,8 +942,6 @@ struct hns_roce_dev { int loop_idc; u32 sdb_offset; u32 odb_offset; - dma_addr_t tptr_dma_addr; /* only for hw v1 */ - u32 tptr_size; /* only for hw v1 */ const struct hns_roce_hw *hw; void *priv; struct workqueue_struct *irq_workq; @@ -996,6 +949,7 @@ struct hns_roce_dev { u32 func_num; u32 is_vf; u32 cong_algo_tmpl_id; + u64 dwqe_page; }; static inline struct hns_roce_dev *to_hr_dev(struct ib_device *ib_dev) @@ -1049,6 +1003,12 @@ static inline struct 
hns_roce_srq *to_hr_srq(struct ib_srq *ibsrq) return container_of(ibsrq, struct hns_roce_srq, ibsrq); } +static inline struct hns_user_mmap_entry * +to_hns_mmap(struct rdma_user_mmap_entry *rdma_entry) +{ + return container_of(rdma_entry, struct hns_user_mmap_entry, rdma_entry); +} + static inline void hns_roce_write64_k(__le32 val[2], void __iomem *dest) { writeq(*(u64 *)val, dest); @@ -1138,7 +1098,7 @@ void hns_roce_cmd_use_polling(struct hns_roce_dev *hr_dev); /* hns roce hw need current block and next block addr from mtt */ #define MTT_MIN_COUNT 2 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, - int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); + u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr); int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr, struct hns_roce_buf_attr *buf_attr, unsigned int page_shift, struct ib_udata *udata, @@ -1259,4 +1219,8 @@ int hns_roce_init(struct hns_roce_dev *hr_dev); void hns_roce_exit(struct hns_roce_dev *hr_dev); int hns_roce_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ib_cq); +struct hns_user_mmap_entry * +hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address, + size_t length, + enum hns_roce_mmap_type mmap_type); #endif /* _HNS_ROCE_DEVICE_H */ diff --git a/drivers/infiniband/hw/hns/hns_roce_hem.c b/drivers/infiniband/hw/hns/hns_roce_hem.c index fa15d79eabb3..8917365cc6b8 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hem.c +++ b/drivers/infiniband/hw/hns/hns_roce_hem.c @@ -31,7 +31,6 @@ * SOFTWARE. */ -#include <linux/platform_device.h> #include "hns_roce_device.h" #include "hns_roce_hem.h" #include "hns_roce_common.h" diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c b/drivers/infiniband/hw/hns/hns_roce_hw_v1.c deleted file mode 100644 index e0f59b8d7d5d..000000000000 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.c +++ /dev/null @@ -1,4675 +0,0 @@ -/* - * Copyright (c) 2016 Hisilicon Limited. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
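The hns_user_mmap_entry added to hns_roce_device.h above follows the rdma core's user-mmap-entry pattern: embed a struct rdma_user_mmap_entry, register it against the ucontext, and recover the wrapper with container_of() (to_hns_mmap()) when the entry is looked up at mmap time. A hedged sketch of how such an insert helper is typically shaped — the core calls are the rdma_user_mmap_* API as I recall it, and the demo_* helper plus the exact field usage are assumptions, not the driver's implementation:

/* illustrative only; mirrors the declared hns_roce_user_mmap_entry_insert() */
static struct hns_user_mmap_entry *
demo_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address,
		       size_t length, enum hns_roce_mmap_type mmap_type)
{
	struct hns_user_mmap_entry *entry;
	int ret;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->address = address;
	entry->mmap_type = mmap_type;

	/* publish the entry; its offset is what userspace passes to mmap() */
	ret = rdma_user_mmap_entry_insert(ucontext, &entry->rdma_entry,
					  length);
	if (ret) {
		kfree(entry);
		return NULL;
	}

	return entry;
}
/* In the .mmap handler the entry is looked up from the vma offset and
 * to_hns_mmap(rdma_entry)->mmap_type decides whether a doorbell page or
 * a direct-WQE page gets mapped. */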
- */ - -#include <linux/platform_device.h> -#include <linux/acpi.h> -#include <linux/etherdevice.h> -#include <linux/interrupt.h> -#include <linux/of.h> -#include <linux/of_platform.h> -#include <rdma/ib_umem.h> -#include "hns_roce_common.h" -#include "hns_roce_device.h" -#include "hns_roce_cmd.h" -#include "hns_roce_hem.h" -#include "hns_roce_hw_v1.h" - -/** - * hns_get_gid_index - Get gid index. - * @hr_dev: pointer to structure hns_roce_dev. - * @port: port, value range: 0 ~ MAX - * @gid_index: gid_index, value range: 0 ~ MAX - * Description: - * N ports shared gids, allocation method as follow: - * GID[0][0], GID[1][0],.....GID[N - 1][0], - * GID[0][0], GID[1][0],.....GID[N - 1][0], - * And so on - */ -u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index) -{ - return gid_index * hr_dev->caps.num_ports + port; -} - -static void set_data_seg(struct hns_roce_wqe_data_seg *dseg, struct ib_sge *sg) -{ - dseg->lkey = cpu_to_le32(sg->lkey); - dseg->addr = cpu_to_le64(sg->addr); - dseg->len = cpu_to_le32(sg->length); -} - -static void set_raddr_seg(struct hns_roce_wqe_raddr_seg *rseg, u64 remote_addr, - u32 rkey) -{ - rseg->raddr = cpu_to_le64(remote_addr); - rseg->rkey = cpu_to_le32(rkey); - rseg->len = 0; -} - -static int hns_roce_v1_post_send(struct ib_qp *ibqp, - const struct ib_send_wr *wr, - const struct ib_send_wr **bad_wr) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_ah *ah = to_hr_ah(ud_wr(wr)->ah); - struct hns_roce_ud_send_wqe *ud_sq_wqe = NULL; - struct hns_roce_wqe_ctrl_seg *ctrl = NULL; - struct hns_roce_wqe_data_seg *dseg = NULL; - struct hns_roce_qp *qp = to_hr_qp(ibqp); - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_sq_db sq_db = {}; - int ps_opcode, i; - unsigned long flags = 0; - void *wqe = NULL; - __le32 doorbell[2]; - int ret = 0; - int loopback; - u32 wqe_idx; - int nreq; - u8 *smac; - - if (unlikely(ibqp->qp_type != IB_QPT_GSI && - ibqp->qp_type != IB_QPT_RC)) { - dev_err(dev, "un-supported QP type\n"); - *bad_wr = NULL; - return -EOPNOTSUPP; - } - - spin_lock_irqsave(&qp->sq.lock, flags); - - for (nreq = 0; wr; ++nreq, wr = wr->next) { - if (hns_roce_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { - ret = -ENOMEM; - *bad_wr = wr; - goto out; - } - - wqe_idx = (qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1); - - if (unlikely(wr->num_sge > qp->sq.max_gs)) { - dev_err(dev, "num_sge=%d > qp->sq.max_gs=%d\n", - wr->num_sge, qp->sq.max_gs); - ret = -EINVAL; - *bad_wr = wr; - goto out; - } - - wqe = hns_roce_get_send_wqe(qp, wqe_idx); - qp->sq.wrid[wqe_idx] = wr->wr_id; - - /* Corresponding to the RC and RD type wqe process separately */ - if (ibqp->qp_type == IB_QPT_GSI) { - ud_sq_wqe = wqe; - roce_set_field(ud_sq_wqe->dmac_h, - UD_SEND_WQE_U32_4_DMAC_0_M, - UD_SEND_WQE_U32_4_DMAC_0_S, - ah->av.mac[0]); - roce_set_field(ud_sq_wqe->dmac_h, - UD_SEND_WQE_U32_4_DMAC_1_M, - UD_SEND_WQE_U32_4_DMAC_1_S, - ah->av.mac[1]); - roce_set_field(ud_sq_wqe->dmac_h, - UD_SEND_WQE_U32_4_DMAC_2_M, - UD_SEND_WQE_U32_4_DMAC_2_S, - ah->av.mac[2]); - roce_set_field(ud_sq_wqe->dmac_h, - UD_SEND_WQE_U32_4_DMAC_3_M, - UD_SEND_WQE_U32_4_DMAC_3_S, - ah->av.mac[3]); - - roce_set_field(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_DMAC_4_M, - UD_SEND_WQE_U32_8_DMAC_4_S, - ah->av.mac[4]); - roce_set_field(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_DMAC_5_M, - UD_SEND_WQE_U32_8_DMAC_5_S, - ah->av.mac[5]); - - smac = (u8 *)hr_dev->dev_addr[qp->port]; - loopback = ether_addr_equal_unaligned(ah->av.mac, - smac) ? 
1 : 0; - roce_set_bit(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S, - loopback); - - roce_set_field(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_OPERATION_TYPE_M, - UD_SEND_WQE_U32_8_OPERATION_TYPE_S, - HNS_ROCE_WQE_OPCODE_SEND); - roce_set_field(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M, - UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S, - 2); - roce_set_bit(ud_sq_wqe->u32_8, - UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S, - 1); - - ud_sq_wqe->u32_8 |= (wr->send_flags & IB_SEND_SIGNALED ? - cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) | - (wr->send_flags & IB_SEND_SOLICITED ? - cpu_to_le32(HNS_ROCE_WQE_SE) : 0) | - ((wr->opcode == IB_WR_SEND_WITH_IMM) ? - cpu_to_le32(HNS_ROCE_WQE_IMM) : 0); - - roce_set_field(ud_sq_wqe->u32_16, - UD_SEND_WQE_U32_16_DEST_QP_M, - UD_SEND_WQE_U32_16_DEST_QP_S, - ud_wr(wr)->remote_qpn); - roce_set_field(ud_sq_wqe->u32_16, - UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M, - UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S, - ah->av.stat_rate); - - roce_set_field(ud_sq_wqe->u32_36, - UD_SEND_WQE_U32_36_FLOW_LABEL_M, - UD_SEND_WQE_U32_36_FLOW_LABEL_S, - ah->av.flowlabel); - roce_set_field(ud_sq_wqe->u32_36, - UD_SEND_WQE_U32_36_PRIORITY_M, - UD_SEND_WQE_U32_36_PRIORITY_S, - ah->av.sl); - roce_set_field(ud_sq_wqe->u32_36, - UD_SEND_WQE_U32_36_SGID_INDEX_M, - UD_SEND_WQE_U32_36_SGID_INDEX_S, - hns_get_gid_index(hr_dev, qp->phy_port, - ah->av.gid_index)); - - roce_set_field(ud_sq_wqe->u32_40, - UD_SEND_WQE_U32_40_HOP_LIMIT_M, - UD_SEND_WQE_U32_40_HOP_LIMIT_S, - ah->av.hop_limit); - roce_set_field(ud_sq_wqe->u32_40, - UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M, - UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S, - ah->av.tclass); - - memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN); - - ud_sq_wqe->va0_l = - cpu_to_le32((u32)wr->sg_list[0].addr); - ud_sq_wqe->va0_h = - cpu_to_le32((wr->sg_list[0].addr) >> 32); - ud_sq_wqe->l_key0 = - cpu_to_le32(wr->sg_list[0].lkey); - - ud_sq_wqe->va1_l = - cpu_to_le32((u32)wr->sg_list[1].addr); - ud_sq_wqe->va1_h = - cpu_to_le32((wr->sg_list[1].addr) >> 32); - ud_sq_wqe->l_key1 = - cpu_to_le32(wr->sg_list[1].lkey); - } else if (ibqp->qp_type == IB_QPT_RC) { - u32 tmp_len = 0; - - ctrl = wqe; - memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg)); - for (i = 0; i < wr->num_sge; i++) - tmp_len += wr->sg_list[i].length; - - ctrl->msg_length = - cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len); - - ctrl->sgl_pa_h = 0; - ctrl->flag = 0; - - switch (wr->opcode) { - case IB_WR_SEND_WITH_IMM: - case IB_WR_RDMA_WRITE_WITH_IMM: - ctrl->imm_data = wr->ex.imm_data; - break; - case IB_WR_SEND_WITH_INV: - ctrl->inv_key = - cpu_to_le32(wr->ex.invalidate_rkey); - break; - default: - ctrl->imm_data = 0; - break; - } - - /* Ctrl field, ctrl set type: sig, solic, imm, fence */ - /* SO wait for conforming application scenarios */ - ctrl->flag |= (wr->send_flags & IB_SEND_SIGNALED ? - cpu_to_le32(HNS_ROCE_WQE_CQ_NOTIFY) : 0) | - (wr->send_flags & IB_SEND_SOLICITED ? - cpu_to_le32(HNS_ROCE_WQE_SE) : 0) | - ((wr->opcode == IB_WR_SEND_WITH_IMM || - wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM) ? - cpu_to_le32(HNS_ROCE_WQE_IMM) : 0) | - (wr->send_flags & IB_SEND_FENCE ? 
- (cpu_to_le32(HNS_ROCE_WQE_FENCE)) : 0); - - wqe += sizeof(struct hns_roce_wqe_ctrl_seg); - - switch (wr->opcode) { - case IB_WR_RDMA_READ: - ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_READ; - set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, - rdma_wr(wr)->rkey); - break; - case IB_WR_RDMA_WRITE: - case IB_WR_RDMA_WRITE_WITH_IMM: - ps_opcode = HNS_ROCE_WQE_OPCODE_RDMA_WRITE; - set_raddr_seg(wqe, rdma_wr(wr)->remote_addr, - rdma_wr(wr)->rkey); - break; - case IB_WR_SEND: - case IB_WR_SEND_WITH_INV: - case IB_WR_SEND_WITH_IMM: - ps_opcode = HNS_ROCE_WQE_OPCODE_SEND; - break; - case IB_WR_LOCAL_INV: - case IB_WR_ATOMIC_CMP_AND_SWP: - case IB_WR_ATOMIC_FETCH_AND_ADD: - case IB_WR_LSO: - default: - ps_opcode = HNS_ROCE_WQE_OPCODE_MASK; - break; - } - ctrl->flag |= cpu_to_le32(ps_opcode); - wqe += sizeof(struct hns_roce_wqe_raddr_seg); - - dseg = wqe; - if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) { - if (le32_to_cpu(ctrl->msg_length) > - hr_dev->caps.max_sq_inline) { - ret = -EINVAL; - *bad_wr = wr; - dev_err(dev, "inline len(1-%d)=%d, illegal", - le32_to_cpu(ctrl->msg_length), - hr_dev->caps.max_sq_inline); - goto out; - } - for (i = 0; i < wr->num_sge; i++) { - memcpy(wqe, ((void *) (uintptr_t) - wr->sg_list[i].addr), - wr->sg_list[i].length); - wqe += wr->sg_list[i].length; - } - ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE); - } else { - /* sqe num is two */ - for (i = 0; i < wr->num_sge; i++) - set_data_seg(dseg + i, wr->sg_list + i); - - ctrl->flag |= cpu_to_le32(wr->num_sge << - HNS_ROCE_WQE_SGE_NUM_BIT); - } - } - } - -out: - /* Set DB return */ - if (likely(nreq)) { - qp->sq.head += nreq; - - roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SQ_HEAD_M, - SQ_DOORBELL_U32_4_SQ_HEAD_S, - (qp->sq.head & ((qp->sq.wqe_cnt << 1) - 1))); - roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_SL_M, - SQ_DOORBELL_U32_4_SL_S, qp->sl); - roce_set_field(sq_db.u32_4, SQ_DOORBELL_U32_4_PORT_M, - SQ_DOORBELL_U32_4_PORT_S, qp->phy_port); - roce_set_field(sq_db.u32_8, SQ_DOORBELL_U32_8_QPN_M, - SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn); - roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1); - - doorbell[0] = sq_db.u32_4; - doorbell[1] = sq_db.u32_8; - - hns_roce_write64_k(doorbell, qp->sq.db_reg); - } - - spin_unlock_irqrestore(&qp->sq.lock, flags); - - return ret; -} - -static int hns_roce_v1_post_recv(struct ib_qp *ibqp, - const struct ib_recv_wr *wr, - const struct ib_recv_wr **bad_wr) -{ - struct hns_roce_rq_wqe_ctrl *ctrl = NULL; - struct hns_roce_wqe_data_seg *scat = NULL; - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_rq_db rq_db = {}; - __le32 doorbell[2] = {0}; - unsigned long flags = 0; - unsigned int wqe_idx; - int ret = 0; - int nreq; - int i; - u32 reg_val; - - spin_lock_irqsave(&hr_qp->rq.lock, flags); - - for (nreq = 0; wr; ++nreq, wr = wr->next) { - if (hns_roce_wq_overflow(&hr_qp->rq, nreq, - hr_qp->ibqp.recv_cq)) { - ret = -ENOMEM; - *bad_wr = wr; - goto out; - } - - wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); - - if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) { - dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n", - wr->num_sge, hr_qp->rq.max_gs); - ret = -EINVAL; - *bad_wr = wr; - goto out; - } - - ctrl = hns_roce_get_recv_wqe(hr_qp, wqe_idx); - - roce_set_field(ctrl->rwqe_byte_12, - RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M, - RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S, - wr->num_sge); - - scat = (struct hns_roce_wqe_data_seg *)(ctrl + 1); - - for (i = 0; i < 
wr->num_sge; i++) - set_data_seg(scat + i, wr->sg_list + i); - - hr_qp->rq.wrid[wqe_idx] = wr->wr_id; - } - -out: - if (likely(nreq)) { - hr_qp->rq.head += nreq; - - if (ibqp->qp_type == IB_QPT_GSI) { - __le32 tmp; - - /* SW update GSI rq header */ - reg_val = roce_read(to_hr_dev(ibqp->device), - ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->phy_port); - tmp = cpu_to_le32(reg_val); - roce_set_field(tmp, - ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_M, - ROCEE_QP1C_CFG3_0_ROCEE_QP1C_RQ_HEAD_S, - hr_qp->rq.head); - reg_val = le32_to_cpu(tmp); - roce_write(to_hr_dev(ibqp->device), - ROCEE_QP1C_CFG3_0_REG + - QP1C_CFGN_OFFSET * hr_qp->phy_port, reg_val); - } else { - roce_set_field(rq_db.u32_4, RQ_DOORBELL_U32_4_RQ_HEAD_M, - RQ_DOORBELL_U32_4_RQ_HEAD_S, - hr_qp->rq.head); - roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_QPN_M, - RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn); - roce_set_field(rq_db.u32_8, RQ_DOORBELL_U32_8_CMD_M, - RQ_DOORBELL_U32_8_CMD_S, 1); - roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S, - 1); - - doorbell[0] = rq_db.u32_4; - doorbell[1] = rq_db.u32_8; - - hns_roce_write64_k(doorbell, hr_qp->rq.db_reg); - } - } - spin_unlock_irqrestore(&hr_qp->rq.lock, flags); - - return ret; -} - -static void hns_roce_set_db_event_mode(struct hns_roce_dev *hr_dev, - int sdb_mode, int odb_mode) -{ - __le32 tmp; - u32 val; - - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - tmp = cpu_to_le32(val); - roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_SQ_MODE_S, sdb_mode); - roce_set_bit(tmp, ROCEE_GLB_CFG_ROCEE_DB_OTH_MODE_S, odb_mode); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); -} - -static int hns_roce_v1_set_hem(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, int obj, - int step_idx) -{ - spinlock_t *lock = &hr_dev->bt_cmd_lock; - struct device *dev = hr_dev->dev; - struct hns_roce_hem_iter iter; - void __iomem *bt_cmd; - __le32 bt_cmd_val[2]; - __le32 bt_cmd_h = 0; - unsigned long flags; - __le32 bt_cmd_l; - int ret = 0; - u64 bt_ba; - long end; - - /* Find the HEM(Hardware Entry Memory) entry */ - unsigned long i = obj / (table->table_chunk_size / table->obj_size); - - switch (table->type) { - case HEM_TYPE_QPC: - case HEM_TYPE_MTPT: - case HEM_TYPE_CQC: - case HEM_TYPE_SRQC: - roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); - break; - default: - return ret; - } - - roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); - roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); - roce_set_bit(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); - - /* Currently iter only a chunk */ - for (hns_roce_hem_first(table->hem[i], &iter); - !hns_roce_hem_last(&iter); hns_roce_hem_next(&iter)) { - bt_ba = hns_roce_hem_addr(&iter) >> HNS_HW_PAGE_SHIFT; - - spin_lock_irqsave(lock, flags); - - bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; - - end = HW_SYNC_TIMEOUT_MSECS; - while (end > 0) { - if (!(readl(bt_cmd) >> BT_CMD_SYNC_SHIFT)) - break; - - mdelay(HW_SYNC_SLEEP_TIME_INTERVAL); - end -= HW_SYNC_SLEEP_TIME_INTERVAL; - } - - if (end <= 0) { - dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); - spin_unlock_irqrestore(lock, flags); - return -EBUSY; - } - - bt_cmd_l = cpu_to_le32(bt_ba); - roce_set_field(bt_cmd_h, ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, - upper_32_bits(bt_ba)); - - bt_cmd_val[0] = bt_cmd_l; - bt_cmd_val[1] = bt_cmd_h; - hns_roce_write64_k(bt_cmd_val, - hr_dev->reg_base + ROCEE_BT_CMD_L_REG); - 
spin_unlock_irqrestore(lock, flags); - } - - return ret; -} - -static void hns_roce_set_db_ext_mode(struct hns_roce_dev *hr_dev, u32 sdb_mode, - u32 odb_mode) -{ - __le32 tmp; - u32 val; - - /* Configure SDB/ODB extend mode */ - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - tmp = cpu_to_le32(val); - roce_set_bit(tmp, ROCEE_GLB_CFG_SQ_EXT_DB_MODE_S, sdb_mode); - roce_set_bit(tmp, ROCEE_GLB_CFG_OTH_EXT_DB_MODE_S, odb_mode); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); -} - -static void hns_roce_set_sdb(struct hns_roce_dev *hr_dev, u32 sdb_alept, - u32 sdb_alful) -{ - __le32 tmp; - u32 val; - - /* Configure SDB */ - val = roce_read(hr_dev, ROCEE_DB_SQ_WL_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_M, - ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_S, sdb_alful); - roce_set_field(tmp, ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_M, - ROCEE_DB_SQ_WL_ROCEE_DB_SQ_WL_EMPTY_S, sdb_alept); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_DB_SQ_WL_REG, val); -} - -static void hns_roce_set_odb(struct hns_roce_dev *hr_dev, u32 odb_alept, - u32 odb_alful) -{ - __le32 tmp; - u32 val; - - /* Configure ODB */ - val = roce_read(hr_dev, ROCEE_DB_OTHERS_WL_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_M, - ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_S, odb_alful); - roce_set_field(tmp, ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_M, - ROCEE_DB_OTHERS_WL_ROCEE_DB_OTH_WL_EMPTY_S, odb_alept); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_DB_OTHERS_WL_REG, val); -} - -static void hns_roce_set_sdb_ext(struct hns_roce_dev *hr_dev, u32 ext_sdb_alept, - u32 ext_sdb_alful) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t sdb_dma_addr; - __le32 tmp; - u32 val; - - /* Configure extend SDB threshold */ - roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_EMPTY_REG, ext_sdb_alept); - roce_write(hr_dev, ROCEE_EXT_DB_SQ_WL_REG, ext_sdb_alful); - - /* Configure extend SDB base addr */ - sdb_dma_addr = db->ext_db->sdb_buf_list->map; - roce_write(hr_dev, ROCEE_EXT_DB_SQ_REG, (u32)(sdb_dma_addr >> 12)); - - /* Configure extend SDB depth */ - val = roce_read(hr_dev, ROCEE_EXT_DB_SQ_H_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_M, - ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_SHIFT_S, - db->ext_db->esdb_dep); - /* - * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of - * using 4K page, and shift more 32 because of - * calculating the high 32 bit value evaluated to hardware. 
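A small worked example of the "44 = 32 + 12" split described in the comment above (numbers are illustrative; the register layout belongs to the deleted v1 code):

/*
 * sdb_dma_addr = 0x123456789000 (4 KB aligned).
 * Low register takes the 4K page frame number, truncated to 32 bits:
 *     (u32)(addr >> 12)  = 0x23456789
 * High field takes the bits the 32-bit register could not hold:
 *     addr >> (12 + 32)  = addr >> 44 = 0x1
 * Reassembly check:
 *     ((u64)0x1 << 44) | ((u64)0x23456789 << 12) == 0x123456789000
 */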
- */ - roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_M, - ROCEE_EXT_DB_SQ_H_EXT_DB_SQ_BA_H_S, sdb_dma_addr >> 44); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_EXT_DB_SQ_H_REG, val); - - dev_dbg(dev, "ext SDB depth: 0x%x\n", db->ext_db->esdb_dep); - dev_dbg(dev, "ext SDB threshold: empty: 0x%x, ful: 0x%x\n", - ext_sdb_alept, ext_sdb_alful); -} - -static void hns_roce_set_odb_ext(struct hns_roce_dev *hr_dev, u32 ext_odb_alept, - u32 ext_odb_alful) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t odb_dma_addr; - __le32 tmp; - u32 val; - - /* Configure extend ODB threshold */ - roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_EMPTY_REG, ext_odb_alept); - roce_write(hr_dev, ROCEE_EXT_DB_OTHERS_WL_REG, ext_odb_alful); - - /* Configure extend ODB base addr */ - odb_dma_addr = db->ext_db->odb_buf_list->map; - roce_write(hr_dev, ROCEE_EXT_DB_OTH_REG, (u32)(odb_dma_addr >> 12)); - - /* Configure extend ODB depth */ - val = roce_read(hr_dev, ROCEE_EXT_DB_OTH_H_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_M, - ROCEE_EXT_DB_OTH_H_EXT_DB_OTH_SHIFT_S, - db->ext_db->eodb_dep); - roce_set_field(tmp, ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_M, - ROCEE_EXT_DB_SQ_H_EXT_DB_OTH_BA_H_S, - db->ext_db->eodb_dep); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_EXT_DB_OTH_H_REG, val); - - dev_dbg(dev, "ext ODB depth: 0x%x\n", db->ext_db->eodb_dep); - dev_dbg(dev, "ext ODB threshold: empty: 0x%x, ful: 0x%x\n", - ext_odb_alept, ext_odb_alful); -} - -static int hns_roce_db_ext_init(struct hns_roce_dev *hr_dev, u32 sdb_ext_mod, - u32 odb_ext_mod) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t sdb_dma_addr; - dma_addr_t odb_dma_addr; - int ret = 0; - - db->ext_db = kmalloc(sizeof(*db->ext_db), GFP_KERNEL); - if (!db->ext_db) - return -ENOMEM; - - if (sdb_ext_mod) { - db->ext_db->sdb_buf_list = kmalloc( - sizeof(*db->ext_db->sdb_buf_list), GFP_KERNEL); - if (!db->ext_db->sdb_buf_list) { - ret = -ENOMEM; - goto ext_sdb_buf_fail_out; - } - - db->ext_db->sdb_buf_list->buf = dma_alloc_coherent(dev, - HNS_ROCE_V1_EXT_SDB_SIZE, - &sdb_dma_addr, GFP_KERNEL); - if (!db->ext_db->sdb_buf_list->buf) { - ret = -ENOMEM; - goto alloc_sq_db_buf_fail; - } - db->ext_db->sdb_buf_list->map = sdb_dma_addr; - - db->ext_db->esdb_dep = ilog2(HNS_ROCE_V1_EXT_SDB_DEPTH); - hns_roce_set_sdb_ext(hr_dev, HNS_ROCE_V1_EXT_SDB_ALEPT, - HNS_ROCE_V1_EXT_SDB_ALFUL); - } else - hns_roce_set_sdb(hr_dev, HNS_ROCE_V1_SDB_ALEPT, - HNS_ROCE_V1_SDB_ALFUL); - - if (odb_ext_mod) { - db->ext_db->odb_buf_list = kmalloc( - sizeof(*db->ext_db->odb_buf_list), GFP_KERNEL); - if (!db->ext_db->odb_buf_list) { - ret = -ENOMEM; - goto ext_odb_buf_fail_out; - } - - db->ext_db->odb_buf_list->buf = dma_alloc_coherent(dev, - HNS_ROCE_V1_EXT_ODB_SIZE, - &odb_dma_addr, GFP_KERNEL); - if (!db->ext_db->odb_buf_list->buf) { - ret = -ENOMEM; - goto alloc_otr_db_buf_fail; - } - db->ext_db->odb_buf_list->map = odb_dma_addr; - - db->ext_db->eodb_dep = ilog2(HNS_ROCE_V1_EXT_ODB_DEPTH); - hns_roce_set_odb_ext(hr_dev, HNS_ROCE_V1_EXT_ODB_ALEPT, - HNS_ROCE_V1_EXT_ODB_ALFUL); - } else - hns_roce_set_odb(hr_dev, HNS_ROCE_V1_ODB_ALEPT, - HNS_ROCE_V1_ODB_ALFUL); - - hns_roce_set_db_ext_mode(hr_dev, sdb_ext_mod, odb_ext_mod); - - return 0; - -alloc_otr_db_buf_fail: - kfree(db->ext_db->odb_buf_list); - -ext_odb_buf_fail_out: - if 
(sdb_ext_mod) { - dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE, - db->ext_db->sdb_buf_list->buf, - db->ext_db->sdb_buf_list->map); - } - -alloc_sq_db_buf_fail: - if (sdb_ext_mod) - kfree(db->ext_db->sdb_buf_list); - -ext_sdb_buf_fail_out: - kfree(db->ext_db); - return ret; -} - -static struct hns_roce_qp *hns_roce_v1_create_lp_qp(struct hns_roce_dev *hr_dev, - struct ib_pd *pd) -{ - struct device *dev = &hr_dev->pdev->dev; - struct ib_qp_init_attr init_attr; - struct ib_qp *qp; - - memset(&init_attr, 0, sizeof(struct ib_qp_init_attr)); - init_attr.qp_type = IB_QPT_RC; - init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; - init_attr.cap.max_recv_wr = HNS_ROCE_MIN_WQE_NUM; - init_attr.cap.max_send_wr = HNS_ROCE_MIN_WQE_NUM; - - qp = ib_create_qp(pd, &init_attr); - if (IS_ERR(qp)) { - dev_err(dev, "Create loop qp for mr free failed!"); - return NULL; - } - - return to_hr_qp(qp); -} - -static int hns_roce_v1_rsv_lp_qp(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct hns_roce_caps *caps = &hr_dev->caps; - struct ib_device *ibdev = &hr_dev->ib_dev; - struct device *dev = &hr_dev->pdev->dev; - struct ib_cq_init_attr cq_init_attr; - struct ib_qp_attr attr = { 0 }; - struct hns_roce_qp *hr_qp; - struct ib_cq *cq; - struct ib_pd *pd; - union ib_gid dgid; - __be64 subnet_prefix; - int attr_mask = 0; - int ret; - int i, j; - u8 queue_en[HNS_ROCE_V1_RESV_QP] = { 0 }; - u8 phy_port; - u32 port = 0; - u8 sl; - - /* Reserved cq for loop qp */ - cq_init_attr.cqe = HNS_ROCE_MIN_WQE_NUM * 2; - cq_init_attr.comp_vector = 0; - - cq = rdma_zalloc_drv_obj(ibdev, ib_cq); - if (!cq) - return -ENOMEM; - - ret = hns_roce_create_cq(cq, &cq_init_attr, NULL); - if (ret) { - dev_err(dev, "Create cq for reserved loop qp failed!"); - goto alloc_cq_failed; - } - free_mr->mr_free_cq = to_hr_cq(cq); - free_mr->mr_free_cq->ib_cq.device = &hr_dev->ib_dev; - free_mr->mr_free_cq->ib_cq.uobject = NULL; - free_mr->mr_free_cq->ib_cq.comp_handler = NULL; - free_mr->mr_free_cq->ib_cq.event_handler = NULL; - free_mr->mr_free_cq->ib_cq.cq_context = NULL; - atomic_set(&free_mr->mr_free_cq->ib_cq.usecnt, 0); - - pd = rdma_zalloc_drv_obj(ibdev, ib_pd); - if (!pd) { - ret = -ENOMEM; - goto alloc_mem_failed; - } - - pd->device = ibdev; - ret = hns_roce_alloc_pd(pd, NULL); - if (ret) - goto alloc_pd_failed; - - free_mr->mr_free_pd = to_hr_pd(pd); - free_mr->mr_free_pd->ibpd.device = &hr_dev->ib_dev; - free_mr->mr_free_pd->ibpd.uobject = NULL; - free_mr->mr_free_pd->ibpd.__internal_mr = NULL; - atomic_set(&free_mr->mr_free_pd->ibpd.usecnt, 0); - - attr.qp_access_flags = IB_ACCESS_REMOTE_WRITE; - attr.pkey_index = 0; - attr.min_rnr_timer = 0; - /* Disable read ability */ - attr.max_dest_rd_atomic = 0; - attr.max_rd_atomic = 0; - /* Use arbitrary values as rq_psn and sq_psn */ - attr.rq_psn = 0x0808; - attr.sq_psn = 0x0808; - attr.retry_cnt = 7; - attr.rnr_retry = 7; - attr.timeout = 0x12; - attr.path_mtu = IB_MTU_256; - attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; - rdma_ah_set_grh(&attr.ah_attr, NULL, 0, 0, 1, 0); - rdma_ah_set_static_rate(&attr.ah_attr, 3); - - subnet_prefix = cpu_to_be64(0xfe80000000000000LL); - for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { - phy_port = (i >= HNS_ROCE_MAX_PORTS) ? 
(i - 2) : - (i % HNS_ROCE_MAX_PORTS); - sl = i / HNS_ROCE_MAX_PORTS; - - for (j = 0; j < caps->num_ports; j++) { - if (hr_dev->iboe.phy_port[j] == phy_port) { - queue_en[i] = 1; - port = j; - break; - } - } - - if (!queue_en[i]) - continue; - - free_mr->mr_free_qp[i] = hns_roce_v1_create_lp_qp(hr_dev, pd); - if (!free_mr->mr_free_qp[i]) { - dev_err(dev, "Create loop qp failed!\n"); - ret = -ENOMEM; - goto create_lp_qp_failed; - } - hr_qp = free_mr->mr_free_qp[i]; - - hr_qp->port = port; - hr_qp->phy_port = phy_port; - hr_qp->ibqp.qp_type = IB_QPT_RC; - hr_qp->ibqp.device = &hr_dev->ib_dev; - hr_qp->ibqp.uobject = NULL; - atomic_set(&hr_qp->ibqp.usecnt, 0); - hr_qp->ibqp.pd = pd; - hr_qp->ibqp.recv_cq = cq; - hr_qp->ibqp.send_cq = cq; - - rdma_ah_set_port_num(&attr.ah_attr, port + 1); - rdma_ah_set_sl(&attr.ah_attr, sl); - attr.port_num = port + 1; - - attr.dest_qp_num = hr_qp->qpn; - memcpy(rdma_ah_retrieve_dmac(&attr.ah_attr), - hr_dev->dev_addr[port], - ETH_ALEN); - - memcpy(&dgid.raw, &subnet_prefix, sizeof(u64)); - memcpy(&dgid.raw[8], hr_dev->dev_addr[port], 3); - memcpy(&dgid.raw[13], hr_dev->dev_addr[port] + 3, 3); - dgid.raw[11] = 0xff; - dgid.raw[12] = 0xfe; - dgid.raw[8] ^= 2; - rdma_ah_set_dgid_raw(&attr.ah_attr, dgid.raw); - - ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask, - IB_QPS_RESET, IB_QPS_INIT); - if (ret) { - dev_err(dev, "modify qp failed(%d)!\n", ret); - goto create_lp_qp_failed; - } - - ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, IB_QP_DEST_QPN, - IB_QPS_INIT, IB_QPS_RTR); - if (ret) { - dev_err(dev, "modify qp failed(%d)!\n", ret); - goto create_lp_qp_failed; - } - - ret = hr_dev->hw->modify_qp(&hr_qp->ibqp, &attr, attr_mask, - IB_QPS_RTR, IB_QPS_RTS); - if (ret) { - dev_err(dev, "modify qp failed(%d)!\n", ret); - goto create_lp_qp_failed; - } - } - - return 0; - -create_lp_qp_failed: - for (i -= 1; i >= 0; i--) { - hr_qp = free_mr->mr_free_qp[i]; - if (ib_destroy_qp(&hr_qp->ibqp)) - dev_err(dev, "Destroy qp %d for mr free failed!\n", i); - } - - hns_roce_dealloc_pd(pd, NULL); - -alloc_pd_failed: - kfree(pd); - -alloc_mem_failed: - hns_roce_destroy_cq(cq, NULL); -alloc_cq_failed: - kfree(cq); - return ret; -} - -static void hns_roce_v1_release_lp_qp(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_qp *hr_qp; - int ret; - int i; - - for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { - hr_qp = free_mr->mr_free_qp[i]; - if (!hr_qp) - continue; - - ret = ib_destroy_qp(&hr_qp->ibqp); - if (ret) - dev_err(dev, "Destroy qp %d for mr free failed(%d)!\n", - i, ret); - } - - hns_roce_destroy_cq(&free_mr->mr_free_cq->ib_cq, NULL); - kfree(&free_mr->mr_free_cq->ib_cq); - hns_roce_dealloc_pd(&free_mr->mr_free_pd->ibpd, NULL); - kfree(&free_mr->mr_free_pd->ibpd); -} - -static int hns_roce_db_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - u32 sdb_ext_mod; - u32 odb_ext_mod; - u32 sdb_evt_mod; - u32 odb_evt_mod; - int ret; - - memset(db, 0, sizeof(*db)); - - /* Default DB mode */ - sdb_ext_mod = HNS_ROCE_SDB_EXTEND_MODE; - odb_ext_mod = HNS_ROCE_ODB_EXTEND_MODE; - sdb_evt_mod = HNS_ROCE_SDB_NORMAL_MODE; - odb_evt_mod = HNS_ROCE_ODB_POLL_MODE; - - db->sdb_ext_mod = sdb_ext_mod; - db->odb_ext_mod = odb_ext_mod; - - /* Init extend DB */ - ret = hns_roce_db_ext_init(hr_dev, sdb_ext_mod, odb_ext_mod); - 
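[Editor's aside] The extended-doorbell setup above and the RAQ/CQ-context code further below all split a 4 KB-aligned 64-bit DMA base the same way the recurring "44 = 32 + 12" comments describe: bits [43:12] go into one register as addr >> 12, and the remaining high bits go into a small field of a second register as addr >> 44. A minimal standalone sketch of that split, assuming set_field() roughly mirrors the mask-and-shift behaviour of the driver's roce_set_field() helper and with the mask/shift constants invented purely for illustration:

/*
 * Illustrative sketch, not driver code: pack a 4 KB-aligned 64-bit DMA
 * address into a "low" register (bits 43..12) and a high-address field
 * of a second register (bits 63..44, i.e. addr >> (32 + 12)).
 */
#include <stdint.h>
#include <stdio.h>

/* Clear the bits selected by mask, then place val at the given shift. */
static uint32_t set_field(uint32_t reg, uint32_t mask, int shift, uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

#define BA_H_SHIFT 0
#define BA_H_MASK  0x000fffffU	/* hypothetical high-address field */

int main(void)
{
	uint64_t dma_addr = 0x0000123456789000ULL;	/* 4 KB aligned */
	uint32_t low_reg, high_reg = 0;

	low_reg  = (uint32_t)(dma_addr >> 12);		/* bits 43..12 */
	high_reg = set_field(high_reg, BA_H_MASK, BA_H_SHIFT,
			     (uint32_t)(dma_addr >> 44));	/* bits 63..44 */

	printf("low=0x%08x high=0x%08x\n", low_reg, high_reg);
	return 0;
}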
if (ret) { - dev_err(dev, "Failed in extend DB configuration.\n"); - return ret; - } - - hns_roce_set_db_event_mode(hr_dev, sdb_evt_mod, odb_evt_mod); - - return 0; -} - -static void hns_roce_v1_recreate_lp_qp_work_fn(struct work_struct *work) -{ - struct hns_roce_recreate_lp_qp_work *lp_qp_work; - struct hns_roce_dev *hr_dev; - - lp_qp_work = container_of(work, struct hns_roce_recreate_lp_qp_work, - work); - hr_dev = to_hr_dev(lp_qp_work->ib_dev); - - hns_roce_v1_release_lp_qp(hr_dev); - - if (hns_roce_v1_rsv_lp_qp(hr_dev)) - dev_err(&hr_dev->pdev->dev, "create reserver qp failed\n"); - - if (lp_qp_work->comp_flag) - complete(lp_qp_work->comp); - - kfree(lp_qp_work); -} - -static int hns_roce_v1_recreate_lp_qp(struct hns_roce_dev *hr_dev) -{ - long end = HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS; - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct hns_roce_recreate_lp_qp_work *lp_qp_work; - struct device *dev = &hr_dev->pdev->dev; - struct completion comp; - - lp_qp_work = kzalloc(sizeof(struct hns_roce_recreate_lp_qp_work), - GFP_KERNEL); - if (!lp_qp_work) - return -ENOMEM; - - INIT_WORK(&(lp_qp_work->work), hns_roce_v1_recreate_lp_qp_work_fn); - - lp_qp_work->ib_dev = &(hr_dev->ib_dev); - lp_qp_work->comp = ∁ - lp_qp_work->comp_flag = 1; - - init_completion(lp_qp_work->comp); - - queue_work(free_mr->free_mr_wq, &(lp_qp_work->work)); - - while (end > 0) { - if (try_wait_for_completion(&comp)) - return 0; - msleep(HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE); - end -= HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE; - } - - lp_qp_work->comp_flag = 0; - if (try_wait_for_completion(&comp)) - return 0; - - dev_warn(dev, "recreate lp qp failed 20s timeout and return failed!\n"); - return -ETIMEDOUT; -} - -static int hns_roce_v1_send_lp_wqe(struct hns_roce_qp *hr_qp) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(hr_qp->ibqp.device); - struct device *dev = &hr_dev->pdev->dev; - struct ib_send_wr send_wr; - const struct ib_send_wr *bad_wr; - int ret; - - memset(&send_wr, 0, sizeof(send_wr)); - send_wr.next = NULL; - send_wr.num_sge = 0; - send_wr.send_flags = 0; - send_wr.sg_list = NULL; - send_wr.wr_id = (unsigned long long)&send_wr; - send_wr.opcode = IB_WR_RDMA_WRITE; - - ret = hns_roce_v1_post_send(&hr_qp->ibqp, &send_wr, &bad_wr); - if (ret) { - dev_err(dev, "Post write wqe for mr free failed(%d)!", ret); - return ret; - } - - return 0; -} - -static void hns_roce_v1_mr_free_work_fn(struct work_struct *work) -{ - unsigned long end = - msecs_to_jiffies(HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS) + jiffies; - struct hns_roce_mr_free_work *mr_work = - container_of(work, struct hns_roce_mr_free_work, work); - struct hns_roce_dev *hr_dev = to_hr_dev(mr_work->ib_dev); - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct hns_roce_cq *mr_free_cq = free_mr->mr_free_cq; - struct hns_roce_mr *hr_mr = mr_work->mr; - struct device *dev = &hr_dev->pdev->dev; - struct ib_wc wc[HNS_ROCE_V1_RESV_QP]; - struct hns_roce_qp *hr_qp; - int ne = 0; - int ret; - int i; - - for (i = 0; i < HNS_ROCE_V1_RESV_QP; i++) { - hr_qp = free_mr->mr_free_qp[i]; - if (!hr_qp) - continue; - ne++; - - ret = hns_roce_v1_send_lp_wqe(hr_qp); - if (ret) { - dev_err(dev, - "Send wqe (qp:0x%lx) for mr free failed(%d)!\n", - hr_qp->qpn, ret); - goto free_work; - } - } - - if (!ne) { - dev_err(dev, "Reserved loop qp is absent!\n"); - goto free_work; - } - - do { - ret = hns_roce_v1_poll_cq(&mr_free_cq->ib_cq, ne, wc); - if (ret < 0 && hr_qp) { - 
dev_err(dev, - "(qp:0x%lx) starts, Poll cqe failed(%d) for mr 0x%x free! Remain %d cqe\n", - hr_qp->qpn, ret, hr_mr->key, ne); - goto free_work; - } - ne -= ret; - usleep_range(HNS_ROCE_V1_FREE_MR_WAIT_VALUE * 1000, - (1 + HNS_ROCE_V1_FREE_MR_WAIT_VALUE) * 1000); - } while (ne && time_before_eq(jiffies, end)); - - if (ne != 0) - dev_err(dev, - "Poll cqe for mr 0x%x free timeout! Remain %d cqe\n", - hr_mr->key, ne); - -free_work: - if (mr_work->comp_flag) - complete(mr_work->comp); - kfree(mr_work); -} - -static int hns_roce_v1_dereg_mr(struct hns_roce_dev *hr_dev, - struct hns_roce_mr *mr, struct ib_udata *udata) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - long end = HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS; - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_mr_free_work *mr_work; - unsigned long start = jiffies; - struct completion comp; - int ret = 0; - - if (mr->enabled) { - if (hns_roce_hw_destroy_mpt(hr_dev, NULL, - key_to_hw_index(mr->key) & - (hr_dev->caps.num_mtpts - 1))) - dev_warn(dev, "DESTROY_MPT failed!\n"); - } - - mr_work = kzalloc(sizeof(*mr_work), GFP_KERNEL); - if (!mr_work) { - ret = -ENOMEM; - goto free_mr; - } - - INIT_WORK(&(mr_work->work), hns_roce_v1_mr_free_work_fn); - - mr_work->ib_dev = &(hr_dev->ib_dev); - mr_work->comp = ∁ - mr_work->comp_flag = 1; - mr_work->mr = (void *)mr; - init_completion(mr_work->comp); - - queue_work(free_mr->free_mr_wq, &(mr_work->work)); - - while (end > 0) { - if (try_wait_for_completion(&comp)) - goto free_mr; - msleep(HNS_ROCE_V1_FREE_MR_WAIT_VALUE); - end -= HNS_ROCE_V1_FREE_MR_WAIT_VALUE; - } - - mr_work->comp_flag = 0; - if (try_wait_for_completion(&comp)) - goto free_mr; - - dev_warn(dev, "Free mr work 0x%x over 50s and failed!\n", mr->key); - ret = -ETIMEDOUT; - -free_mr: - dev_dbg(dev, "Free mr 0x%x use 0x%x us.\n", - mr->key, jiffies_to_usecs(jiffies) - jiffies_to_usecs(start)); - - ida_free(&hr_dev->mr_table.mtpt_ida.ida, (int)key_to_hw_index(mr->key)); - hns_roce_mtr_destroy(hr_dev, &mr->pbl_mtr); - kfree(mr); - - return ret; -} - -static void hns_roce_db_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_db_table *db = &priv->db_table; - struct device *dev = &hr_dev->pdev->dev; - - if (db->sdb_ext_mod) { - dma_free_coherent(dev, HNS_ROCE_V1_EXT_SDB_SIZE, - db->ext_db->sdb_buf_list->buf, - db->ext_db->sdb_buf_list->map); - kfree(db->ext_db->sdb_buf_list); - } - - if (db->odb_ext_mod) { - dma_free_coherent(dev, HNS_ROCE_V1_EXT_ODB_SIZE, - db->ext_db->odb_buf_list->buf, - db->ext_db->odb_buf_list->map); - kfree(db->ext_db->odb_buf_list); - } - - kfree(db->ext_db); -} - -static int hns_roce_raq_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_raq_table *raq = &priv->raq_table; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t addr; - int raq_shift; - __le32 tmp; - u32 val; - int ret; - - raq->e_raq_buf = kzalloc(sizeof(*(raq->e_raq_buf)), GFP_KERNEL); - if (!raq->e_raq_buf) - return -ENOMEM; - - raq->e_raq_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, - &addr, GFP_KERNEL); - if (!raq->e_raq_buf->buf) { - ret = -ENOMEM; - goto err_dma_alloc_raq; - } - raq->e_raq_buf->map = addr; - - /* Configure raq extended address. 
48bit 4K align */ - roce_write(hr_dev, ROCEE_EXT_RAQ_REG, raq->e_raq_buf->map >> 12); - - /* Configure raq_shift */ - raq_shift = ilog2(HNS_ROCE_V1_RAQ_SIZE / HNS_ROCE_V1_RAQ_ENTRY); - val = roce_read(hr_dev, ROCEE_EXT_RAQ_H_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_M, - ROCEE_EXT_RAQ_H_EXT_RAQ_SHIFT_S, raq_shift); - /* - * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of - * using 4K page, and shift more 32 because of - * calculating the high 32 bit value evaluated to hardware. - */ - roce_set_field(tmp, ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_M, - ROCEE_EXT_RAQ_H_EXT_RAQ_BA_H_S, - raq->e_raq_buf->map >> 44); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_EXT_RAQ_H_REG, val); - dev_dbg(dev, "Configure raq_shift 0x%x.\n", val); - - /* Configure raq threshold */ - val = roce_read(hr_dev, ROCEE_RAQ_WL_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_RAQ_WL_ROCEE_RAQ_WL_M, - ROCEE_RAQ_WL_ROCEE_RAQ_WL_S, - HNS_ROCE_V1_EXT_RAQ_WF); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_RAQ_WL_REG, val); - dev_dbg(dev, "Configure raq_wl 0x%x.\n", val); - - /* Enable extend raq */ - val = roce_read(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_M, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_POL_TIME_INTERVAL_S, - POL_TIME_INTERVAL_VAL); - roce_set_bit(tmp, ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_EXT_RAQ_MODE, 1); - roce_set_field(tmp, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_M, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_CFG_S, - 2); - roce_set_bit(tmp, - ROCEE_WRMS_POL_TIME_INTERVAL_WRMS_RAQ_TIMEOUT_CHK_EN_S, 1); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_WRMS_POL_TIME_INTERVAL_REG, val); - dev_dbg(dev, "Configure WrmsPolTimeInterval 0x%x.\n", val); - - /* Enable raq drop */ - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - tmp = cpu_to_le32(val); - roce_set_bit(tmp, ROCEE_GLB_CFG_TRP_RAQ_DROP_EN_S, 1); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); - dev_dbg(dev, "Configure GlbCfg = 0x%x.\n", val); - - return 0; - -err_dma_alloc_raq: - kfree(raq->e_raq_buf); - return ret; -} - -static void hns_roce_raq_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_raq_table *raq = &priv->raq_table; - struct device *dev = &hr_dev->pdev->dev; - - dma_free_coherent(dev, HNS_ROCE_V1_RAQ_SIZE, raq->e_raq_buf->buf, - raq->e_raq_buf->map); - kfree(raq->e_raq_buf); -} - -static void hns_roce_port_enable(struct hns_roce_dev *hr_dev, int enable_flag) -{ - __le32 tmp; - u32 val; - - if (enable_flag) { - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - /* Open all ports */ - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M, - ROCEE_GLB_CFG_ROCEE_PORT_ST_S, - ALL_PORT_VAL_OPEN); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); - } else { - val = roce_read(hr_dev, ROCEE_GLB_CFG_REG); - /* Close all ports */ - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_GLB_CFG_ROCEE_PORT_ST_M, - ROCEE_GLB_CFG_ROCEE_PORT_ST_S, 0x0); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_GLB_CFG_REG, val); - } -} - -static int hns_roce_bt_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct device *dev = &hr_dev->pdev->dev; - int ret; - - priv->bt_table.qpc_buf.buf = dma_alloc_coherent(dev, - HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.qpc_buf.map, - GFP_KERNEL); - if 
(!priv->bt_table.qpc_buf.buf) - return -ENOMEM; - - priv->bt_table.mtpt_buf.buf = dma_alloc_coherent(dev, - HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.mtpt_buf.map, - GFP_KERNEL); - if (!priv->bt_table.mtpt_buf.buf) { - ret = -ENOMEM; - goto err_failed_alloc_mtpt_buf; - } - - priv->bt_table.cqc_buf.buf = dma_alloc_coherent(dev, - HNS_ROCE_BT_RSV_BUF_SIZE, &priv->bt_table.cqc_buf.map, - GFP_KERNEL); - if (!priv->bt_table.cqc_buf.buf) { - ret = -ENOMEM; - goto err_failed_alloc_cqc_buf; - } - - return 0; - -err_failed_alloc_cqc_buf: - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); - -err_failed_alloc_mtpt_buf: - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); - - return ret; -} - -static void hns_roce_bt_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct device *dev = &hr_dev->pdev->dev; - - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.cqc_buf.buf, priv->bt_table.cqc_buf.map); - - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.mtpt_buf.buf, priv->bt_table.mtpt_buf.map); - - dma_free_coherent(dev, HNS_ROCE_BT_RSV_BUF_SIZE, - priv->bt_table.qpc_buf.buf, priv->bt_table.qpc_buf.map); -} - -static int hns_roce_tptr_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf; - struct device *dev = &hr_dev->pdev->dev; - - /* - * This buffer will be used for CQ's tptr(tail pointer), also - * named ci(customer index). Every CQ will use 2 bytes to save - * cqe ci in hip06. Hardware will read this area to get new ci - * when the queue is almost full. - */ - tptr_buf->buf = dma_alloc_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE, - &tptr_buf->map, GFP_KERNEL); - if (!tptr_buf->buf) - return -ENOMEM; - - hr_dev->tptr_dma_addr = tptr_buf->map; - hr_dev->tptr_size = HNS_ROCE_V1_TPTR_BUF_SIZE; - - return 0; -} - -static void hns_roce_tptr_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf; - struct device *dev = &hr_dev->pdev->dev; - - dma_free_coherent(dev, HNS_ROCE_V1_TPTR_BUF_SIZE, - tptr_buf->buf, tptr_buf->map); -} - -static int hns_roce_free_mr_init(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - struct device *dev = &hr_dev->pdev->dev; - int ret; - - free_mr->free_mr_wq = create_singlethread_workqueue("hns_roce_free_mr"); - if (!free_mr->free_mr_wq) { - dev_err(dev, "Create free mr workqueue failed!\n"); - return -ENOMEM; - } - - ret = hns_roce_v1_rsv_lp_qp(hr_dev); - if (ret) { - dev_err(dev, "Reserved loop qp failed(%d)!\n", ret); - destroy_workqueue(free_mr->free_mr_wq); - } - - return ret; -} - -static void hns_roce_free_mr_free(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_free_mr *free_mr = &priv->free_mr; - - destroy_workqueue(free_mr->free_mr_wq); - - hns_roce_v1_release_lp_qp(hr_dev); -} - -/** - * hns_roce_v1_reset - reset RoCE - * @hr_dev: RoCE device struct pointer - * @dereset: true -- drop reset, false -- reset - * return 0 - success , negative --fail - */ -static int hns_roce_v1_reset(struct hns_roce_dev *hr_dev, bool dereset) -{ - struct device_node *dsaf_node; - struct device *dev = &hr_dev->pdev->dev; - struct device_node *np = dev->of_node; - struct fwnode_handle 
*fwnode; - int ret; - - /* check if this is DT/ACPI case */ - if (dev_of_node(dev)) { - dsaf_node = of_parse_phandle(np, "dsaf-handle", 0); - if (!dsaf_node) { - dev_err(dev, "could not find dsaf-handle\n"); - return -EINVAL; - } - fwnode = &dsaf_node->fwnode; - } else if (is_acpi_device_node(dev->fwnode)) { - struct fwnode_reference_args args; - - ret = acpi_node_get_property_reference(dev->fwnode, - "dsaf-handle", 0, &args); - if (ret) { - dev_err(dev, "could not find dsaf-handle\n"); - return ret; - } - fwnode = args.fwnode; - } else { - dev_err(dev, "cannot read data from DT or ACPI\n"); - return -ENXIO; - } - - ret = hns_dsaf_roce_reset(fwnode, false); - if (ret) - return ret; - - if (dereset) { - msleep(SLEEP_TIME_INTERVAL); - ret = hns_dsaf_roce_reset(fwnode, true); - } - - return ret; -} - -static int hns_roce_v1_profile(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_caps *caps = &hr_dev->caps; - int i; - - hr_dev->vendor_id = roce_read(hr_dev, ROCEE_VENDOR_ID_REG); - hr_dev->vendor_part_id = roce_read(hr_dev, ROCEE_VENDOR_PART_ID_REG); - hr_dev->sys_image_guid = roce_read(hr_dev, ROCEE_SYS_IMAGE_GUID_L_REG) | - ((u64)roce_read(hr_dev, - ROCEE_SYS_IMAGE_GUID_H_REG) << 32); - hr_dev->hw_rev = HNS_ROCE_HW_VER1; - - caps->num_qps = HNS_ROCE_V1_MAX_QP_NUM; - caps->max_wqes = HNS_ROCE_V1_MAX_WQE_NUM; - caps->min_wqes = HNS_ROCE_MIN_WQE_NUM; - caps->num_cqs = HNS_ROCE_V1_MAX_CQ_NUM; - caps->min_cqes = HNS_ROCE_MIN_CQE_NUM; - caps->max_cqes = HNS_ROCE_V1_MAX_CQE_NUM; - caps->max_sq_sg = HNS_ROCE_V1_SG_NUM; - caps->max_rq_sg = HNS_ROCE_V1_SG_NUM; - caps->max_sq_inline = HNS_ROCE_V1_INLINE_SIZE; - caps->num_uars = HNS_ROCE_V1_UAR_NUM; - caps->phy_num_uars = HNS_ROCE_V1_PHY_UAR_NUM; - caps->num_aeq_vectors = HNS_ROCE_V1_AEQE_VEC_NUM; - caps->num_comp_vectors = HNS_ROCE_V1_COMP_VEC_NUM; - caps->num_other_vectors = HNS_ROCE_V1_ABNORMAL_VEC_NUM; - caps->num_mtpts = HNS_ROCE_V1_MAX_MTPT_NUM; - caps->num_mtt_segs = HNS_ROCE_V1_MAX_MTT_SEGS; - caps->num_pds = HNS_ROCE_V1_MAX_PD_NUM; - caps->max_qp_init_rdma = HNS_ROCE_V1_MAX_QP_INIT_RDMA; - caps->max_qp_dest_rdma = HNS_ROCE_V1_MAX_QP_DEST_RDMA; - caps->max_sq_desc_sz = HNS_ROCE_V1_MAX_SQ_DESC_SZ; - caps->max_rq_desc_sz = HNS_ROCE_V1_MAX_RQ_DESC_SZ; - caps->qpc_sz = HNS_ROCE_V1_QPC_SIZE; - caps->irrl_entry_sz = HNS_ROCE_V1_IRRL_ENTRY_SIZE; - caps->cqc_entry_sz = HNS_ROCE_V1_CQC_ENTRY_SIZE; - caps->mtpt_entry_sz = HNS_ROCE_V1_MTPT_ENTRY_SIZE; - caps->mtt_entry_sz = HNS_ROCE_V1_MTT_ENTRY_SIZE; - caps->cqe_sz = HNS_ROCE_V1_CQE_SIZE; - caps->page_size_cap = HNS_ROCE_V1_PAGE_SIZE_SUPPORT; - caps->reserved_lkey = 0; - caps->reserved_pds = 0; - caps->reserved_mrws = 1; - caps->reserved_uars = 0; - caps->reserved_cqs = 0; - caps->reserved_qps = 12; /* 2 SQP per port, six ports total 12 */ - caps->chunk_sz = HNS_ROCE_V1_TABLE_CHUNK_SIZE; - - for (i = 0; i < caps->num_ports; i++) - caps->pkey_table_len[i] = 1; - - for (i = 0; i < caps->num_ports; i++) { - /* Six ports shared 16 GID in v1 engine */ - if (i >= (HNS_ROCE_V1_GID_NUM % caps->num_ports)) - caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM / - caps->num_ports; - else - caps->gid_table_len[i] = HNS_ROCE_V1_GID_NUM / - caps->num_ports + 1; - } - - caps->ceqe_depth = HNS_ROCE_V1_COMP_EQE_NUM; - caps->aeqe_depth = HNS_ROCE_V1_ASYNC_EQE_NUM; - caps->local_ca_ack_delay = roce_read(hr_dev, ROCEE_ACK_DELAY_REG); - caps->max_mtu = IB_MTU_2048; - - return 0; -} - -static int hns_roce_v1_init(struct hns_roce_dev *hr_dev) -{ - int ret; - u32 val; - __le32 tmp; - struct device *dev = &hr_dev->pdev->dev; - - /* 
DMAE user config */ - val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG1_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_M, - ROCEE_DMAE_USER_CFG1_ROCEE_CACHE_TB_CFG_S, 0xf); - roce_set_field(tmp, ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_M, - ROCEE_DMAE_USER_CFG1_ROCEE_STREAM_ID_TB_CFG_S, - 1 << PAGES_SHIFT_16); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_DMAE_USER_CFG1_REG, val); - - val = roce_read(hr_dev, ROCEE_DMAE_USER_CFG2_REG); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_M, - ROCEE_DMAE_USER_CFG2_ROCEE_CACHE_PKT_CFG_S, 0xf); - roce_set_field(tmp, ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_M, - ROCEE_DMAE_USER_CFG2_ROCEE_STREAM_ID_PKT_CFG_S, - 1 << PAGES_SHIFT_16); - - ret = hns_roce_db_init(hr_dev); - if (ret) { - dev_err(dev, "doorbell init failed!\n"); - return ret; - } - - ret = hns_roce_raq_init(hr_dev); - if (ret) { - dev_err(dev, "raq init failed!\n"); - goto error_failed_raq_init; - } - - ret = hns_roce_bt_init(hr_dev); - if (ret) { - dev_err(dev, "bt init failed!\n"); - goto error_failed_bt_init; - } - - ret = hns_roce_tptr_init(hr_dev); - if (ret) { - dev_err(dev, "tptr init failed!\n"); - goto error_failed_tptr_init; - } - - ret = hns_roce_free_mr_init(hr_dev); - if (ret) { - dev_err(dev, "free mr init failed!\n"); - goto error_failed_free_mr_init; - } - - hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_UP); - - return 0; - -error_failed_free_mr_init: - hns_roce_tptr_free(hr_dev); - -error_failed_tptr_init: - hns_roce_bt_free(hr_dev); - -error_failed_bt_init: - hns_roce_raq_free(hr_dev); - -error_failed_raq_init: - hns_roce_db_free(hr_dev); - return ret; -} - -static void hns_roce_v1_exit(struct hns_roce_dev *hr_dev) -{ - hns_roce_port_enable(hr_dev, HNS_ROCE_PORT_DOWN); - hns_roce_free_mr_free(hr_dev); - hns_roce_tptr_free(hr_dev); - hns_roce_bt_free(hr_dev); - hns_roce_raq_free(hr_dev); - hns_roce_db_free(hr_dev); -} - -static int hns_roce_v1_cmd_pending(struct hns_roce_dev *hr_dev) -{ - u32 status = readl(hr_dev->reg_base + ROCEE_MB6_REG); - - return (!!(status & (1 << HCR_GO_BIT))); -} - -static int hns_roce_v1_post_mbox(struct hns_roce_dev *hr_dev, u64 in_param, - u64 out_param, u32 in_modifier, u8 op_modifier, - u16 op, u16 token, int event) -{ - u32 __iomem *hcr = (u32 __iomem *)(hr_dev->reg_base + ROCEE_MB1_REG); - unsigned long end; - u32 val = 0; - __le32 tmp; - - end = msecs_to_jiffies(GO_BIT_TIMEOUT_MSECS) + jiffies; - while (hns_roce_v1_cmd_pending(hr_dev)) { - if (time_after(jiffies, end)) { - dev_err(hr_dev->dev, "jiffies=%d end=%d\n", - (int)jiffies, (int)end); - return -EAGAIN; - } - cond_resched(); - } - - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_M, ROCEE_MB6_ROCEE_MB_CMD_S, - op); - roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_CMD_MDF_M, - ROCEE_MB6_ROCEE_MB_CMD_MDF_S, op_modifier); - roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_EVENT_S, event); - roce_set_bit(tmp, ROCEE_MB6_ROCEE_MB_HW_RUN_S, 1); - roce_set_field(tmp, ROCEE_MB6_ROCEE_MB_TOKEN_M, - ROCEE_MB6_ROCEE_MB_TOKEN_S, token); - - val = le32_to_cpu(tmp); - writeq(in_param, hcr + 0); - writeq(out_param, hcr + 2); - writel(in_modifier, hcr + 4); - /* Memory barrier */ - wmb(); - - writel(val, hcr + 5); - - return 0; -} - -static int hns_roce_v1_chk_mbox(struct hns_roce_dev *hr_dev, - unsigned int timeout) -{ - u8 __iomem *hcr = hr_dev->reg_base + ROCEE_MB1_REG; - unsigned long end; - u32 status = 0; - - end = msecs_to_jiffies(timeout) + jiffies; - while (hns_roce_v1_cmd_pending(hr_dev) && 
time_before(jiffies, end)) - cond_resched(); - - if (hns_roce_v1_cmd_pending(hr_dev)) { - dev_err(hr_dev->dev, "[cmd_poll]hw run cmd TIMEDOUT!\n"); - return -ETIMEDOUT; - } - - status = le32_to_cpu((__force __le32) - __raw_readl(hcr + HCR_STATUS_OFFSET)); - if ((status & STATUS_MASK) != 0x1) { - dev_err(hr_dev->dev, "mailbox status 0x%x!\n", status); - return -EBUSY; - } - - return 0; -} - -static int hns_roce_v1_set_gid(struct hns_roce_dev *hr_dev, u32 port, - int gid_index, const union ib_gid *gid, - const struct ib_gid_attr *attr) -{ - unsigned long flags; - u32 *p = NULL; - u8 gid_idx; - - gid_idx = hns_get_gid_index(hr_dev, port, gid_index); - - spin_lock_irqsave(&hr_dev->iboe.lock, flags); - - p = (u32 *)&gid->raw[0]; - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_L_0_REG + - (HNS_ROCE_V1_GID_NUM * gid_idx)); - - p = (u32 *)&gid->raw[4]; - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_ML_0_REG + - (HNS_ROCE_V1_GID_NUM * gid_idx)); - - p = (u32 *)&gid->raw[8]; - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_MH_0_REG + - (HNS_ROCE_V1_GID_NUM * gid_idx)); - - p = (u32 *)&gid->raw[0xc]; - roce_raw_write(*p, hr_dev->reg_base + ROCEE_PORT_GID_H_0_REG + - (HNS_ROCE_V1_GID_NUM * gid_idx)); - - spin_unlock_irqrestore(&hr_dev->iboe.lock, flags); - - return 0; -} - -static int hns_roce_v1_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port, - u8 *addr) -{ - u32 reg_smac_l; - u16 reg_smac_h; - __le32 tmp; - u16 *p_h; - u32 *p; - u32 val; - - /* - * When mac changed, loopback may fail - * because of smac not equal to dmac. - * We Need to release and create reserved qp again. - */ - if (hr_dev->hw->dereg_mr) { - int ret; - - ret = hns_roce_v1_recreate_lp_qp(hr_dev); - if (ret && ret != -ETIMEDOUT) - return ret; - } - - p = (u32 *)(&addr[0]); - reg_smac_l = *p; - roce_raw_write(reg_smac_l, hr_dev->reg_base + ROCEE_SMAC_L_0_REG + - PHY_PORT_OFFSET * phy_port); - - val = roce_read(hr_dev, - ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET); - tmp = cpu_to_le32(val); - p_h = (u16 *)(&addr[4]); - reg_smac_h = *p_h; - roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_SMAC_H_M, - ROCEE_SMAC_H_ROCEE_SMAC_H_S, reg_smac_h); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET, - val); - - return 0; -} - -static void hns_roce_v1_set_mtu(struct hns_roce_dev *hr_dev, u8 phy_port, - enum ib_mtu mtu) -{ - __le32 tmp; - u32 val; - - val = roce_read(hr_dev, - ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET); - tmp = cpu_to_le32(val); - roce_set_field(tmp, ROCEE_SMAC_H_ROCEE_PORT_MTU_M, - ROCEE_SMAC_H_ROCEE_PORT_MTU_S, mtu); - val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_SMAC_H_0_REG + phy_port * PHY_PORT_OFFSET, - val); -} - -static int hns_roce_v1_write_mtpt(struct hns_roce_dev *hr_dev, void *mb_buf, - struct hns_roce_mr *mr, - unsigned long mtpt_idx) -{ - u64 pages[HNS_ROCE_MAX_INNER_MTPT_NUM] = { 0 }; - struct ib_device *ibdev = &hr_dev->ib_dev; - struct hns_roce_v1_mpt_entry *mpt_entry; - dma_addr_t pbl_ba; - int count; - int i; - - /* MPT filled into mailbox buf */ - mpt_entry = (struct hns_roce_v1_mpt_entry *)mb_buf; - memset(mpt_entry, 0, sizeof(*mpt_entry)); - - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_STATE_M, - MPT_BYTE_4_KEY_STATE_S, KEY_VALID); - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_KEY_M, - MPT_BYTE_4_KEY_S, mr->key); - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_PAGE_SIZE_M, - MPT_BYTE_4_PAGE_SIZE_S, MR_SIZE_4K); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_MW_TYPE_S, 0); - roce_set_bit(mpt_entry->mpt_byte_4, 
MPT_BYTE_4_MW_BIND_ENABLE_S, - (mr->access & IB_ACCESS_MW_BIND ? 1 : 0)); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_OWN_S, 0); - roce_set_field(mpt_entry->mpt_byte_4, MPT_BYTE_4_MEMORY_LOCATION_TYPE_M, - MPT_BYTE_4_MEMORY_LOCATION_TYPE_S, mr->type); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_ATOMIC_S, 0); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_LOCAL_WRITE_S, - (mr->access & IB_ACCESS_LOCAL_WRITE ? 1 : 0)); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_WRITE_S, - (mr->access & IB_ACCESS_REMOTE_WRITE ? 1 : 0)); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_READ_S, - (mr->access & IB_ACCESS_REMOTE_READ ? 1 : 0)); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_REMOTE_INVAL_ENABLE_S, - 0); - roce_set_bit(mpt_entry->mpt_byte_4, MPT_BYTE_4_ADDRESS_TYPE_S, 0); - - roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M, - MPT_BYTE_12_PBL_ADDR_H_S, 0); - roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_MW_BIND_COUNTER_M, - MPT_BYTE_12_MW_BIND_COUNTER_S, 0); - - mpt_entry->virt_addr_l = cpu_to_le32((u32)mr->iova); - mpt_entry->virt_addr_h = cpu_to_le32((u32)(mr->iova >> 32)); - mpt_entry->length = cpu_to_le32((u32)mr->size); - - roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_PD_M, - MPT_BYTE_28_PD_S, mr->pd); - roce_set_field(mpt_entry->mpt_byte_28, MPT_BYTE_28_L_KEY_IDX_L_M, - MPT_BYTE_28_L_KEY_IDX_L_S, mtpt_idx); - roce_set_field(mpt_entry->mpt_byte_64, MPT_BYTE_64_L_KEY_IDX_H_M, - MPT_BYTE_64_L_KEY_IDX_H_S, mtpt_idx >> MTPT_IDX_SHIFT); - - /* DMA memory register */ - if (mr->type == MR_TYPE_DMA) - return 0; - - count = hns_roce_mtr_find(hr_dev, &mr->pbl_mtr, 0, pages, - ARRAY_SIZE(pages), &pbl_ba); - if (count < 1) { - ibdev_err(ibdev, "failed to find PBL mtr, count = %d.", count); - return -ENOBUFS; - } - - /* Register user mr */ - for (i = 0; i < count; i++) { - switch (i) { - case 0: - mpt_entry->pa0_l = cpu_to_le32((u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_36, - MPT_BYTE_36_PA0_H_M, - MPT_BYTE_36_PA0_H_S, - (u32)(pages[i] >> PAGES_SHIFT_32)); - break; - case 1: - roce_set_field(mpt_entry->mpt_byte_36, - MPT_BYTE_36_PA1_L_M, - MPT_BYTE_36_PA1_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_40, - MPT_BYTE_40_PA1_H_M, - MPT_BYTE_40_PA1_H_S, - (u32)(pages[i] >> PAGES_SHIFT_24)); - break; - case 2: - roce_set_field(mpt_entry->mpt_byte_40, - MPT_BYTE_40_PA2_L_M, - MPT_BYTE_40_PA2_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_44, - MPT_BYTE_44_PA2_H_M, - MPT_BYTE_44_PA2_H_S, - (u32)(pages[i] >> PAGES_SHIFT_16)); - break; - case 3: - roce_set_field(mpt_entry->mpt_byte_44, - MPT_BYTE_44_PA3_L_M, - MPT_BYTE_44_PA3_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_48, - MPT_BYTE_48_PA3_H_M, - MPT_BYTE_48_PA3_H_S, - (u32)(pages[i] >> PAGES_SHIFT_8)); - break; - case 4: - mpt_entry->pa4_l = cpu_to_le32((u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_56, - MPT_BYTE_56_PA4_H_M, - MPT_BYTE_56_PA4_H_S, - (u32)(pages[i] >> PAGES_SHIFT_32)); - break; - case 5: - roce_set_field(mpt_entry->mpt_byte_56, - MPT_BYTE_56_PA5_L_M, - MPT_BYTE_56_PA5_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_60, - MPT_BYTE_60_PA5_H_M, - MPT_BYTE_60_PA5_H_S, - (u32)(pages[i] >> PAGES_SHIFT_24)); - break; - case 6: - roce_set_field(mpt_entry->mpt_byte_60, - MPT_BYTE_60_PA6_L_M, - MPT_BYTE_60_PA6_L_S, (u32)(pages[i])); - roce_set_field(mpt_entry->mpt_byte_64, - MPT_BYTE_64_PA6_H_M, - MPT_BYTE_64_PA6_H_S, - (u32)(pages[i] >> PAGES_SHIFT_16)); - break; - default: - break; - } - } - - mpt_entry->pbl_addr_l 
= cpu_to_le32(pbl_ba); - roce_set_field(mpt_entry->mpt_byte_12, MPT_BYTE_12_PBL_ADDR_H_M, - MPT_BYTE_12_PBL_ADDR_H_S, upper_32_bits(pbl_ba)); - - return 0; -} - -static void *get_cqe(struct hns_roce_cq *hr_cq, int n) -{ - return hns_roce_buf_offset(hr_cq->mtr.kmem, n * HNS_ROCE_V1_CQE_SIZE); -} - -static void *get_sw_cqe(struct hns_roce_cq *hr_cq, int n) -{ - struct hns_roce_cqe *hr_cqe = get_cqe(hr_cq, n & hr_cq->ib_cq.cqe); - - /* Get cqe when Owner bit is Conversely with the MSB of cons_idx */ - return (roce_get_bit(hr_cqe->cqe_byte_4, CQE_BYTE_4_OWNER_S) ^ - !!(n & hr_cq->cq_depth)) ? hr_cqe : NULL; -} - -static struct hns_roce_cqe *next_cqe_sw(struct hns_roce_cq *hr_cq) -{ - return get_sw_cqe(hr_cq, hr_cq->cons_index); -} - -static void hns_roce_v1_cq_set_ci(struct hns_roce_cq *hr_cq, u32 cons_index) -{ - __le32 doorbell[2]; - - doorbell[0] = cpu_to_le32(cons_index & ((hr_cq->cq_depth << 1) - 1)); - doorbell[1] = 0; - roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 0); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, hr_cq->cqn); - - hns_roce_write64_k(doorbell, hr_cq->db_reg); -} - -static void __hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, - struct hns_roce_srq *srq) -{ - struct hns_roce_cqe *cqe, *dest; - u32 prod_index; - int nfreed = 0; - u8 owner_bit; - - for (prod_index = hr_cq->cons_index; get_sw_cqe(hr_cq, prod_index); - ++prod_index) { - if (prod_index == hr_cq->cons_index + hr_cq->ib_cq.cqe) - break; - } - - /* - * Now backwards through the CQ, removing CQ entries - * that match our QP by overwriting them with next entries. - */ - while ((int) --prod_index - (int) hr_cq->cons_index >= 0) { - cqe = get_cqe(hr_cq, prod_index & hr_cq->ib_cq.cqe); - if ((roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, - CQE_BYTE_16_LOCAL_QPN_S) & - HNS_ROCE_CQE_QPN_MASK) == qpn) { - /* In v1 engine, not support SRQ */ - ++nfreed; - } else if (nfreed) { - dest = get_cqe(hr_cq, (prod_index + nfreed) & - hr_cq->ib_cq.cqe); - owner_bit = roce_get_bit(dest->cqe_byte_4, - CQE_BYTE_4_OWNER_S); - memcpy(dest, cqe, sizeof(*cqe)); - roce_set_bit(dest->cqe_byte_4, CQE_BYTE_4_OWNER_S, - owner_bit); - } - } - - if (nfreed) { - hr_cq->cons_index += nfreed; - hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index); - } -} - -static void hns_roce_v1_cq_clean(struct hns_roce_cq *hr_cq, u32 qpn, - struct hns_roce_srq *srq) -{ - spin_lock_irq(&hr_cq->lock); - __hns_roce_v1_cq_clean(hr_cq, qpn, srq); - spin_unlock_irq(&hr_cq->lock); -} - -static void hns_roce_v1_write_cqc(struct hns_roce_dev *hr_dev, - struct hns_roce_cq *hr_cq, void *mb_buf, - u64 *mtts, dma_addr_t dma_handle) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct hns_roce_buf_list *tptr_buf = &priv->tptr_table.tptr_buf; - struct hns_roce_cq_context *cq_context = mb_buf; - dma_addr_t tptr_dma_addr; - int offset; - - memset(cq_context, 0, sizeof(*cq_context)); - - /* Get the tptr for this CQ. 
*/ - offset = hr_cq->cqn * HNS_ROCE_V1_TPTR_ENTRY_SIZE; - tptr_dma_addr = tptr_buf->map + offset; - hr_cq->tptr_addr = (u16 *)(tptr_buf->buf + offset); - - /* Register cq_context members */ - roce_set_field(cq_context->cqc_byte_4, - CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M, - CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S, CQ_STATE_VALID); - roce_set_field(cq_context->cqc_byte_4, CQ_CONTEXT_CQC_BYTE_4_CQN_M, - CQ_CONTEXT_CQC_BYTE_4_CQN_S, hr_cq->cqn); - - cq_context->cq_bt_l = cpu_to_le32((u32)dma_handle); - - roce_set_field(cq_context->cqc_byte_12, - CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M, - CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S, - ((u64)dma_handle >> 32)); - roce_set_field(cq_context->cqc_byte_12, - CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M, - CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S, - ilog2(hr_cq->cq_depth)); - roce_set_field(cq_context->cqc_byte_12, CQ_CONTEXT_CQC_BYTE_12_CEQN_M, - CQ_CONTEXT_CQC_BYTE_12_CEQN_S, hr_cq->vector); - - cq_context->cur_cqe_ba0_l = cpu_to_le32((u32)(mtts[0])); - - roce_set_field(cq_context->cqc_byte_20, - CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M, - CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S, (mtts[0]) >> 32); - /* Dedicated hardware, directly set 0 */ - roce_set_field(cq_context->cqc_byte_20, - CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M, - CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S, 0); - /** - * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of - * using 4K page, and shift more 32 because of - * calculating the high 32 bit value evaluated to hardware. - */ - roce_set_field(cq_context->cqc_byte_20, - CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M, - CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S, - tptr_dma_addr >> 44); - - cq_context->cqe_tptr_addr_l = cpu_to_le32((u32)(tptr_dma_addr >> 12)); - - roce_set_field(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M, - CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S, 0); - roce_set_bit(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S, 0); - roce_set_bit(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S, 0); - roce_set_bit(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S, 0); - roce_set_bit(cq_context->cqc_byte_32, - CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S, - 0); - /* The initial value of cq's ci is 0 */ - roce_set_field(cq_context->cqc_byte_32, - CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M, - CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S, 0); -} - -static int hns_roce_v1_req_notify_cq(struct ib_cq *ibcq, - enum ib_cq_notify_flags flags) -{ - struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); - u32 notification_flag; - __le32 doorbell[2] = {}; - - notification_flag = (flags & IB_CQ_SOLICITED_MASK) == - IB_CQ_SOLICITED ? 
CQ_DB_REQ_NOT : CQ_DB_REQ_NOT_SOL; - /* - * flags = 0; Notification Flag = 1, next - * flags = 1; Notification Flag = 0, solocited - */ - doorbell[0] = - cpu_to_le32(hr_cq->cons_index & ((hr_cq->cq_depth << 1) - 1)); - roce_set_bit(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_HW_SYNS_S, 1); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_S, 3); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_CMD_MDF_S, 1); - roce_set_field(doorbell[1], ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_M, - ROCEE_DB_OTHERS_H_ROCEE_DB_OTH_INP_H_S, - hr_cq->cqn | notification_flag); - - hns_roce_write64_k(doorbell, hr_cq->db_reg); - - return 0; -} - -static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq, - struct hns_roce_qp **cur_qp, struct ib_wc *wc) -{ - int qpn; - int is_send; - u16 wqe_ctr; - u32 status; - u32 opcode; - struct hns_roce_cqe *cqe; - struct hns_roce_qp *hr_qp; - struct hns_roce_wq *wq; - struct hns_roce_wqe_ctrl_seg *sq_wqe; - struct hns_roce_dev *hr_dev = to_hr_dev(hr_cq->ib_cq.device); - struct device *dev = &hr_dev->pdev->dev; - - /* Find cqe according consumer index */ - cqe = next_cqe_sw(hr_cq); - if (!cqe) - return -EAGAIN; - - ++hr_cq->cons_index; - /* Memory barrier */ - rmb(); - /* 0->SQ, 1->RQ */ - is_send = !(roce_get_bit(cqe->cqe_byte_4, CQE_BYTE_4_SQ_RQ_FLAG_S)); - - /* Local_qpn in UD cqe is always 1, so it needs to compute new qpn */ - if (roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, - CQE_BYTE_16_LOCAL_QPN_S) <= 1) { - qpn = roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_PORT_NUM_M, - CQE_BYTE_20_PORT_NUM_S) + - roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, - CQE_BYTE_16_LOCAL_QPN_S) * - HNS_ROCE_MAX_PORTS; - } else { - qpn = roce_get_field(cqe->cqe_byte_16, CQE_BYTE_16_LOCAL_QPN_M, - CQE_BYTE_16_LOCAL_QPN_S); - } - - if (!*cur_qp || (qpn & HNS_ROCE_CQE_QPN_MASK) != (*cur_qp)->qpn) { - hr_qp = __hns_roce_qp_lookup(hr_dev, qpn); - if (unlikely(!hr_qp)) { - dev_err(dev, "CQ %06lx with entry for unknown QPN %06x\n", - hr_cq->cqn, (qpn & HNS_ROCE_CQE_QPN_MASK)); - return -EINVAL; - } - - *cur_qp = hr_qp; - } - - wc->qp = &(*cur_qp)->ibqp; - wc->vendor_err = 0; - - status = roce_get_field(cqe->cqe_byte_4, - CQE_BYTE_4_STATUS_OF_THE_OPERATION_M, - CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) & - HNS_ROCE_CQE_STATUS_MASK; - switch (status) { - case HNS_ROCE_CQE_SUCCESS: - wc->status = IB_WC_SUCCESS; - break; - case HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR: - wc->status = IB_WC_LOC_LEN_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR: - wc->status = IB_WC_LOC_QP_OP_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR: - wc->status = IB_WC_LOC_PROT_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR: - wc->status = IB_WC_WR_FLUSH_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR: - wc->status = IB_WC_MW_BIND_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR: - wc->status = IB_WC_BAD_RESP_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR: - wc->status = IB_WC_LOC_ACCESS_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR: - wc->status = IB_WC_REM_INV_REQ_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR: - wc->status = IB_WC_REM_ACCESS_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR: - wc->status = IB_WC_REM_OP_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR: - wc->status = IB_WC_RETRY_EXC_ERR; - break; - case HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR: - wc->status = 
IB_WC_RNR_RETRY_EXC_ERR; - break; - default: - wc->status = IB_WC_GENERAL_ERR; - break; - } - - /* CQE status error, directly return */ - if (wc->status != IB_WC_SUCCESS) - return 0; - - if (is_send) { - /* SQ conrespond to CQE */ - sq_wqe = hns_roce_get_send_wqe(*cur_qp, - roce_get_field(cqe->cqe_byte_4, - CQE_BYTE_4_WQE_INDEX_M, - CQE_BYTE_4_WQE_INDEX_S) & - ((*cur_qp)->sq.wqe_cnt-1)); - switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) { - case HNS_ROCE_WQE_OPCODE_SEND: - wc->opcode = IB_WC_SEND; - break; - case HNS_ROCE_WQE_OPCODE_RDMA_READ: - wc->opcode = IB_WC_RDMA_READ; - wc->byte_len = le32_to_cpu(cqe->byte_cnt); - break; - case HNS_ROCE_WQE_OPCODE_RDMA_WRITE: - wc->opcode = IB_WC_RDMA_WRITE; - break; - case HNS_ROCE_WQE_OPCODE_LOCAL_INV: - wc->opcode = IB_WC_LOCAL_INV; - break; - case HNS_ROCE_WQE_OPCODE_UD_SEND: - wc->opcode = IB_WC_SEND; - break; - default: - wc->status = IB_WC_GENERAL_ERR; - break; - } - wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ? - IB_WC_WITH_IMM : 0); - - wq = &(*cur_qp)->sq; - if ((*cur_qp)->sq_signal_bits) { - /* - * If sg_signal_bit is 1, - * firstly tail pointer updated to wqe - * which current cqe correspond to - */ - wqe_ctr = (u16)roce_get_field(cqe->cqe_byte_4, - CQE_BYTE_4_WQE_INDEX_M, - CQE_BYTE_4_WQE_INDEX_S); - wq->tail += (wqe_ctr - (u16)wq->tail) & - (wq->wqe_cnt - 1); - } - wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; - ++wq->tail; - } else { - /* RQ conrespond to CQE */ - wc->byte_len = le32_to_cpu(cqe->byte_cnt); - opcode = roce_get_field(cqe->cqe_byte_4, - CQE_BYTE_4_OPERATION_TYPE_M, - CQE_BYTE_4_OPERATION_TYPE_S) & - HNS_ROCE_CQE_OPCODE_MASK; - switch (opcode) { - case HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE: - wc->opcode = IB_WC_RECV_RDMA_WITH_IMM; - wc->wc_flags = IB_WC_WITH_IMM; - wc->ex.imm_data = - cpu_to_be32(le32_to_cpu(cqe->immediate_data)); - break; - case HNS_ROCE_OPCODE_SEND_DATA_RECEIVE: - if (roce_get_bit(cqe->cqe_byte_4, - CQE_BYTE_4_IMM_INDICATOR_S)) { - wc->opcode = IB_WC_RECV; - wc->wc_flags = IB_WC_WITH_IMM; - wc->ex.imm_data = cpu_to_be32( - le32_to_cpu(cqe->immediate_data)); - } else { - wc->opcode = IB_WC_RECV; - wc->wc_flags = 0; - } - break; - default: - wc->status = IB_WC_GENERAL_ERR; - break; - } - - /* Update tail pointer, record wr_id */ - wq = &(*cur_qp)->rq; - wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; - ++wq->tail; - wc->sl = (u8)roce_get_field(cqe->cqe_byte_20, CQE_BYTE_20_SL_M, - CQE_BYTE_20_SL_S); - wc->src_qp = (u8)roce_get_field(cqe->cqe_byte_20, - CQE_BYTE_20_REMOTE_QPN_M, - CQE_BYTE_20_REMOTE_QPN_S); - wc->wc_flags |= (roce_get_bit(cqe->cqe_byte_20, - CQE_BYTE_20_GRH_PRESENT_S) ? 
- IB_WC_GRH : 0); - wc->pkey_index = (u16)roce_get_field(cqe->cqe_byte_28, - CQE_BYTE_28_P_KEY_IDX_M, - CQE_BYTE_28_P_KEY_IDX_S); - } - - return 0; -} - -int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) -{ - struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); - struct hns_roce_qp *cur_qp = NULL; - unsigned long flags; - int npolled; - int ret; - - spin_lock_irqsave(&hr_cq->lock, flags); - - for (npolled = 0; npolled < num_entries; ++npolled) { - ret = hns_roce_v1_poll_one(hr_cq, &cur_qp, wc + npolled); - if (ret) - break; - } - - if (npolled) { - *hr_cq->tptr_addr = hr_cq->cons_index & - ((hr_cq->cq_depth << 1) - 1); - - hns_roce_v1_cq_set_ci(hr_cq, hr_cq->cons_index); - } - - spin_unlock_irqrestore(&hr_cq->lock, flags); - - if (ret == 0 || ret == -EAGAIN) - return npolled; - else - return ret; -} - -static int hns_roce_v1_clear_hem(struct hns_roce_dev *hr_dev, - struct hns_roce_hem_table *table, int obj, - int step_idx) -{ - struct hns_roce_v1_priv *priv = hr_dev->priv; - struct device *dev = &hr_dev->pdev->dev; - long end = HW_SYNC_TIMEOUT_MSECS; - __le32 bt_cmd_val[2] = {0}; - unsigned long flags = 0; - void __iomem *bt_cmd; - u64 bt_ba = 0; - - switch (table->type) { - case HEM_TYPE_QPC: - bt_ba = priv->bt_table.qpc_buf.map >> 12; - break; - case HEM_TYPE_MTPT: - bt_ba = priv->bt_table.mtpt_buf.map >> 12; - break; - case HEM_TYPE_CQC: - bt_ba = priv->bt_table.cqc_buf.map >> 12; - break; - case HEM_TYPE_SRQC: - dev_dbg(dev, "HEM_TYPE_SRQC not support.\n"); - return -EINVAL; - default: - return 0; - } - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_MDF_S, table->type); - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_IN_MDF_S, obj); - roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_S, 0); - roce_set_bit(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_HW_SYNS_S, 1); - - spin_lock_irqsave(&hr_dev->bt_cmd_lock, flags); - - bt_cmd = hr_dev->reg_base + ROCEE_BT_CMD_H_REG; - - while (1) { - if (readl(bt_cmd) >> BT_CMD_SYNC_SHIFT) { - if (!end) { - dev_err(dev, "Write bt_cmd err,hw_sync is not zero.\n"); - spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, - flags); - return -EBUSY; - } - } else { - break; - } - mdelay(HW_SYNC_SLEEP_TIME_INTERVAL); - end -= HW_SYNC_SLEEP_TIME_INTERVAL; - } - - bt_cmd_val[0] = cpu_to_le32(bt_ba); - roce_set_field(bt_cmd_val[1], ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_M, - ROCEE_BT_CMD_H_ROCEE_BT_CMD_BA_H_S, bt_ba >> 32); - hns_roce_write64_k(bt_cmd_val, hr_dev->reg_base + ROCEE_BT_CMD_L_REG); - - spin_unlock_irqrestore(&hr_dev->bt_cmd_lock, flags); - - return 0; -} - -static int hns_roce_v1_qp_modify(struct hns_roce_dev *hr_dev, - enum hns_roce_qp_state cur_state, - enum hns_roce_qp_state new_state, - struct hns_roce_qp_context *context, - struct hns_roce_qp *hr_qp) -{ - static const u16 - op[HNS_ROCE_QP_NUM_STATE][HNS_ROCE_QP_NUM_STATE] = { - [HNS_ROCE_QP_STATE_RST] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP, - }, - [HNS_ROCE_QP_STATE_INIT] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - /* Note: In v1 engine, HW doesn't support RST2INIT. - * We use RST2INIT cmd instead of INIT2INIT. 
- */ - [HNS_ROCE_QP_STATE_INIT] = HNS_ROCE_CMD_RST2INIT_QP, - [HNS_ROCE_QP_STATE_RTR] = HNS_ROCE_CMD_INIT2RTR_QP, - }, - [HNS_ROCE_QP_STATE_RTR] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTR2RTS_QP, - }, - [HNS_ROCE_QP_STATE_RTS] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_RTS2RTS_QP, - [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_RTS2SQD_QP, - }, - [HNS_ROCE_QP_STATE_SQD] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - [HNS_ROCE_QP_STATE_RTS] = HNS_ROCE_CMD_SQD2RTS_QP, - [HNS_ROCE_QP_STATE_SQD] = HNS_ROCE_CMD_SQD2SQD_QP, - }, - [HNS_ROCE_QP_STATE_ERR] = { - [HNS_ROCE_QP_STATE_RST] = HNS_ROCE_CMD_2RST_QP, - [HNS_ROCE_QP_STATE_ERR] = HNS_ROCE_CMD_2ERR_QP, - } - }; - - struct hns_roce_cmd_mailbox *mailbox; - struct device *dev = &hr_dev->pdev->dev; - int ret; - - if (cur_state >= HNS_ROCE_QP_NUM_STATE || - new_state >= HNS_ROCE_QP_NUM_STATE || - !op[cur_state][new_state]) { - dev_err(dev, "[modify_qp]not support state %d to %d\n", - cur_state, new_state); - return -EINVAL; - } - - if (op[cur_state][new_state] == HNS_ROCE_CMD_2RST_QP) - return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2, - HNS_ROCE_CMD_2RST_QP, - HNS_ROCE_CMD_TIMEOUT_MSECS); - - if (op[cur_state][new_state] == HNS_ROCE_CMD_2ERR_QP) - return hns_roce_cmd_mbox(hr_dev, 0, 0, hr_qp->qpn, 2, - HNS_ROCE_CMD_2ERR_QP, - HNS_ROCE_CMD_TIMEOUT_MSECS); - - mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - - memcpy(mailbox->buf, context, sizeof(*context)); - - ret = hns_roce_cmd_mbox(hr_dev, mailbox->dma, 0, hr_qp->qpn, 0, - op[cur_state][new_state], - HNS_ROCE_CMD_TIMEOUT_MSECS); - - hns_roce_free_cmd_mailbox(hr_dev, mailbox); - return ret; -} - -static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, - u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba) -{ - struct ib_device *ibdev = &hr_dev->ib_dev; - int count; - - count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba); - if (count < 1) { - ibdev_err(ibdev, "Failed to find SQ ba\n"); - return -ENOBUFS; - } - - count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, rq_ba, - 1, NULL); - if (!count) { - ibdev_err(ibdev, "Failed to find RQ ba\n"); - return -ENOBUFS; - } - - return 0; -} - -static int hns_roce_v1_m_sqp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, - int attr_mask, enum ib_qp_state cur_state, - enum ib_qp_state new_state) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_sqp_context *context; - dma_addr_t dma_handle = 0; - u32 __iomem *addr; - u64 sq_ba = 0; - u64 rq_ba = 0; - __le32 tmp; - u32 reg_val; - - context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) - return -ENOMEM; - - /* Search QP buf's MTTs */ - if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle)) - goto out; - - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { - roce_set_field(context->qp1c_bytes_4, - QP1C_BYTES_4_SQ_WQE_SHIFT_M, - QP1C_BYTES_4_SQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(context->qp1c_bytes_4, - QP1C_BYTES_4_RQ_WQE_SHIFT_M, - QP1C_BYTES_4_RQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); - roce_set_field(context->qp1c_bytes_4, QP1C_BYTES_4_PD_M, - QP1C_BYTES_4_PD_S, to_hr_pd(ibqp->pd)->pdn); - - 
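[Editor's aside] The op[][] table just above (hns_roce_v1_qp_modify) and the sm[][] table in check_qp_state below both drive QP state changes off a two-dimensional lookup indexed by the current and requested state, rejecting any transition without an entry. A minimal standalone sketch of that pattern, using a simplified state enum (the driver's SQD/SQE rows and the per-transition mailbox commands are omitted here):

/*
 * Illustrative sketch, not driver code: validate a QP state transition
 * with a 2-D lookup table; a missing (zero) entry means "not supported".
 */
#include <stdbool.h>
#include <stdio.h>

enum qp_state { QPS_RESET, QPS_INIT, QPS_RTR, QPS_RTS, QPS_ERR, QPS_NUM };

static const bool allowed[QPS_NUM][QPS_NUM] = {
	[QPS_RESET] = { [QPS_RESET] = true, [QPS_INIT] = true },
	[QPS_INIT]  = { [QPS_RESET] = true, [QPS_INIT] = true,
			[QPS_RTR]   = true, [QPS_ERR]  = true },
	[QPS_RTR]   = { [QPS_RESET] = true, [QPS_RTS]  = true,
			[QPS_ERR]   = true },
	[QPS_RTS]   = { [QPS_RESET] = true, [QPS_ERR]  = true },
	[QPS_ERR]   = { [QPS_RESET] = true, [QPS_ERR]  = true },
};

static int modify_qp(enum qp_state cur, enum qp_state next)
{
	if (cur >= QPS_NUM || next >= QPS_NUM || !allowed[cur][next])
		return -1;	/* the driver returns -EINVAL here */
	/* ...the driver would issue the per-transition mailbox command... */
	return 0;
}

int main(void)
{
	printf("INIT -> RTR: %s\n", modify_qp(QPS_INIT, QPS_RTR) ? "rejected" : "ok");
	printf("RTR -> INIT: %s\n", modify_qp(QPS_RTR, QPS_INIT) ? "rejected" : "ok");
	return 0;
}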
context->sq_rq_bt_l = cpu_to_le32(dma_handle); - roce_set_field(context->qp1c_bytes_12, - QP1C_BYTES_12_SQ_RQ_BT_H_M, - QP1C_BYTES_12_SQ_RQ_BT_H_S, - upper_32_bits(dma_handle)); - - roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_HEAD_M, - QP1C_BYTES_16_RQ_HEAD_S, hr_qp->rq.head); - roce_set_field(context->qp1c_bytes_16, QP1C_BYTES_16_PORT_NUM_M, - QP1C_BYTES_16_PORT_NUM_S, hr_qp->phy_port); - roce_set_bit(context->qp1c_bytes_16, - QP1C_BYTES_16_SIGNALING_TYPE_S, - hr_qp->sq_signal_bits); - roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_RQ_BA_FLG_S, - 1); - roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_SQ_BA_FLG_S, - 1); - roce_set_bit(context->qp1c_bytes_16, QP1C_BYTES_16_QP1_ERR_S, - 0); - - roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_SQ_HEAD_M, - QP1C_BYTES_20_SQ_HEAD_S, hr_qp->sq.head); - roce_set_field(context->qp1c_bytes_20, QP1C_BYTES_20_PKEY_IDX_M, - QP1C_BYTES_20_PKEY_IDX_S, attr->pkey_index); - - context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba); - - roce_set_field(context->qp1c_bytes_28, - QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M, - QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S, - upper_32_bits(rq_ba)); - roce_set_field(context->qp1c_bytes_28, - QP1C_BYTES_28_RQ_CUR_IDX_M, - QP1C_BYTES_28_RQ_CUR_IDX_S, 0); - - roce_set_field(context->qp1c_bytes_32, - QP1C_BYTES_32_RX_CQ_NUM_M, - QP1C_BYTES_32_RX_CQ_NUM_S, - to_hr_cq(ibqp->recv_cq)->cqn); - roce_set_field(context->qp1c_bytes_32, - QP1C_BYTES_32_TX_CQ_NUM_M, - QP1C_BYTES_32_TX_CQ_NUM_S, - to_hr_cq(ibqp->send_cq)->cqn); - - context->cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); - - roce_set_field(context->qp1c_bytes_40, - QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M, - QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S, - upper_32_bits(sq_ba)); - roce_set_field(context->qp1c_bytes_40, - QP1C_BYTES_40_SQ_CUR_IDX_M, - QP1C_BYTES_40_SQ_CUR_IDX_S, 0); - - /* Copy context to QP1C register */ - addr = (u32 __iomem *)(hr_dev->reg_base + - ROCEE_QP1C_CFG0_0_REG + - hr_qp->phy_port * sizeof(*context)); - - writel(le32_to_cpu(context->qp1c_bytes_4), addr); - writel(le32_to_cpu(context->sq_rq_bt_l), addr + 1); - writel(le32_to_cpu(context->qp1c_bytes_12), addr + 2); - writel(le32_to_cpu(context->qp1c_bytes_16), addr + 3); - writel(le32_to_cpu(context->qp1c_bytes_20), addr + 4); - writel(le32_to_cpu(context->cur_rq_wqe_ba_l), addr + 5); - writel(le32_to_cpu(context->qp1c_bytes_28), addr + 6); - writel(le32_to_cpu(context->qp1c_bytes_32), addr + 7); - writel(le32_to_cpu(context->cur_sq_wqe_ba_l), addr + 8); - writel(le32_to_cpu(context->qp1c_bytes_40), addr + 9); - } - - /* Modify QP1C status */ - reg_val = roce_read(hr_dev, ROCEE_QP1C_CFG0_0_REG + - hr_qp->phy_port * sizeof(*context)); - tmp = cpu_to_le32(reg_val); - roce_set_field(tmp, ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_M, - ROCEE_QP1C_CFG0_0_ROCEE_QP1C_QP_ST_S, new_state); - reg_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_QP1C_CFG0_0_REG + - hr_qp->phy_port * sizeof(*context), reg_val); - - hr_qp->state = new_state; - if (new_state == IB_QPS_RESET) { - hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, - ibqp->srq ? 
to_hr_srq(ibqp->srq) : NULL); - if (ibqp->send_cq != ibqp->recv_cq) - hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq), - hr_qp->qpn, NULL); - - hr_qp->rq.head = 0; - hr_qp->rq.tail = 0; - hr_qp->sq.head = 0; - hr_qp->sq.tail = 0; - } - - kfree(context); - return 0; - -out: - kfree(context); - return -EINVAL; -} - -static bool check_qp_state(enum ib_qp_state cur_state, - enum ib_qp_state new_state) -{ - static const bool sm[][IB_QPS_ERR + 1] = { - [IB_QPS_RESET] = { [IB_QPS_RESET] = true, - [IB_QPS_INIT] = true }, - [IB_QPS_INIT] = { [IB_QPS_RESET] = true, - [IB_QPS_INIT] = true, - [IB_QPS_RTR] = true, - [IB_QPS_ERR] = true }, - [IB_QPS_RTR] = { [IB_QPS_RESET] = true, - [IB_QPS_RTS] = true, - [IB_QPS_ERR] = true }, - [IB_QPS_RTS] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true }, - [IB_QPS_SQD] = {}, - [IB_QPS_SQE] = {}, - [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true } - }; - - return sm[cur_state][new_state]; -} - -static int hns_roce_v1_m_qp(struct ib_qp *ibqp, const struct ib_qp_attr *attr, - int attr_mask, enum ib_qp_state cur_state, - enum ib_qp_state new_state) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_qp_context *context; - const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); - dma_addr_t dma_handle_2 = 0; - dma_addr_t dma_handle = 0; - __le32 doorbell[2] = {0}; - u64 *mtts_2 = NULL; - int ret = -EINVAL; - u64 sq_ba = 0; - u64 rq_ba = 0; - u32 port; - u32 port_num; - u8 *dmac; - u8 *smac; - - if (!check_qp_state(cur_state, new_state)) { - ibdev_err(ibqp->device, - "not support QP(%u) status from %d to %d\n", - ibqp->qp_num, cur_state, new_state); - return -EINVAL; - } - - context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) - return -ENOMEM; - - /* Search qp buf's mtts */ - if (find_wqe_mtt(hr_dev, hr_qp, &sq_ba, &rq_ba, &dma_handle)) - goto out; - - /* Search IRRL's mtts */ - mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, - hr_qp->qpn, &dma_handle_2); - if (mtts_2 == NULL) { - dev_err(dev, "qp irrl_table find failed\n"); - goto out; - } - - /* - * Reset to init - * Mandatory param: - * IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS - * Optional param: NA - */ - if (cur_state == IB_QPS_RESET && new_state == IB_QPS_INIT) { - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M, - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S, - to_hr_qp_type(hr_qp->ibqp.qp_type)); - - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, - !!(attr->qp_access_flags & IB_ACCESS_REMOTE_READ)); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, - !!(attr->qp_access_flags & IB_ACCESS_REMOTE_WRITE) - ); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S, - !!(attr->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC) - ); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M, - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M, - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_PD_M, - 
QP_CONTEXT_QPC_BYTES_4_PD_S, - to_hr_pd(ibqp->pd)->pdn); - hr_qp->access_flags = attr->qp_access_flags; - roce_set_field(context->qpc_bytes_8, - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M, - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S, - to_hr_cq(ibqp->send_cq)->cqn); - roce_set_field(context->qpc_bytes_8, - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M, - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S, - to_hr_cq(ibqp->recv_cq)->cqn); - - if (ibqp->srq) - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M, - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S, - to_hr_srq(ibqp->srq)->srqn); - - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, - attr->pkey_index); - hr_qp->pkey_index = attr->pkey_index; - roce_set_field(context->qpc_bytes_16, - QP_CONTEXT_QPC_BYTES_16_QP_NUM_M, - QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn); - } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M, - QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S, - to_hr_qp_type(hr_qp->ibqp.qp_type)); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S, 0); - if (attr_mask & IB_QP_ACCESS_FLAGS) { - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, - !!(attr->qp_access_flags & - IB_ACCESS_REMOTE_READ)); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, - !!(attr->qp_access_flags & - IB_ACCESS_REMOTE_WRITE)); - } else { - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S, - !!(hr_qp->access_flags & - IB_ACCESS_REMOTE_READ)); - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S, - !!(hr_qp->access_flags & - IB_ACCESS_REMOTE_WRITE)); - } - - roce_set_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S, 1); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M, - QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->sq.wqe_cnt)); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M, - QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S, - ilog2((unsigned int)hr_qp->rq.wqe_cnt)); - roce_set_field(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTES_4_PD_M, - QP_CONTEXT_QPC_BYTES_4_PD_S, - to_hr_pd(ibqp->pd)->pdn); - - roce_set_field(context->qpc_bytes_8, - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M, - QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S, - to_hr_cq(ibqp->send_cq)->cqn); - roce_set_field(context->qpc_bytes_8, - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M, - QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S, - to_hr_cq(ibqp->recv_cq)->cqn); - - if (ibqp->srq) - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M, - QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S, - to_hr_srq(ibqp->srq)->srqn); - if (attr_mask & IB_QP_PKEY_INDEX) - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, - attr->pkey_index); - else - roce_set_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S, - hr_qp->pkey_index); - - roce_set_field(context->qpc_bytes_16, - QP_CONTEXT_QPC_BYTES_16_QP_NUM_M, - QP_CONTEXT_QPC_BYTES_16_QP_NUM_S, hr_qp->qpn); - } else if (cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) { - if ((attr_mask & IB_QP_ALT_PATH) || - (attr_mask & IB_QP_ACCESS_FLAGS) || - (attr_mask & IB_QP_PKEY_INDEX) || - (attr_mask & IB_QP_QKEY)) { - dev_err(dev, "INIT2RTR attr_mask 
error\n"); - goto out; - } - - dmac = (u8 *)attr->ah_attr.roce.dmac; - - context->sq_rq_bt_l = cpu_to_le32(dma_handle); - roce_set_field(context->qpc_bytes_24, - QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M, - QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S, - upper_32_bits(dma_handle)); - roce_set_bit(context->qpc_bytes_24, - QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S, - 1); - roce_set_field(context->qpc_bytes_24, - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M, - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S, - attr->min_rnr_timer); - context->irrl_ba_l = cpu_to_le32((u32)(dma_handle_2)); - roce_set_field(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M, - QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S, - ((u32)(dma_handle_2 >> 32)) & - QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M); - roce_set_field(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M, - QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S, 0); - roce_set_bit(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S, - 1); - roce_set_bit(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S, - hr_qp->sq_signal_bits); - - port = (attr_mask & IB_QP_PORT) ? (attr->port_num - 1) : - hr_qp->port; - smac = (u8 *)hr_dev->dev_addr[port]; - /* when dmac equals smac or loop_idc is 1, it should loopback */ - if (ether_addr_equal_unaligned(dmac, smac) || - hr_dev->loop_idc == 0x1) - roce_set_bit(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S, 1); - - roce_set_bit(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S, - rdma_ah_get_ah_flags(&attr->ah_attr)); - roce_set_field(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M, - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S, - ilog2((unsigned int)attr->max_dest_rd_atomic)); - - if (attr_mask & IB_QP_DEST_QPN) - roce_set_field(context->qpc_bytes_36, - QP_CONTEXT_QPC_BYTES_36_DEST_QP_M, - QP_CONTEXT_QPC_BYTES_36_DEST_QP_S, - attr->dest_qp_num); - - /* Configure GID index */ - port_num = rdma_ah_get_port_num(&attr->ah_attr); - roce_set_field(context->qpc_bytes_36, - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M, - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S, - hns_get_gid_index(hr_dev, - port_num - 1, - grh->sgid_index)); - - memcpy(&(context->dmac_l), dmac, 4); - - roce_set_field(context->qpc_bytes_44, - QP_CONTEXT_QPC_BYTES_44_DMAC_H_M, - QP_CONTEXT_QPC_BYTES_44_DMAC_H_S, - *((u16 *)(&dmac[4]))); - roce_set_field(context->qpc_bytes_44, - QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M, - QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S, - rdma_ah_get_static_rate(&attr->ah_attr)); - roce_set_field(context->qpc_bytes_44, - QP_CONTEXT_QPC_BYTES_44_HOPLMT_M, - QP_CONTEXT_QPC_BYTES_44_HOPLMT_S, - grh->hop_limit); - - roce_set_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M, - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S, - grh->flow_label); - roce_set_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_TCLASS_M, - QP_CONTEXT_QPC_BYTES_48_TCLASS_S, - grh->traffic_class); - roce_set_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_MTU_M, - QP_CONTEXT_QPC_BYTES_48_MTU_S, attr->path_mtu); - - memcpy(context->dgid, grh->dgid.raw, - sizeof(grh->dgid.raw)); - - dev_dbg(dev, "dmac:%x :%lx\n", context->dmac_l, - roce_get_field(context->qpc_bytes_44, - QP_CONTEXT_QPC_BYTES_44_DMAC_H_M, - QP_CONTEXT_QPC_BYTES_44_DMAC_H_S)); - - roce_set_field(context->qpc_bytes_68, - QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M, - QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S, - hr_qp->rq.head); - roce_set_field(context->qpc_bytes_68, - QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M, - 
QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S, 0); - - context->cur_rq_wqe_ba_l = cpu_to_le32(rq_ba); - - roce_set_field(context->qpc_bytes_76, - QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M, - QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S, - upper_32_bits(rq_ba)); - roce_set_field(context->qpc_bytes_76, - QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M, - QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S, 0); - - context->rx_rnr_time = 0; - - roce_set_field(context->qpc_bytes_84, - QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M, - QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S, - attr->rq_psn - 1); - roce_set_field(context->qpc_bytes_84, - QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M, - QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S, 0); - - roce_set_field(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S, - attr->rq_psn); - roce_set_bit(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S, 0); - roce_set_bit(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S, 0); - roce_set_field(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M, - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S, - 0); - roce_set_field(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M, - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S, - 0); - - context->dma_length = 0; - context->r_key = 0; - context->va_l = 0; - context->va_h = 0; - - roce_set_field(context->qpc_bytes_108, - QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M, - QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S, 0); - roce_set_bit(context->qpc_bytes_108, - QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S, 0); - roce_set_bit(context->qpc_bytes_108, - QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S, 0); - - roce_set_field(context->qpc_bytes_112, - QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M, - QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S, 0); - roce_set_field(context->qpc_bytes_112, - QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M, - QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S, 0); - - /* For chip resp ack */ - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, - hr_qp->phy_port); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_SL_M, - QP_CONTEXT_QPC_BYTES_156_SL_S, - rdma_ah_get_sl(&attr->ah_attr)); - hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); - } else if (cur_state == IB_QPS_RTR && new_state == IB_QPS_RTS) { - /* If exist optional param, return error */ - if ((attr_mask & IB_QP_ALT_PATH) || - (attr_mask & IB_QP_ACCESS_FLAGS) || - (attr_mask & IB_QP_QKEY) || - (attr_mask & IB_QP_PATH_MIG_STATE) || - (attr_mask & IB_QP_CUR_STATE) || - (attr_mask & IB_QP_MIN_RNR_TIMER)) { - dev_err(dev, "RTR2RTS attr_mask error\n"); - goto out; - } - - context->rx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); - - roce_set_field(context->qpc_bytes_120, - QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M, - QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S, - upper_32_bits(sq_ba)); - - roce_set_field(context->qpc_bytes_124, - QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M, - QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S, 0); - roce_set_field(context->qpc_bytes_124, - QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M, - QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S, 0); - - roce_set_field(context->qpc_bytes_128, - QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M, - QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S, - attr->sq_psn); - roce_set_bit(context->qpc_bytes_128, - QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S, 0); - roce_set_field(context->qpc_bytes_128, - QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M, - 
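/*
 * Illustrative mask-and-shift helpers (assumed semantics, not the driver's
 * actual roce_set_field()/roce_get_field()) showing how the *_M/*_S macro
 * pairs used throughout the context setup above pack values into 32-bit
 * register words.
 */
#include <stdint.h>
#include <stdio.h>

static inline uint32_t field_set(uint32_t word, uint32_t mask,
				 unsigned int shift, uint32_t val)
{
	return (word & ~mask) | ((val << shift) & mask);
}

static inline uint32_t field_get(uint32_t word, uint32_t mask,
				 unsigned int shift)
{
	return (word & mask) >> shift;
}

int main(void)
{
	/* hypothetical 4-bit field at bits [23:20] */
	const uint32_t F_MASK = 0x00f00000;
	const unsigned int F_SHIFT = 20;
	uint32_t word = 0;

	word = field_set(word, F_MASK, F_SHIFT, 0x5);
	printf("word=0x%08x field=%u\n", word, field_get(word, F_MASK, F_SHIFT));
	return 0;
}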
QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S, - 0); - roce_set_bit(context->qpc_bytes_128, - QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S, 0); - - roce_set_field(context->qpc_bytes_132, - QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M, - QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S, 0); - roce_set_field(context->qpc_bytes_132, - QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M, - QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S, 0); - - roce_set_field(context->qpc_bytes_136, - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M, - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S, - attr->sq_psn); - roce_set_field(context->qpc_bytes_136, - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M, - QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S, - attr->sq_psn); - - roce_set_field(context->qpc_bytes_140, - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M, - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S, - (attr->sq_psn >> SQ_PSN_SHIFT)); - roce_set_field(context->qpc_bytes_140, - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M, - QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S, 0); - roce_set_bit(context->qpc_bytes_140, - QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S, 0); - - roce_set_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M, - QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S, 0); - roce_set_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S, - attr->retry_cnt); - roce_set_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M, - QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S, - attr->rnr_retry); - roce_set_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_LSN_M, - QP_CONTEXT_QPC_BYTES_148_LSN_S, 0x100); - - context->rnr_retry = 0; - - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M, - QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S, - attr->retry_cnt); - if (attr->timeout < 0x12) { - dev_info(dev, "ack timeout value(0x%x) must bigger than 0x12.\n", - attr->timeout); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, - 0x12); - } else { - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S, - attr->timeout); - } - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M, - QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S, - attr->rnr_retry); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M, - QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S, - hr_qp->phy_port); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_SL_M, - QP_CONTEXT_QPC_BYTES_156_SL_S, - rdma_ah_get_sl(&attr->ah_attr)); - hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M, - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S, - ilog2((unsigned int)attr->max_rd_atomic)); - roce_set_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M, - QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S, 0); - context->pkt_use_len = 0; - - roce_set_field(context->qpc_bytes_164, - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M, - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S, attr->sq_psn); - roce_set_field(context->qpc_bytes_164, - QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M, - QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S, 0); - - roce_set_field(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M, - QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S, - attr->sq_psn); - roce_set_field(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M, 
- QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S, 0); - roce_set_field(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M, - QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S, 0); - roce_set_bit(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S, 0); - roce_set_bit(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S, 0); - roce_set_bit(context->qpc_bytes_168, - QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S, 0); - context->sge_use_len = 0; - - roce_set_field(context->qpc_bytes_176, - QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M, - QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S, 0); - roce_set_field(context->qpc_bytes_176, - QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M, - QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S, - 0); - roce_set_field(context->qpc_bytes_180, - QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M, - QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S, 0); - roce_set_field(context->qpc_bytes_180, - QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M, - QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S, 0); - - context->tx_cur_sq_wqe_ba_l = cpu_to_le32(sq_ba); - - roce_set_field(context->qpc_bytes_188, - QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M, - QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S, - upper_32_bits(sq_ba)); - roce_set_bit(context->qpc_bytes_188, - QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S, 0); - roce_set_field(context->qpc_bytes_188, - QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M, - QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S, - 0); - } - - /* Every status migrate must change state */ - roce_set_field(context->qpc_bytes_144, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_S, new_state); - - /* SW pass context to HW */ - ret = hns_roce_v1_qp_modify(hr_dev, to_hns_roce_state(cur_state), - to_hns_roce_state(new_state), context, - hr_qp); - if (ret) { - dev_err(dev, "hns_roce_qp_modify failed\n"); - goto out; - } - - /* - * Use rst2init to instead of init2init with drv, - * need to hw to flash RQ HEAD by DB again - */ - if (cur_state == IB_QPS_INIT && new_state == IB_QPS_INIT) { - roce_set_field(doorbell[0], RQ_DOORBELL_U32_4_RQ_HEAD_M, - RQ_DOORBELL_U32_4_RQ_HEAD_S, hr_qp->rq.head); - roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_QPN_M, - RQ_DOORBELL_U32_8_QPN_S, hr_qp->qpn); - roce_set_field(doorbell[1], RQ_DOORBELL_U32_8_CMD_M, - RQ_DOORBELL_U32_8_CMD_S, 1); - roce_set_bit(doorbell[1], RQ_DOORBELL_U32_8_HW_SYNC_S, 1); - - if (ibqp->uobject) { - hr_qp->rq.db_reg = hr_dev->reg_base + - hr_dev->odb_offset + - DB_REG_OFFSET * hr_dev->priv_uar.index; - } - - hns_roce_write64_k(doorbell, hr_qp->rq.db_reg); - } - - hr_qp->state = new_state; - - if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) - hr_qp->resp_depth = attr->max_dest_rd_atomic; - if (attr_mask & IB_QP_PORT) { - hr_qp->port = attr->port_num - 1; - hr_qp->phy_port = hr_dev->iboe.phy_port[hr_qp->port]; - } - - if (new_state == IB_QPS_RESET && !ibqp->uobject) { - hns_roce_v1_cq_clean(to_hr_cq(ibqp->recv_cq), hr_qp->qpn, - ibqp->srq ? 
to_hr_srq(ibqp->srq) : NULL); - if (ibqp->send_cq != ibqp->recv_cq) - hns_roce_v1_cq_clean(to_hr_cq(ibqp->send_cq), - hr_qp->qpn, NULL); - - hr_qp->rq.head = 0; - hr_qp->rq.tail = 0; - hr_qp->sq.head = 0; - hr_qp->sq.tail = 0; - } -out: - kfree(context); - return ret; -} - -static int hns_roce_v1_modify_qp(struct ib_qp *ibqp, - const struct ib_qp_attr *attr, int attr_mask, - enum ib_qp_state cur_state, - enum ib_qp_state new_state) -{ - if (attr_mask & ~IB_QP_ATTR_STANDARD_BITS) - return -EOPNOTSUPP; - - if (ibqp->qp_type == IB_QPT_GSI || ibqp->qp_type == IB_QPT_SMI) - return hns_roce_v1_m_sqp(ibqp, attr, attr_mask, cur_state, - new_state); - else - return hns_roce_v1_m_qp(ibqp, attr, attr_mask, cur_state, - new_state); -} - -static enum ib_qp_state to_ib_qp_state(enum hns_roce_qp_state state) -{ - switch (state) { - case HNS_ROCE_QP_STATE_RST: - return IB_QPS_RESET; - case HNS_ROCE_QP_STATE_INIT: - return IB_QPS_INIT; - case HNS_ROCE_QP_STATE_RTR: - return IB_QPS_RTR; - case HNS_ROCE_QP_STATE_RTS: - return IB_QPS_RTS; - case HNS_ROCE_QP_STATE_SQD: - return IB_QPS_SQD; - case HNS_ROCE_QP_STATE_ERR: - return IB_QPS_ERR; - default: - return IB_QPS_ERR; - } -} - -static int hns_roce_v1_query_qpc(struct hns_roce_dev *hr_dev, - struct hns_roce_qp *hr_qp, - struct hns_roce_qp_context *hr_context) -{ - struct hns_roce_cmd_mailbox *mailbox; - int ret; - - mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); - if (IS_ERR(mailbox)) - return PTR_ERR(mailbox); - - ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0, - HNS_ROCE_CMD_QUERY_QP, - HNS_ROCE_CMD_TIMEOUT_MSECS); - if (!ret) - memcpy(hr_context, mailbox->buf, sizeof(*hr_context)); - else - dev_err(&hr_dev->pdev->dev, "QUERY QP cmd process error\n"); - - hns_roce_free_cmd_mailbox(hr_dev, mailbox); - - return ret; -} - -static int hns_roce_v1_q_sqp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, - int qp_attr_mask, - struct ib_qp_init_attr *qp_init_attr) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_sqp_context context; - u32 addr; - - mutex_lock(&hr_qp->mutex); - - if (hr_qp->state == IB_QPS_RESET) { - qp_attr->qp_state = IB_QPS_RESET; - goto done; - } - - addr = ROCEE_QP1C_CFG0_0_REG + - hr_qp->port * sizeof(struct hns_roce_sqp_context); - context.qp1c_bytes_4 = cpu_to_le32(roce_read(hr_dev, addr)); - context.sq_rq_bt_l = cpu_to_le32(roce_read(hr_dev, addr + 1)); - context.qp1c_bytes_12 = cpu_to_le32(roce_read(hr_dev, addr + 2)); - context.qp1c_bytes_16 = cpu_to_le32(roce_read(hr_dev, addr + 3)); - context.qp1c_bytes_20 = cpu_to_le32(roce_read(hr_dev, addr + 4)); - context.cur_rq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 5)); - context.qp1c_bytes_28 = cpu_to_le32(roce_read(hr_dev, addr + 6)); - context.qp1c_bytes_32 = cpu_to_le32(roce_read(hr_dev, addr + 7)); - context.cur_sq_wqe_ba_l = cpu_to_le32(roce_read(hr_dev, addr + 8)); - context.qp1c_bytes_40 = cpu_to_le32(roce_read(hr_dev, addr + 9)); - - hr_qp->state = roce_get_field(context.qp1c_bytes_4, - QP1C_BYTES_4_QP_STATE_M, - QP1C_BYTES_4_QP_STATE_S); - qp_attr->qp_state = hr_qp->state; - qp_attr->path_mtu = IB_MTU_256; - qp_attr->path_mig_state = IB_MIG_ARMED; - qp_attr->qkey = QKEY_VAL; - qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; - qp_attr->rq_psn = 0; - qp_attr->sq_psn = 0; - qp_attr->dest_qp_num = 1; - qp_attr->qp_access_flags = 6; - - qp_attr->pkey_index = roce_get_field(context.qp1c_bytes_20, - QP1C_BYTES_20_PKEY_IDX_M, - QP1C_BYTES_20_PKEY_IDX_S); - qp_attr->port_num = hr_qp->port + 1; - 
qp_attr->sq_draining = 0; - qp_attr->max_rd_atomic = 0; - qp_attr->max_dest_rd_atomic = 0; - qp_attr->min_rnr_timer = 0; - qp_attr->timeout = 0; - qp_attr->retry_cnt = 0; - qp_attr->rnr_retry = 0; - qp_attr->alt_timeout = 0; - -done: - qp_attr->cur_qp_state = qp_attr->qp_state; - qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; - qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs; - qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; - qp_attr->cap.max_send_sge = hr_qp->sq.max_gs; - qp_attr->cap.max_inline_data = 0; - qp_init_attr->cap = qp_attr->cap; - qp_init_attr->create_flags = 0; - - mutex_unlock(&hr_qp->mutex); - - return 0; -} - -static int hns_roce_v1_q_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, - int qp_attr_mask, - struct ib_qp_init_attr *qp_init_attr) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_qp_context *context; - int tmp_qp_state; - int ret = 0; - int state; - - context = kzalloc(sizeof(*context), GFP_KERNEL); - if (!context) - return -ENOMEM; - - memset(qp_attr, 0, sizeof(*qp_attr)); - memset(qp_init_attr, 0, sizeof(*qp_init_attr)); - - mutex_lock(&hr_qp->mutex); - - if (hr_qp->state == IB_QPS_RESET) { - qp_attr->qp_state = IB_QPS_RESET; - goto done; - } - - ret = hns_roce_v1_query_qpc(hr_dev, hr_qp, context); - if (ret) { - dev_err(dev, "query qpc error\n"); - ret = -EINVAL; - goto out; - } - - state = roce_get_field(context->qpc_bytes_144, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_M, - QP_CONTEXT_QPC_BYTES_144_QP_STATE_S); - tmp_qp_state = (int)to_ib_qp_state((enum hns_roce_qp_state)state); - if (tmp_qp_state == -1) { - dev_err(dev, "to_ib_qp_state error\n"); - ret = -EINVAL; - goto out; - } - hr_qp->state = (u8)tmp_qp_state; - qp_attr->qp_state = (enum ib_qp_state)hr_qp->state; - qp_attr->path_mtu = (enum ib_mtu)roce_get_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_MTU_M, - QP_CONTEXT_QPC_BYTES_48_MTU_S); - qp_attr->path_mig_state = IB_MIG_ARMED; - qp_attr->ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE; - if (hr_qp->ibqp.qp_type == IB_QPT_UD) - qp_attr->qkey = QKEY_VAL; - - qp_attr->rq_psn = roce_get_field(context->qpc_bytes_88, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M, - QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S); - qp_attr->sq_psn = (u32)roce_get_field(context->qpc_bytes_164, - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M, - QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S); - qp_attr->dest_qp_num = (u8)roce_get_field(context->qpc_bytes_36, - QP_CONTEXT_QPC_BYTES_36_DEST_QP_M, - QP_CONTEXT_QPC_BYTES_36_DEST_QP_S); - qp_attr->qp_access_flags = ((roce_get_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S)) << 2) | - ((roce_get_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S)) << 1) | - ((roce_get_bit(context->qpc_bytes_4, - QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S)) << 3); - - if (hr_qp->ibqp.qp_type == IB_QPT_RC) { - struct ib_global_route *grh = - rdma_ah_retrieve_grh(&qp_attr->ah_attr); - - rdma_ah_set_sl(&qp_attr->ah_attr, - roce_get_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_SL_M, - QP_CONTEXT_QPC_BYTES_156_SL_S)); - rdma_ah_set_ah_flags(&qp_attr->ah_attr, IB_AH_GRH); - grh->flow_label = - roce_get_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M, - QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S); - grh->sgid_index = - roce_get_field(context->qpc_bytes_36, - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M, - QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S); - grh->hop_limit = - roce_get_field(context->qpc_bytes_44, - 
QP_CONTEXT_QPC_BYTES_44_HOPLMT_M, - QP_CONTEXT_QPC_BYTES_44_HOPLMT_S); - grh->traffic_class = - roce_get_field(context->qpc_bytes_48, - QP_CONTEXT_QPC_BYTES_48_TCLASS_M, - QP_CONTEXT_QPC_BYTES_48_TCLASS_S); - - memcpy(grh->dgid.raw, context->dgid, - sizeof(grh->dgid.raw)); - } - - qp_attr->pkey_index = roce_get_field(context->qpc_bytes_12, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M, - QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S); - qp_attr->port_num = hr_qp->port + 1; - qp_attr->sq_draining = 0; - qp_attr->max_rd_atomic = 1 << roce_get_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M, - QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S); - qp_attr->max_dest_rd_atomic = 1 << roce_get_field(context->qpc_bytes_32, - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M, - QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S); - qp_attr->min_rnr_timer = (u8)(roce_get_field(context->qpc_bytes_24, - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M, - QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S)); - qp_attr->timeout = (u8)(roce_get_field(context->qpc_bytes_156, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M, - QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S)); - qp_attr->retry_cnt = roce_get_field(context->qpc_bytes_148, - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M, - QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S); - qp_attr->rnr_retry = (u8)le32_to_cpu(context->rnr_retry); - -done: - qp_attr->cur_qp_state = qp_attr->qp_state; - qp_attr->cap.max_recv_wr = hr_qp->rq.wqe_cnt; - qp_attr->cap.max_recv_sge = hr_qp->rq.max_gs; - - if (!ibqp->uobject) { - qp_attr->cap.max_send_wr = hr_qp->sq.wqe_cnt; - qp_attr->cap.max_send_sge = hr_qp->sq.max_gs; - } else { - qp_attr->cap.max_send_wr = 0; - qp_attr->cap.max_send_sge = 0; - } - - qp_init_attr->cap = qp_attr->cap; - -out: - mutex_unlock(&hr_qp->mutex); - kfree(context); - return ret; -} - -static int hns_roce_v1_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, - int qp_attr_mask, - struct ib_qp_init_attr *qp_init_attr) -{ - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - - return hr_qp->doorbell_qpn <= 1 ? - hns_roce_v1_q_sqp(ibqp, qp_attr, qp_attr_mask, qp_init_attr) : - hns_roce_v1_q_qp(ibqp, qp_attr, qp_attr_mask, qp_init_attr); -} - -int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); - struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); - struct hns_roce_cq *send_cq, *recv_cq; - int ret; - - ret = hns_roce_v1_modify_qp(ibqp, NULL, 0, hr_qp->state, IB_QPS_RESET); - if (ret) - return ret; - - send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL; - recv_cq = hr_qp->ibqp.recv_cq ? to_hr_cq(hr_qp->ibqp.recv_cq) : NULL; - - hns_roce_lock_cqs(send_cq, recv_cq); - if (!udata) { - if (recv_cq) - __hns_roce_v1_cq_clean(recv_cq, hr_qp->qpn, - (hr_qp->ibqp.srq ? - to_hr_srq(hr_qp->ibqp.srq) : - NULL)); - - if (send_cq && send_cq != recv_cq) - __hns_roce_v1_cq_clean(send_cq, hr_qp->qpn, NULL); - } - hns_roce_qp_remove(hr_dev, hr_qp); - hns_roce_unlock_cqs(send_cq, recv_cq); - - hns_roce_qp_destroy(hr_dev, hr_qp, udata); - - return 0; -} - -static int hns_roce_v1_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata) -{ - struct hns_roce_dev *hr_dev = to_hr_dev(ibcq->device); - struct hns_roce_cq *hr_cq = to_hr_cq(ibcq); - struct device *dev = &hr_dev->pdev->dev; - u32 cqe_cnt_ori; - u32 cqe_cnt_cur; - int wait_time = 0; - - /* - * Before freeing cq buffer, we need to ensure that the outstanding CQE - * have been written by checking the CQE counter. 
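/*
 * Sketch of the power-of-two encoding visible above: the context stores
 * ilog2(max_rd_atomic) in a narrow field and the query path recovers the
 * original value with 1 << field. The helper name is illustrative.
 */
#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int depth = 16;
	unsigned int encoded = ilog2_u32(depth);  /* 4 */
	unsigned int decoded = 1u << encoded;     /* 16 */

	printf("encoded=%u decoded=%u\n", encoded, decoded);
	return 0;
}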
- */ - cqe_cnt_ori = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT); - while (1) { - if (roce_read(hr_dev, ROCEE_CAEP_CQE_WCMD_EMPTY) & - HNS_ROCE_CQE_WCMD_EMPTY_BIT) - break; - - cqe_cnt_cur = roce_read(hr_dev, ROCEE_SCAEP_WR_CQE_CNT); - if ((cqe_cnt_cur - cqe_cnt_ori) >= HNS_ROCE_MIN_CQE_CNT) - break; - - msleep(HNS_ROCE_EACH_FREE_CQ_WAIT_MSECS); - if (wait_time > HNS_ROCE_MAX_FREE_CQ_WAIT_CNT) { - dev_warn(dev, "Destroy cq 0x%lx timeout!\n", - hr_cq->cqn); - break; - } - wait_time++; - } - return 0; -} - -static void set_eq_cons_index_v1(struct hns_roce_eq *eq, u32 req_not) -{ - roce_raw_write((eq->cons_index & HNS_ROCE_V1_CONS_IDX_M) | - (req_not << eq->log_entries), eq->db_reg); -} - -static void hns_roce_v1_wq_catas_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, int qpn) -{ - struct device *dev = &hr_dev->pdev->dev; - - dev_warn(dev, "Local Work Queue Catastrophic Error.\n"); - switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { - case HNS_ROCE_LWQCE_QPC_ERROR: - dev_warn(dev, "QP %d, QPC error.\n", qpn); - break; - case HNS_ROCE_LWQCE_MTU_ERROR: - dev_warn(dev, "QP %d, MTU error.\n", qpn); - break; - case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR: - dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn); - break; - case HNS_ROCE_LWQCE_WQE_ADDR_ERROR: - dev_warn(dev, "QP %d, WQE addr error.\n", qpn); - break; - case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR: - dev_warn(dev, "QP %d, WQE shift error\n", qpn); - break; - case HNS_ROCE_LWQCE_SL_ERROR: - dev_warn(dev, "QP %d, SL error.\n", qpn); - break; - case HNS_ROCE_LWQCE_PORT_ERROR: - dev_warn(dev, "QP %d, port error.\n", qpn); - break; - default: - break; - } -} - -static void hns_roce_v1_local_wq_access_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - int qpn) -{ - struct device *dev = &hr_dev->pdev->dev; - - dev_warn(dev, "Local Access Violation Work Queue Error.\n"); - switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { - case HNS_ROCE_LAVWQE_R_KEY_VIOLATION: - dev_warn(dev, "QP %d, R_key violation.\n", qpn); - break; - case HNS_ROCE_LAVWQE_LENGTH_ERROR: - dev_warn(dev, "QP %d, length error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_VA_ERROR: - dev_warn(dev, "QP %d, VA error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_PD_ERROR: - dev_err(dev, "QP %d, PD error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_RW_ACC_ERROR: - dev_warn(dev, "QP %d, rw acc error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_KEY_STATE_ERROR: - dev_warn(dev, "QP %d, key state error.\n", qpn); - break; - case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR: - dev_warn(dev, "QP %d, MR operation error.\n", qpn); - break; - default: - break; - } -} - -static void hns_roce_v1_qp_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - int event_type) -{ - struct device *dev = &hr_dev->pdev->dev; - int phy_port; - int qpn; - - qpn = roce_get_field(aeqe->event.queue_event.num, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S); - phy_port = roce_get_field(aeqe->event.queue_event.num, - HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M, - HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S); - if (qpn <= 1) - qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port; - - switch (event_type) { - case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: - dev_warn(dev, "Invalid Req Local Work Queue Error.\n" - "QP %d, phy_port %d.\n", qpn, phy_port); - break; - case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - hns_roce_v1_wq_catas_err_handle(hr_dev, 
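/*
 * Generic sketch of the bounded polling pattern used when destroying a CQ
 * above: poll a completion condition, back off between attempts, and give up
 * with a warning after a fixed number of tries. poll_done()/wait_one() are
 * placeholders, not driver functions.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_WAIT_CNT 50

static bool poll_done(int attempt)
{
	return attempt >= 3;    /* pretend the hardware finishes on the 4th poll */
}

static void wait_one(void)
{
	/* stand-in for an msleep()-style delay between polls */
}

int main(void)
{
	int wait_time = 0;

	while (!poll_done(wait_time)) {
		wait_one();
		if (++wait_time > MAX_WAIT_CNT) {
			printf("timeout\n");
			break;
		}
	}
	printf("waited %d rounds\n", wait_time);
	return 0;
}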
aeqe, qpn); - break; - case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - hns_roce_v1_local_wq_access_err_handle(hr_dev, aeqe, qpn); - break; - default: - break; - } - - hns_roce_qp_event(hr_dev, qpn, event_type); -} - -static void hns_roce_v1_cq_err_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe, - int event_type) -{ - struct device *dev = &hr_dev->pdev->dev; - u32 cqn; - - cqn = roce_get_field(aeqe->event.queue_event.num, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M, - HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S); - - switch (event_type) { - case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: - dev_warn(dev, "CQ 0x%x access err.\n", cqn); - break; - case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - dev_warn(dev, "CQ 0x%x overflow\n", cqn); - break; - case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID: - dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn); - break; - default: - break; - } - - hns_roce_cq_event(hr_dev, cqn, event_type); -} - -static void hns_roce_v1_db_overflow_handle(struct hns_roce_dev *hr_dev, - struct hns_roce_aeqe *aeqe) -{ - struct device *dev = &hr_dev->pdev->dev; - - switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) { - case HNS_ROCE_DB_SUBTYPE_SDB_OVF: - dev_warn(dev, "SDB overflow.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF: - dev_warn(dev, "SDB almost overflow.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP: - dev_warn(dev, "SDB almost empty.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_ODB_OVF: - dev_warn(dev, "ODB overflow.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF: - dev_warn(dev, "ODB almost overflow.\n"); - break; - case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP: - dev_warn(dev, "SDB almost empty.\n"); - break; - default: - break; - } -} - -static struct hns_roce_aeqe *get_aeqe_v1(struct hns_roce_eq *eq, u32 entry) -{ - unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_AEQE_SIZE; - - return (struct hns_roce_aeqe *)((u8 *) - (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) + - off % HNS_ROCE_BA_SIZE); -} - -static struct hns_roce_aeqe *next_aeqe_sw_v1(struct hns_roce_eq *eq) -{ - struct hns_roce_aeqe *aeqe = get_aeqe_v1(eq, eq->cons_index); - - return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^ - !!(eq->cons_index & eq->entries)) ? 
aeqe : NULL; -} - -static int hns_roce_v1_aeq_int(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) -{ - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_aeqe *aeqe; - int aeqes_found = 0; - int event_type; - - while ((aeqe = next_aeqe_sw_v1(eq))) { - /* Make sure we read the AEQ entry after we have checked the - * ownership bit - */ - dma_rmb(); - - dev_dbg(dev, "aeqe = %pK, aeqe->asyn.event_type = 0x%lx\n", - aeqe, - roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S)); - event_type = roce_get_field(aeqe->asyn, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M, - HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S); - switch (event_type) { - case HNS_ROCE_EVENT_TYPE_PATH_MIG: - dev_warn(dev, "PATH MIG not supported\n"); - break; - case HNS_ROCE_EVENT_TYPE_COMM_EST: - dev_warn(dev, "COMMUNICATION established\n"); - break; - case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: - dev_warn(dev, "SQ DRAINED not supported\n"); - break; - case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: - dev_warn(dev, "PATH MIG failed\n"); - break; - case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: - case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: - case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: - hns_roce_v1_qp_err_handle(hr_dev, aeqe, event_type); - break; - case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: - case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: - case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: - dev_warn(dev, "SRQ not support!\n"); - break; - case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: - case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: - case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID: - hns_roce_v1_cq_err_handle(hr_dev, aeqe, event_type); - break; - case HNS_ROCE_EVENT_TYPE_PORT_CHANGE: - dev_warn(dev, "port change.\n"); - break; - case HNS_ROCE_EVENT_TYPE_MB: - hns_roce_cmd_event(hr_dev, - le16_to_cpu(aeqe->event.cmd.token), - aeqe->event.cmd.status, - le64_to_cpu(aeqe->event.cmd.out_param - )); - break; - case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: - hns_roce_v1_db_overflow_handle(hr_dev, aeqe); - break; - default: - dev_warn(dev, "Unhandled event %d on EQ %d at idx %u.\n", - event_type, eq->eqn, eq->cons_index); - break; - } - - eq->cons_index++; - aeqes_found = 1; - - if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) - eq->cons_index = 0; - } - - set_eq_cons_index_v1(eq, 0); - - return aeqes_found; -} - -static struct hns_roce_ceqe *get_ceqe_v1(struct hns_roce_eq *eq, u32 entry) -{ - unsigned long off = (entry & (eq->entries - 1)) * HNS_ROCE_CEQE_SIZE; - - return (struct hns_roce_ceqe *)((u8 *) - (eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) + - off % HNS_ROCE_BA_SIZE); -} - -static struct hns_roce_ceqe *next_ceqe_sw_v1(struct hns_roce_eq *eq) -{ - struct hns_roce_ceqe *ceqe = get_ceqe_v1(eq, eq->cons_index); - - return (!!(roce_get_bit(ceqe->comp, - HNS_ROCE_CEQE_CEQE_COMP_OWNER_S))) ^ - (!!(eq->cons_index & eq->entries)) ? 
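/*
 * Self-contained sketch of the owner-bit scheme used by next_aeqe_sw_v1() and
 * next_ceqe_sw_v1() above: an entry is software-owned only while its owner bit
 * differs from the "phase" implied by how many times the consumer index has
 * wrapped the ring (the real code also issues dma_rmb() before reading the
 * entry body). Structure and field names here are illustrative.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct eq_entry {
	uint32_t flags;                        /* bit 31: owner bit */
};
#define EQE_OWNER_BIT (1u << 31)

struct eq_ring {
	struct eq_entry *entries;
	uint32_t depth;                        /* power of two */
	uint32_t cons_index;
};

/* Return the next software-owned entry, or NULL when the ring is empty. */
static struct eq_entry *next_sw_entry(struct eq_ring *eq)
{
	struct eq_entry *e = &eq->entries[eq->cons_index & (eq->depth - 1)];
	bool owner = e->flags & EQE_OWNER_BIT;
	bool phase = eq->cons_index & eq->depth;   /* flips on every wrap */

	return (owner != phase) ? e : NULL;
}

int main(void)
{
	struct eq_entry ring[4] = { { EQE_OWNER_BIT }, { 0 }, { 0 }, { 0 } };
	struct eq_ring eq = { ring, 4, 0 };

	/* first entry: owner bit set, phase 0 -> software-owned */
	return next_sw_entry(&eq) ? 0 : 1;
}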
ceqe : NULL; -} - -static int hns_roce_v1_ceq_int(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) -{ - struct hns_roce_ceqe *ceqe; - int ceqes_found = 0; - u32 cqn; - - while ((ceqe = next_ceqe_sw_v1(eq))) { - /* Make sure we read CEQ entry after we have checked the - * ownership bit - */ - dma_rmb(); - - cqn = roce_get_field(ceqe->comp, - HNS_ROCE_CEQE_CEQE_COMP_CQN_M, - HNS_ROCE_CEQE_CEQE_COMP_CQN_S); - hns_roce_cq_completion(hr_dev, cqn); - - ++eq->cons_index; - ceqes_found = 1; - - if (eq->cons_index > - EQ_DEPTH_COEFF * hr_dev->caps.ceqe_depth - 1) - eq->cons_index = 0; - } - - set_eq_cons_index_v1(eq, 0); - - return ceqes_found; -} - -static irqreturn_t hns_roce_v1_msix_interrupt_eq(int irq, void *eq_ptr) -{ - struct hns_roce_eq *eq = eq_ptr; - struct hns_roce_dev *hr_dev = eq->hr_dev; - int int_work; - - if (eq->type_flag == HNS_ROCE_CEQ) - /* CEQ irq routine, CEQ is pulse irq, not clear */ - int_work = hns_roce_v1_ceq_int(hr_dev, eq); - else - /* AEQ irq routine, AEQ is pulse irq, not clear */ - int_work = hns_roce_v1_aeq_int(hr_dev, eq); - - return IRQ_RETVAL(int_work); -} - -static irqreturn_t hns_roce_v1_msix_interrupt_abn(int irq, void *dev_id) -{ - struct hns_roce_dev *hr_dev = dev_id; - struct device *dev = &hr_dev->pdev->dev; - int int_work = 0; - u32 caepaemask_val; - u32 cealmovf_val; - u32 caepaest_val; - u32 aeshift_val; - u32 ceshift_val; - u32 cemask_val; - __le32 tmp; - int i; - - /* - * Abnormal interrupt: - * AEQ overflow, ECC multi-bit err, CEQ overflow must clear - * interrupt, mask irq, clear irq, cancel mask operation - */ - aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG); - tmp = cpu_to_le32(aeshift_val); - - /* AEQE overflow */ - if (roce_get_bit(tmp, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) { - dev_warn(dev, "AEQ overflow!\n"); - - /* Set mask */ - caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); - tmp = cpu_to_le32(caepaemask_val); - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, - HNS_ROCE_INT_MASK_ENABLE); - caepaemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val); - - /* Clear int state(INT_WC : write 1 clear) */ - caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG); - tmp = cpu_to_le32(caepaest_val); - roce_set_bit(tmp, ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1); - caepaest_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val); - - /* Clear mask */ - caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); - tmp = cpu_to_le32(caepaemask_val); - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, - HNS_ROCE_INT_MASK_DISABLE); - caepaemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val); - } - - /* CEQ almost overflow */ - for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) { - ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG + - i * CEQ_REG_OFFSET); - tmp = cpu_to_le32(ceshift_val); - - if (roce_get_bit(tmp, - ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) { - dev_warn(dev, "CEQ[%d] almost overflow!\n", i); - int_work++; - - /* Set mask */ - cemask_val = roce_read(hr_dev, - ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET); - tmp = cpu_to_le32(cemask_val); - roce_set_bit(tmp, - ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S, - HNS_ROCE_INT_MASK_ENABLE); - cemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET, cemask_val); - - /* Clear int state(INT_WC : write 1 clear) */ - cealmovf_val = 
roce_read(hr_dev, - ROCEE_CAEP_CEQ_ALM_OVF_0_REG + - i * CEQ_REG_OFFSET); - tmp = cpu_to_le32(cealmovf_val); - roce_set_bit(tmp, - ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S, - 1); - cealmovf_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG + - i * CEQ_REG_OFFSET, cealmovf_val); - - /* Clear mask */ - cemask_val = roce_read(hr_dev, - ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET); - tmp = cpu_to_le32(cemask_val); - roce_set_bit(tmp, - ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S, - HNS_ROCE_INT_MASK_DISABLE); - cemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET, cemask_val); - } - } - - /* ECC multi-bit error alarm */ - dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n", - roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG), - roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG), - roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG)); - - dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n", - roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG), - roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG), - roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG)); - - return IRQ_RETVAL(int_work); -} - -static void hns_roce_v1_int_mask_enable(struct hns_roce_dev *hr_dev) -{ - u32 aemask_val; - int masken = 0; - __le32 tmp; - int i; - - /* AEQ INT */ - aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG); - tmp = cpu_to_le32(aemask_val); - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S, - masken); - roce_set_bit(tmp, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken); - aemask_val = le32_to_cpu(tmp); - roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val); - - /* CEQ INT */ - for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) { - /* IRQ mask */ - roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG + - i * CEQ_REG_OFFSET, masken); - } -} - -static void hns_roce_v1_free_eq(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) -{ - int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) + - HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE; - int i; - - if (!eq->buf_list) - return; - - for (i = 0; i < npages; ++i) - dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE, - eq->buf_list[i].buf, eq->buf_list[i].map); - - kfree(eq->buf_list); -} - -static void hns_roce_v1_enable_eq(struct hns_roce_dev *hr_dev, int eq_num, - int enable_flag) -{ - void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num]; - __le32 tmp; - u32 val; - - val = readl(eqc); - tmp = cpu_to_le32(val); - - if (enable_flag) - roce_set_field(tmp, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, - HNS_ROCE_EQ_STAT_VALID); - else - roce_set_field(tmp, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, - HNS_ROCE_EQ_STAT_INVALID); - - val = le32_to_cpu(tmp); - writel(val, eqc); -} - -static int hns_roce_v1_create_eq(struct hns_roce_dev *hr_dev, - struct hns_roce_eq *eq) -{ - void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn]; - struct device *dev = &hr_dev->pdev->dev; - dma_addr_t tmp_dma_addr; - u32 eqcuridx_val; - u32 eqconsindx_val; - u32 eqshift_val; - __le32 tmp2 = 0; - __le32 tmp1 = 0; - __le32 tmp = 0; - int num_bas; - int ret; - int i; - - num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) + - HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE; - - if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) { - dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n", - (eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE, - num_bas); - return -EINVAL; - } - - eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL); - 
if (!eq->buf_list) - return -ENOMEM; - - for (i = 0; i < num_bas; ++i) { - eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE, - &tmp_dma_addr, - GFP_KERNEL); - if (!eq->buf_list[i].buf) { - ret = -ENOMEM; - goto err_out_free_pages; - } - - eq->buf_list[i].map = tmp_dma_addr; - } - eq->cons_index = 0; - roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S, - HNS_ROCE_EQ_STAT_INVALID); - roce_set_field(tmp, ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M, - ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S, - eq->log_entries); - eqshift_val = le32_to_cpu(tmp); - writel(eqshift_val, eqc); - - /* Configure eq extended address 12~44bit */ - writel((u32)(eq->buf_list[0].map >> 12), eqc + 4); - - /* - * Configure eq extended address 45~49 bit. - * 44 = 32 + 12, When evaluating addr to hardware, shift 12 because of - * using 4K page, and shift more 32 because of - * calculating the high 32 bit value evaluated to hardware. - */ - roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M, - ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S, - eq->buf_list[0].map >> 44); - roce_set_field(tmp1, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M, - ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0); - eqcuridx_val = le32_to_cpu(tmp1); - writel(eqcuridx_val, eqc + 8); - - /* Configure eq consumer index */ - roce_set_field(tmp2, ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M, - ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0); - eqconsindx_val = le32_to_cpu(tmp2); - writel(eqconsindx_val, eqc + 0xc); - - return 0; - -err_out_free_pages: - for (i -= 1; i >= 0; i--) - dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf, - eq->buf_list[i].map); - - kfree(eq->buf_list); - return ret; -} - -static int hns_roce_v1_init_eq_table(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; - struct device *dev = &hr_dev->pdev->dev; - struct hns_roce_eq *eq; - int irq_num; - int eq_num; - int ret; - int i, j; - - eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; - irq_num = eq_num + hr_dev->caps.num_other_vectors; - - eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL); - if (!eq_table->eq) - return -ENOMEM; - - eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base), - GFP_KERNEL); - if (!eq_table->eqc_base) { - ret = -ENOMEM; - goto err_eqc_base_alloc_fail; - } - - for (i = 0; i < eq_num; i++) { - eq = &eq_table->eq[i]; - eq->hr_dev = hr_dev; - eq->eqn = i; - eq->irq = hr_dev->irq[i]; - eq->log_page_size = PAGE_SHIFT; - - if (i < hr_dev->caps.num_comp_vectors) { - /* CEQ */ - eq_table->eqc_base[i] = hr_dev->reg_base + - ROCEE_CAEP_CEQC_SHIFT_0_REG + - CEQ_REG_OFFSET * i; - eq->type_flag = HNS_ROCE_CEQ; - eq->db_reg = hr_dev->reg_base + - ROCEE_CAEP_CEQC_CONS_IDX_0_REG + - CEQ_REG_OFFSET * i; - eq->entries = hr_dev->caps.ceqe_depth; - eq->log_entries = ilog2(eq->entries); - eq->eqe_size = HNS_ROCE_CEQE_SIZE; - } else { - /* AEQ */ - eq_table->eqc_base[i] = hr_dev->reg_base + - ROCEE_CAEP_AEQC_AEQE_SHIFT_REG; - eq->type_flag = HNS_ROCE_AEQ; - eq->db_reg = hr_dev->reg_base + - ROCEE_CAEP_AEQE_CONS_IDX_REG; - eq->entries = hr_dev->caps.aeqe_depth; - eq->log_entries = ilog2(eq->entries); - eq->eqe_size = HNS_ROCE_AEQE_SIZE; - } - } - - /* Disable irq */ - hns_roce_v1_int_mask_enable(hr_dev); - - /* Configure ce int interval */ - roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG, - HNS_ROCE_CEQ_DEFAULT_INTERVAL); - - /* Configure ce int burst num */ - roce_write(hr_dev, 
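/*
 * Arithmetic sketch of the EQ base-address split described in the comment
 * above: with 4 KiB pages the address is programmed as (addr >> 12) in one
 * register word and (addr >> 44) in a small high-bits field. The address
 * below is made up.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t ba = 0x0001234567890000ULL;   /* hypothetical DMA address */
	uint32_t lo = (uint32_t)(ba >> 12);    /* bits 43..12 */
	uint32_t hi = (uint32_t)(ba >> 44);    /* bits above 43 */

	printf("lo=0x%08x hi=0x%x\n", lo, hi);
	return 0;
}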
ROCEE_CAEP_CE_BURST_NUM_CFG_REG, - HNS_ROCE_CEQ_DEFAULT_BURST_NUM); - - for (i = 0; i < eq_num; i++) { - ret = hns_roce_v1_create_eq(hr_dev, &eq_table->eq[i]); - if (ret) { - dev_err(dev, "eq create failed\n"); - goto err_create_eq_fail; - } - } - - for (j = 0; j < irq_num; j++) { - if (j < eq_num) - ret = request_irq(hr_dev->irq[j], - hns_roce_v1_msix_interrupt_eq, 0, - hr_dev->irq_names[j], - &eq_table->eq[j]); - else - ret = request_irq(hr_dev->irq[j], - hns_roce_v1_msix_interrupt_abn, 0, - hr_dev->irq_names[j], hr_dev); - - if (ret) { - dev_err(dev, "request irq error!\n"); - goto err_request_irq_fail; - } - } - - for (i = 0; i < eq_num; i++) - hns_roce_v1_enable_eq(hr_dev, i, EQ_ENABLE); - - return 0; - -err_request_irq_fail: - for (j -= 1; j >= 0; j--) - free_irq(hr_dev->irq[j], &eq_table->eq[j]); - -err_create_eq_fail: - for (i -= 1; i >= 0; i--) - hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]); - - kfree(eq_table->eqc_base); - -err_eqc_base_alloc_fail: - kfree(eq_table->eq); - - return ret; -} - -static void hns_roce_v1_cleanup_eq_table(struct hns_roce_dev *hr_dev) -{ - struct hns_roce_eq_table *eq_table = &hr_dev->eq_table; - int irq_num; - int eq_num; - int i; - - eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors; - irq_num = eq_num + hr_dev->caps.num_other_vectors; - for (i = 0; i < eq_num; i++) { - /* Disable EQ */ - hns_roce_v1_enable_eq(hr_dev, i, EQ_DISABLE); - - free_irq(hr_dev->irq[i], &eq_table->eq[i]); - - hns_roce_v1_free_eq(hr_dev, &eq_table->eq[i]); - } - for (i = eq_num; i < irq_num; i++) - free_irq(hr_dev->irq[i], hr_dev); - - kfree(eq_table->eqc_base); - kfree(eq_table->eq); -} - -static const struct ib_device_ops hns_roce_v1_dev_ops = { - .destroy_qp = hns_roce_v1_destroy_qp, - .poll_cq = hns_roce_v1_poll_cq, - .post_recv = hns_roce_v1_post_recv, - .post_send = hns_roce_v1_post_send, - .query_qp = hns_roce_v1_query_qp, - .req_notify_cq = hns_roce_v1_req_notify_cq, -}; - -static const struct hns_roce_hw hns_roce_hw_v1 = { - .reset = hns_roce_v1_reset, - .hw_profile = hns_roce_v1_profile, - .hw_init = hns_roce_v1_init, - .hw_exit = hns_roce_v1_exit, - .post_mbox = hns_roce_v1_post_mbox, - .poll_mbox_done = hns_roce_v1_chk_mbox, - .set_gid = hns_roce_v1_set_gid, - .set_mac = hns_roce_v1_set_mac, - .set_mtu = hns_roce_v1_set_mtu, - .write_mtpt = hns_roce_v1_write_mtpt, - .write_cqc = hns_roce_v1_write_cqc, - .set_hem = hns_roce_v1_set_hem, - .clear_hem = hns_roce_v1_clear_hem, - .modify_qp = hns_roce_v1_modify_qp, - .dereg_mr = hns_roce_v1_dereg_mr, - .destroy_cq = hns_roce_v1_destroy_cq, - .init_eq = hns_roce_v1_init_eq_table, - .cleanup_eq = hns_roce_v1_cleanup_eq_table, - .hns_roce_dev_ops = &hns_roce_v1_dev_ops, -}; - -static const struct of_device_id hns_roce_of_match[] = { - { .compatible = "hisilicon,hns-roce-v1", .data = &hns_roce_hw_v1, }, - {}, -}; -MODULE_DEVICE_TABLE(of, hns_roce_of_match); - -static const struct acpi_device_id hns_roce_acpi_match[] = { - { "HISI00D1", (kernel_ulong_t)&hns_roce_hw_v1 }, - {}, -}; -MODULE_DEVICE_TABLE(acpi, hns_roce_acpi_match); - -static struct -platform_device *hns_roce_find_pdev(struct fwnode_handle *fwnode) -{ - struct device *dev; - - /* get the 'device' corresponding to the matching 'fwnode' */ - dev = bus_find_device_by_fwnode(&platform_bus_type, fwnode); - /* get the platform device */ - return dev ? 
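/*
 * Generic sketch (plain C, not kernel code) of the reverse-order unwind used
 * by the EQ setup paths above: on failure, free exactly the resources that
 * the earlier loop iterations managed to acquire.
 */
#include <stdlib.h>

static int alloc_all(void **bufs, int n, size_t size)
{
	int i;

	for (i = 0; i < n; i++) {
		bufs[i] = malloc(size);
		if (!bufs[i])
			goto err_unwind;
	}
	return 0;

err_unwind:
	for (i -= 1; i >= 0; i--)      /* mirrors "for (i -= 1; ...)" above */
		free(bufs[i]);
	return -1;
}

int main(void)
{
	void *bufs[8];
	int i;

	if (alloc_all(bufs, 8, 64) == 0) {
		for (i = 0; i < 8; i++)
			free(bufs[i]);
	}
	return 0;
}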
to_platform_device(dev) : NULL; -} - -static int hns_roce_get_cfg(struct hns_roce_dev *hr_dev) -{ - struct device *dev = &hr_dev->pdev->dev; - struct platform_device *pdev = NULL; - struct net_device *netdev = NULL; - struct device_node *net_node; - int port_cnt = 0; - u8 phy_port; - int ret; - int i; - - /* check if we are compatible with the underlying SoC */ - if (dev_of_node(dev)) { - const struct of_device_id *of_id; - - of_id = of_match_node(hns_roce_of_match, dev->of_node); - if (!of_id) { - dev_err(dev, "device is not compatible!\n"); - return -ENXIO; - } - hr_dev->hw = (const struct hns_roce_hw *)of_id->data; - if (!hr_dev->hw) { - dev_err(dev, "couldn't get H/W specific DT data!\n"); - return -ENXIO; - } - } else if (is_acpi_device_node(dev->fwnode)) { - const struct acpi_device_id *acpi_id; - - acpi_id = acpi_match_device(hns_roce_acpi_match, dev); - if (!acpi_id) { - dev_err(dev, "device is not compatible!\n"); - return -ENXIO; - } - hr_dev->hw = (const struct hns_roce_hw *) acpi_id->driver_data; - if (!hr_dev->hw) { - dev_err(dev, "couldn't get H/W specific ACPI data!\n"); - return -ENXIO; - } - } else { - dev_err(dev, "can't read compatibility data from DT or ACPI\n"); - return -ENXIO; - } - - /* get the mapped register base address */ - hr_dev->reg_base = devm_platform_ioremap_resource(hr_dev->pdev, 0); - if (IS_ERR(hr_dev->reg_base)) - return PTR_ERR(hr_dev->reg_base); - - /* read the node_guid of IB device from the DT or ACPI */ - ret = device_property_read_u8_array(dev, "node-guid", - (u8 *)&hr_dev->ib_dev.node_guid, - GUID_LEN); - if (ret) { - dev_err(dev, "couldn't get node_guid from DT or ACPI!\n"); - return ret; - } - - /* get the RoCE associated ethernet ports or netdevices */ - for (i = 0; i < HNS_ROCE_MAX_PORTS; i++) { - if (dev_of_node(dev)) { - net_node = of_parse_phandle(dev->of_node, "eth-handle", - i); - if (!net_node) - continue; - pdev = of_find_device_by_node(net_node); - } else if (is_acpi_device_node(dev->fwnode)) { - struct fwnode_reference_args args; - - ret = acpi_node_get_property_reference(dev->fwnode, - "eth-handle", - i, &args); - if (ret) - continue; - pdev = hns_roce_find_pdev(args.fwnode); - } else { - dev_err(dev, "cannot read data from DT or ACPI\n"); - return -ENXIO; - } - - if (pdev) { - netdev = platform_get_drvdata(pdev); - phy_port = (u8)i; - if (netdev) { - hr_dev->iboe.netdevs[port_cnt] = netdev; - hr_dev->iboe.phy_port[port_cnt] = phy_port; - } else { - dev_err(dev, "no netdev found with pdev %s\n", - pdev->name); - return -ENODEV; - } - port_cnt++; - } - } - - if (port_cnt == 0) { - dev_err(dev, "unable to get eth-handle for available ports!\n"); - return -EINVAL; - } - - hr_dev->caps.num_ports = port_cnt; - - /* cmd issue mode: 0 is poll, 1 is event */ - hr_dev->cmd_mod = 1; - hr_dev->loop_idc = 0; - hr_dev->sdb_offset = ROCEE_DB_SQ_L_0_REG; - hr_dev->odb_offset = ROCEE_DB_OTHERS_L_0_REG; - - /* read the interrupt names from the DT or ACPI */ - ret = device_property_read_string_array(dev, "interrupt-names", - hr_dev->irq_names, - HNS_ROCE_V1_MAX_IRQ_NUM); - if (ret < 0) { - dev_err(dev, "couldn't get interrupt names from DT or ACPI!\n"); - return ret; - } - - /* fetch the interrupt numbers */ - for (i = 0; i < HNS_ROCE_V1_MAX_IRQ_NUM; i++) { - hr_dev->irq[i] = platform_get_irq(hr_dev->pdev, i); - if (hr_dev->irq[i] <= 0) - return -EINVAL; - } - - return 0; -} - -/** - * hns_roce_probe - RoCE driver entrance - * @pdev: pointer to platform device - * Return : int - * - */ -static int hns_roce_probe(struct platform_device *pdev) -{ - 
int ret; - struct hns_roce_dev *hr_dev; - struct device *dev = &pdev->dev; - - hr_dev = ib_alloc_device(hns_roce_dev, ib_dev); - if (!hr_dev) - return -ENOMEM; - - hr_dev->priv = kzalloc(sizeof(struct hns_roce_v1_priv), GFP_KERNEL); - if (!hr_dev->priv) { - ret = -ENOMEM; - goto error_failed_kzalloc; - } - - hr_dev->pdev = pdev; - hr_dev->dev = dev; - platform_set_drvdata(pdev, hr_dev); - - if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64ULL)) && - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32ULL))) { - dev_err(dev, "Not usable DMA addressing mode\n"); - ret = -EIO; - goto error_failed_get_cfg; - } - - ret = hns_roce_get_cfg(hr_dev); - if (ret) { - dev_err(dev, "Get Configuration failed!\n"); - goto error_failed_get_cfg; - } - - ret = hns_roce_init(hr_dev); - if (ret) { - dev_err(dev, "RoCE engine init failed!\n"); - goto error_failed_get_cfg; - } - - return 0; - -error_failed_get_cfg: - kfree(hr_dev->priv); - -error_failed_kzalloc: - ib_dealloc_device(&hr_dev->ib_dev); - - return ret; -} - -/** - * hns_roce_remove - remove RoCE device - * @pdev: pointer to platform device - */ -static int hns_roce_remove(struct platform_device *pdev) -{ - struct hns_roce_dev *hr_dev = platform_get_drvdata(pdev); - - hns_roce_exit(hr_dev); - kfree(hr_dev->priv); - ib_dealloc_device(&hr_dev->ib_dev); - - return 0; -} - -static struct platform_driver hns_roce_driver = { - .probe = hns_roce_probe, - .remove = hns_roce_remove, - .driver = { - .name = DRV_NAME, - .of_match_table = hns_roce_of_match, - .acpi_match_table = ACPI_PTR(hns_roce_acpi_match), - }, -}; - -module_platform_driver(hns_roce_driver); - -MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Wei Hu <xavier.huwei@huawei.com>"); -MODULE_AUTHOR("Nenglong Zhao <zhaonenglong@hisilicon.com>"); -MODULE_AUTHOR("Lijun Ou <oulijun@huawei.com>"); -MODULE_DESCRIPTION("Hisilicon Hip06 Family RoCE Driver"); diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h b/drivers/infiniband/hw/hns/hns_roce_hw_v1.h deleted file mode 100644 index 60fdcbae6729..000000000000 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v1.h +++ /dev/null @@ -1,1147 +0,0 @@ -/* - * Copyright (c) 2016 Hisilicon Limited. - * - * This software is available to you under a choice of one of two - * licenses. You may choose to be licensed under the terms of the GNU - * General Public License (GPL) Version 2, available from the file - * COPYING in the main directory of this source tree, or the - * OpenIB.org BSD license below: - * - * Redistribution and use in source and binary forms, with or - * without modification, are permitted provided that the following - * conditions are met: - * - * - Redistributions of source code must retain the above - * copyright notice, this list of conditions and the following - * disclaimer. - * - * - Redistributions in binary form must reproduce the above - * copyright notice, this list of conditions and the following - * disclaimer in the documentation and/or other materials - * provided with the distribution. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND - * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS - * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN - * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN - * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
- */ - -#ifndef _HNS_ROCE_HW_V1_H -#define _HNS_ROCE_HW_V1_H - -#define CQ_STATE_VALID 2 - -#define HNS_ROCE_V1_MAX_PD_NUM 0x8000 -#define HNS_ROCE_V1_MAX_CQ_NUM 0x10000 -#define HNS_ROCE_V1_MAX_CQE_NUM 0x8000 - -#define HNS_ROCE_V1_MAX_QP_NUM 0x40000 -#define HNS_ROCE_V1_MAX_WQE_NUM 0x4000 - -#define HNS_ROCE_V1_MAX_MTPT_NUM 0x80000 - -#define HNS_ROCE_V1_MAX_MTT_SEGS 0x100000 - -#define HNS_ROCE_V1_MAX_QP_INIT_RDMA 128 -#define HNS_ROCE_V1_MAX_QP_DEST_RDMA 128 - -#define HNS_ROCE_V1_MAX_SQ_DESC_SZ 64 -#define HNS_ROCE_V1_MAX_RQ_DESC_SZ 64 -#define HNS_ROCE_V1_SG_NUM 2 -#define HNS_ROCE_V1_INLINE_SIZE 32 - -#define HNS_ROCE_V1_UAR_NUM 256 -#define HNS_ROCE_V1_PHY_UAR_NUM 8 - -#define HNS_ROCE_V1_GID_NUM 16 -#define HNS_ROCE_V1_RESV_QP 8 - -#define HNS_ROCE_V1_MAX_IRQ_NUM 34 -#define HNS_ROCE_V1_COMP_VEC_NUM 32 -#define HNS_ROCE_V1_AEQE_VEC_NUM 1 -#define HNS_ROCE_V1_ABNORMAL_VEC_NUM 1 - -#define HNS_ROCE_V1_COMP_EQE_NUM 0x8000 -#define HNS_ROCE_V1_ASYNC_EQE_NUM 0x400 - -#define HNS_ROCE_V1_QPC_SIZE 256 -#define HNS_ROCE_V1_IRRL_ENTRY_SIZE 8 -#define HNS_ROCE_V1_CQC_ENTRY_SIZE 64 -#define HNS_ROCE_V1_MTPT_ENTRY_SIZE 64 -#define HNS_ROCE_V1_MTT_ENTRY_SIZE 64 - -#define HNS_ROCE_V1_CQE_SIZE 32 -#define HNS_ROCE_V1_PAGE_SIZE_SUPPORT 0xFFFFF000 - -#define HNS_ROCE_V1_TABLE_CHUNK_SIZE (1 << 17) - -#define HNS_ROCE_V1_EXT_RAQ_WF 8 -#define HNS_ROCE_V1_RAQ_ENTRY 64 -#define HNS_ROCE_V1_RAQ_DEPTH 32768 -#define HNS_ROCE_V1_RAQ_SIZE (HNS_ROCE_V1_RAQ_ENTRY * HNS_ROCE_V1_RAQ_DEPTH) - -#define HNS_ROCE_V1_SDB_DEPTH 0x400 -#define HNS_ROCE_V1_ODB_DEPTH 0x400 - -#define HNS_ROCE_V1_DB_RSVD 0x80 - -#define HNS_ROCE_V1_SDB_ALEPT HNS_ROCE_V1_DB_RSVD -#define HNS_ROCE_V1_SDB_ALFUL (HNS_ROCE_V1_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD) -#define HNS_ROCE_V1_ODB_ALEPT HNS_ROCE_V1_DB_RSVD -#define HNS_ROCE_V1_ODB_ALFUL (HNS_ROCE_V1_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD) - -#define HNS_ROCE_V1_EXT_SDB_DEPTH 0x4000 -#define HNS_ROCE_V1_EXT_ODB_DEPTH 0x4000 -#define HNS_ROCE_V1_EXT_SDB_ENTRY 16 -#define HNS_ROCE_V1_EXT_ODB_ENTRY 16 -#define HNS_ROCE_V1_EXT_SDB_SIZE \ - (HNS_ROCE_V1_EXT_SDB_DEPTH * HNS_ROCE_V1_EXT_SDB_ENTRY) -#define HNS_ROCE_V1_EXT_ODB_SIZE \ - (HNS_ROCE_V1_EXT_ODB_DEPTH * HNS_ROCE_V1_EXT_ODB_ENTRY) - -#define HNS_ROCE_V1_EXT_SDB_ALEPT HNS_ROCE_V1_DB_RSVD -#define HNS_ROCE_V1_EXT_SDB_ALFUL \ - (HNS_ROCE_V1_EXT_SDB_DEPTH - HNS_ROCE_V1_DB_RSVD) -#define HNS_ROCE_V1_EXT_ODB_ALEPT HNS_ROCE_V1_DB_RSVD -#define HNS_ROCE_V1_EXT_ODB_ALFUL \ - (HNS_ROCE_V1_EXT_ODB_DEPTH - HNS_ROCE_V1_DB_RSVD) - -#define HNS_ROCE_V1_FREE_MR_TIMEOUT_MSECS 50000 -#define HNS_ROCE_V1_RECREATE_LP_QP_TIMEOUT_MSECS 10000 -#define HNS_ROCE_V1_FREE_MR_WAIT_VALUE 5 -#define HNS_ROCE_V1_RECREATE_LP_QP_WAIT_VALUE 20 - -#define HNS_ROCE_BT_RSV_BUF_SIZE (1 << 17) - -#define HNS_ROCE_V1_TPTR_ENTRY_SIZE 2 -#define HNS_ROCE_V1_TPTR_BUF_SIZE \ - (HNS_ROCE_V1_TPTR_ENTRY_SIZE * HNS_ROCE_V1_MAX_CQ_NUM) - -#define HNS_ROCE_ODB_POLL_MODE 0 - -#define HNS_ROCE_SDB_NORMAL_MODE 0 -#define HNS_ROCE_SDB_EXTEND_MODE 1 - -#define HNS_ROCE_ODB_EXTEND_MODE 1 - -#define KEY_VALID 0x02 - -#define HNS_ROCE_CQE_QPN_MASK 0x3ffff -#define HNS_ROCE_CQE_STATUS_MASK 0x1f -#define HNS_ROCE_CQE_OPCODE_MASK 0xf - -#define HNS_ROCE_CQE_SUCCESS 0x00 -#define HNS_ROCE_CQE_SYNDROME_LOCAL_LENGTH_ERR 0x01 -#define HNS_ROCE_CQE_SYNDROME_LOCAL_QP_OP_ERR 0x02 -#define HNS_ROCE_CQE_SYNDROME_LOCAL_PROT_ERR 0x03 -#define HNS_ROCE_CQE_SYNDROME_WR_FLUSH_ERR 0x04 -#define HNS_ROCE_CQE_SYNDROME_MEM_MANAGE_OPERATE_ERR 0x05 -#define HNS_ROCE_CQE_SYNDROME_BAD_RESP_ERR 0x06 -#define 
HNS_ROCE_CQE_SYNDROME_LOCAL_ACCESS_ERR 0x07 -#define HNS_ROCE_CQE_SYNDROME_REMOTE_INVAL_REQ_ERR 0x08 -#define HNS_ROCE_CQE_SYNDROME_REMOTE_ACCESS_ERR 0x09 -#define HNS_ROCE_CQE_SYNDROME_REMOTE_OP_ERR 0x0a -#define HNS_ROCE_CQE_SYNDROME_TRANSPORT_RETRY_EXC_ERR 0x0b -#define HNS_ROCE_CQE_SYNDROME_RNR_RETRY_EXC_ERR 0x0c - -#define QP1C_CFGN_OFFSET 0x28 -#define PHY_PORT_OFFSET 0x8 -#define MTPT_IDX_SHIFT 16 -#define ALL_PORT_VAL_OPEN 0x3f -#define POL_TIME_INTERVAL_VAL 0x80 -#define SLEEP_TIME_INTERVAL 20 -#define SQ_PSN_SHIFT 8 -#define QKEY_VAL 0x80010000 -#define SDB_INV_CNT_OFFSET 8 - -#define HNS_ROCE_CEQ_DEFAULT_INTERVAL 0x10 -#define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x10 - -#define HNS_ROCE_INT_MASK_DISABLE 0 -#define HNS_ROCE_INT_MASK_ENABLE 1 - -#define CEQ_REG_OFFSET 0x18 - -#define HNS_ROCE_CEQE_CEQE_COMP_OWNER_S 0 - -#define HNS_ROCE_V1_CONS_IDX_M GENMASK(15, 0) - -#define HNS_ROCE_CEQE_CEQE_COMP_CQN_S 16 -#define HNS_ROCE_CEQE_CEQE_COMP_CQN_M GENMASK(31, 16) - -#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S 16 -#define HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M GENMASK(23, 16) - -#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S 24 -#define HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M GENMASK(30, 24) - -#define HNS_ROCE_AEQE_U32_4_OWNER_S 31 - -#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S 0 -#define HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M GENMASK(23, 0) - -#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S 25 -#define HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M GENMASK(27, 25) - -#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S 0 -#define HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M GENMASK(15, 0) - -#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S 0 -#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0) - -/* Local Work Queue Catastrophic Error,SUBTYPE 0x5 */ -enum { - HNS_ROCE_LWQCE_QPC_ERROR = 1, - HNS_ROCE_LWQCE_MTU_ERROR, - HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR, - HNS_ROCE_LWQCE_WQE_ADDR_ERROR, - HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR, - HNS_ROCE_LWQCE_SL_ERROR, - HNS_ROCE_LWQCE_PORT_ERROR, -}; - -/* Local Access Violation Work Queue Error,SUBTYPE 0x7 */ -enum { - HNS_ROCE_LAVWQE_R_KEY_VIOLATION = 1, - HNS_ROCE_LAVWQE_LENGTH_ERROR, - HNS_ROCE_LAVWQE_VA_ERROR, - HNS_ROCE_LAVWQE_PD_ERROR, - HNS_ROCE_LAVWQE_RW_ACC_ERROR, - HNS_ROCE_LAVWQE_KEY_STATE_ERROR, - HNS_ROCE_LAVWQE_MR_OPERATION_ERROR, -}; - -/* DOORBELL overflow subtype */ -enum { - HNS_ROCE_DB_SUBTYPE_SDB_OVF = 1, - HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF, - HNS_ROCE_DB_SUBTYPE_ODB_OVF, - HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF, - HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP, - HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP, -}; - -enum { - /* RQ&SRQ related operations */ - HNS_ROCE_OPCODE_SEND_DATA_RECEIVE = 0x06, - HNS_ROCE_OPCODE_RDMA_WITH_IMM_RECEIVE, -}; - -enum { - HNS_ROCE_PORT_DOWN = 0, - HNS_ROCE_PORT_UP, -}; - -struct hns_roce_cq_context { - __le32 cqc_byte_4; - __le32 cq_bt_l; - __le32 cqc_byte_12; - __le32 cur_cqe_ba0_l; - __le32 cqc_byte_20; - __le32 cqe_tptr_addr_l; - __le32 cur_cqe_ba1_l; - __le32 cqc_byte_32; -}; - -#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S 0 -#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_M \ - (((1UL << 2) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S) - -#define CQ_CONTEXT_CQC_BYTE_4_CQN_S 16 -#define CQ_CONTEXT_CQC_BYTE_4_CQN_M \ - (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_4_CQN_S) - -#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S 0 -#define CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_M \ - (((1UL << 17) - 1) << CQ_CONTEXT_CQC_BYTE_12_CQ_BT_H_S) - -#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S 20 -#define CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_M \ - (((1UL << 4) - 1) << 
CQ_CONTEXT_CQC_BYTE_12_CQ_CQE_SHIFT_S) - -#define CQ_CONTEXT_CQC_BYTE_12_CEQN_S 24 -#define CQ_CONTEXT_CQC_BYTE_12_CEQN_M \ - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_12_CEQN_S) - -#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S 0 -#define CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_M \ - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CUR_CQE_BA0_H_S) - -#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S 16 -#define CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_M \ - (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQ_CUR_INDEX_S) - -#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S 8 -#define CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_M \ - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_20_CQE_TPTR_ADDR_H_S) - -#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S 0 -#define CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_M \ - (((1UL << 5) - 1) << CQ_CONTEXT_CQC_BYTE_32_CUR_CQE_BA1_H_S) - -#define CQ_CONTEXT_CQC_BYTE_32_SE_FLAG_S 9 - -#define CQ_CONTEXT_CQC_BYTE_32_CE_FLAG_S 8 -#define CQ_CONTEXT_CQC_BYTE_32_NOTIFICATION_FLAG_S 14 -#define CQ_CQNTEXT_CQC_BYTE_32_TYPE_OF_COMPLETION_NOTIFICATION_S 15 - -#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S 16 -#define CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_M \ - (((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S) - -struct hns_roce_cqe { - __le32 cqe_byte_4; - union { - __le32 r_key; - __le32 immediate_data; - }; - __le32 byte_cnt; - __le32 cqe_byte_16; - __le32 cqe_byte_20; - __le32 s_mac_l; - __le32 cqe_byte_28; - __le32 reserved; -}; - -#define CQE_BYTE_4_OWNER_S 7 -#define CQE_BYTE_4_SQ_RQ_FLAG_S 14 - -#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_S 8 -#define CQE_BYTE_4_STATUS_OF_THE_OPERATION_M \ - (((1UL << 5) - 1) << CQE_BYTE_4_STATUS_OF_THE_OPERATION_S) - -#define CQE_BYTE_4_WQE_INDEX_S 16 -#define CQE_BYTE_4_WQE_INDEX_M (((1UL << 14) - 1) << CQE_BYTE_4_WQE_INDEX_S) - -#define CQE_BYTE_4_OPERATION_TYPE_S 0 -#define CQE_BYTE_4_OPERATION_TYPE_M \ - (((1UL << 4) - 1) << CQE_BYTE_4_OPERATION_TYPE_S) - -#define CQE_BYTE_4_IMM_INDICATOR_S 15 - -#define CQE_BYTE_16_LOCAL_QPN_S 0 -#define CQE_BYTE_16_LOCAL_QPN_M (((1UL << 24) - 1) << CQE_BYTE_16_LOCAL_QPN_S) - -#define CQE_BYTE_20_PORT_NUM_S 26 -#define CQE_BYTE_20_PORT_NUM_M (((1UL << 3) - 1) << CQE_BYTE_20_PORT_NUM_S) - -#define CQE_BYTE_20_SL_S 24 -#define CQE_BYTE_20_SL_M (((1UL << 2) - 1) << CQE_BYTE_20_SL_S) - -#define CQE_BYTE_20_REMOTE_QPN_S 0 -#define CQE_BYTE_20_REMOTE_QPN_M \ - (((1UL << 24) - 1) << CQE_BYTE_20_REMOTE_QPN_S) - -#define CQE_BYTE_20_GRH_PRESENT_S 29 - -#define CQE_BYTE_28_P_KEY_IDX_S 16 -#define CQE_BYTE_28_P_KEY_IDX_M (((1UL << 16) - 1) << CQE_BYTE_28_P_KEY_IDX_S) - -#define CQ_DB_REQ_NOT_SOL 0 -#define CQ_DB_REQ_NOT (1 << 16) - -struct hns_roce_v1_mpt_entry { - __le32 mpt_byte_4; - __le32 pbl_addr_l; - __le32 mpt_byte_12; - __le32 virt_addr_l; - __le32 virt_addr_h; - __le32 length; - __le32 mpt_byte_28; - __le32 pa0_l; - __le32 mpt_byte_36; - __le32 mpt_byte_40; - __le32 mpt_byte_44; - __le32 mpt_byte_48; - __le32 pa4_l; - __le32 mpt_byte_56; - __le32 mpt_byte_60; - __le32 mpt_byte_64; -}; - -#define MPT_BYTE_4_KEY_STATE_S 0 -#define MPT_BYTE_4_KEY_STATE_M (((1UL << 2) - 1) << MPT_BYTE_4_KEY_STATE_S) - -#define MPT_BYTE_4_KEY_S 8 -#define MPT_BYTE_4_KEY_M (((1UL << 8) - 1) << MPT_BYTE_4_KEY_S) - -#define MPT_BYTE_4_PAGE_SIZE_S 16 -#define MPT_BYTE_4_PAGE_SIZE_M (((1UL << 2) - 1) << MPT_BYTE_4_PAGE_SIZE_S) - -#define MPT_BYTE_4_MW_TYPE_S 20 - -#define MPT_BYTE_4_MW_BIND_ENABLE_S 21 - -#define MPT_BYTE_4_OWN_S 22 - -#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_S 24 -#define MPT_BYTE_4_MEMORY_LOCATION_TYPE_M \ - (((1UL << 2) - 1) << 
MPT_BYTE_4_MEMORY_LOCATION_TYPE_S) - -#define MPT_BYTE_4_REMOTE_ATOMIC_S 26 -#define MPT_BYTE_4_LOCAL_WRITE_S 27 -#define MPT_BYTE_4_REMOTE_WRITE_S 28 -#define MPT_BYTE_4_REMOTE_READ_S 29 -#define MPT_BYTE_4_REMOTE_INVAL_ENABLE_S 30 -#define MPT_BYTE_4_ADDRESS_TYPE_S 31 - -#define MPT_BYTE_12_PBL_ADDR_H_S 0 -#define MPT_BYTE_12_PBL_ADDR_H_M \ - (((1UL << 17) - 1) << MPT_BYTE_12_PBL_ADDR_H_S) - -#define MPT_BYTE_12_MW_BIND_COUNTER_S 17 -#define MPT_BYTE_12_MW_BIND_COUNTER_M \ - (((1UL << 15) - 1) << MPT_BYTE_12_MW_BIND_COUNTER_S) - -#define MPT_BYTE_28_PD_S 0 -#define MPT_BYTE_28_PD_M (((1UL << 16) - 1) << MPT_BYTE_28_PD_S) - -#define MPT_BYTE_28_L_KEY_IDX_L_S 16 -#define MPT_BYTE_28_L_KEY_IDX_L_M \ - (((1UL << 16) - 1) << MPT_BYTE_28_L_KEY_IDX_L_S) - -#define MPT_BYTE_36_PA0_H_S 0 -#define MPT_BYTE_36_PA0_H_M (((1UL << 5) - 1) << MPT_BYTE_36_PA0_H_S) - -#define MPT_BYTE_36_PA1_L_S 8 -#define MPT_BYTE_36_PA1_L_M (((1UL << 24) - 1) << MPT_BYTE_36_PA1_L_S) - -#define MPT_BYTE_40_PA1_H_S 0 -#define MPT_BYTE_40_PA1_H_M (((1UL << 13) - 1) << MPT_BYTE_40_PA1_H_S) - -#define MPT_BYTE_40_PA2_L_S 16 -#define MPT_BYTE_40_PA2_L_M (((1UL << 16) - 1) << MPT_BYTE_40_PA2_L_S) - -#define MPT_BYTE_44_PA2_H_S 0 -#define MPT_BYTE_44_PA2_H_M (((1UL << 21) - 1) << MPT_BYTE_44_PA2_H_S) - -#define MPT_BYTE_44_PA3_L_S 24 -#define MPT_BYTE_44_PA3_L_M (((1UL << 8) - 1) << MPT_BYTE_44_PA3_L_S) - -#define MPT_BYTE_48_PA3_H_S 0 -#define MPT_BYTE_48_PA3_H_M (((1UL << 29) - 1) << MPT_BYTE_48_PA3_H_S) - -#define MPT_BYTE_56_PA4_H_S 0 -#define MPT_BYTE_56_PA4_H_M (((1UL << 5) - 1) << MPT_BYTE_56_PA4_H_S) - -#define MPT_BYTE_56_PA5_L_S 8 -#define MPT_BYTE_56_PA5_L_M (((1UL << 24) - 1) << MPT_BYTE_56_PA5_L_S) - -#define MPT_BYTE_60_PA5_H_S 0 -#define MPT_BYTE_60_PA5_H_M (((1UL << 13) - 1) << MPT_BYTE_60_PA5_H_S) - -#define MPT_BYTE_60_PA6_L_S 16 -#define MPT_BYTE_60_PA6_L_M (((1UL << 16) - 1) << MPT_BYTE_60_PA6_L_S) - -#define MPT_BYTE_64_PA6_H_S 0 -#define MPT_BYTE_64_PA6_H_M (((1UL << 21) - 1) << MPT_BYTE_64_PA6_H_S) - -#define MPT_BYTE_64_L_KEY_IDX_H_S 24 -#define MPT_BYTE_64_L_KEY_IDX_H_M \ - (((1UL << 8) - 1) << MPT_BYTE_64_L_KEY_IDX_H_S) - -struct hns_roce_wqe_ctrl_seg { - __le32 sgl_pa_h; - __le32 flag; - union { - __be32 imm_data; - __le32 inv_key; - }; - __le32 msg_length; -}; - -struct hns_roce_wqe_data_seg { - __le64 addr; - __le32 lkey; - __le32 len; -}; - -struct hns_roce_wqe_raddr_seg { - __le32 rkey; - __le32 len; /* reserved */ - __le64 raddr; -}; - -struct hns_roce_rq_wqe_ctrl { - __le32 rwqe_byte_4; - __le32 rocee_sgl_ba_l; - __le32 rwqe_byte_12; - __le32 reserved[5]; -}; - -#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S 16 -#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_M \ - (((1UL << 6) - 1) << RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S) - -#define HNS_ROCE_QP_DESTROY_TIMEOUT_MSECS 10000 - -#define GID_LEN 16 - -struct hns_roce_ud_send_wqe { - __le32 dmac_h; - __le32 u32_8; - __le32 immediate_data; - - __le32 u32_16; - union { - unsigned char dgid[GID_LEN]; - struct { - __le32 u32_20; - __le32 u32_24; - __le32 u32_28; - __le32 u32_32; - }; - }; - - __le32 u32_36; - __le32 u32_40; - - __le32 va0_l; - __le32 va0_h; - __le32 l_key0; - - __le32 va1_l; - __le32 va1_h; - __le32 l_key1; -}; - -#define UD_SEND_WQE_U32_4_DMAC_0_S 0 -#define UD_SEND_WQE_U32_4_DMAC_0_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_0_S) - -#define UD_SEND_WQE_U32_4_DMAC_1_S 8 -#define UD_SEND_WQE_U32_4_DMAC_1_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_1_S) - -#define UD_SEND_WQE_U32_4_DMAC_2_S 16 -#define 
UD_SEND_WQE_U32_4_DMAC_2_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_2_S) - -#define UD_SEND_WQE_U32_4_DMAC_3_S 24 -#define UD_SEND_WQE_U32_4_DMAC_3_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_4_DMAC_3_S) - -#define UD_SEND_WQE_U32_8_DMAC_4_S 0 -#define UD_SEND_WQE_U32_8_DMAC_4_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_4_S) - -#define UD_SEND_WQE_U32_8_DMAC_5_S 8 -#define UD_SEND_WQE_U32_8_DMAC_5_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_8_DMAC_5_S) - -#define UD_SEND_WQE_U32_8_LOOPBACK_INDICATOR_S 22 - -#define UD_SEND_WQE_U32_8_OPERATION_TYPE_S 16 -#define UD_SEND_WQE_U32_8_OPERATION_TYPE_M \ - (((1UL << 4) - 1) << UD_SEND_WQE_U32_8_OPERATION_TYPE_S) - -#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S 24 -#define UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_M \ - (((1UL << 6) - 1) << UD_SEND_WQE_U32_8_NUMBER_OF_DATA_SEG_S) - -#define UD_SEND_WQE_U32_8_SEND_GL_ROUTING_HDR_FLAG_S 31 - -#define UD_SEND_WQE_U32_16_DEST_QP_S 0 -#define UD_SEND_WQE_U32_16_DEST_QP_M \ - (((1UL << 24) - 1) << UD_SEND_WQE_U32_16_DEST_QP_S) - -#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S 24 -#define UD_SEND_WQE_U32_16_MAX_STATIC_RATE_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_16_MAX_STATIC_RATE_S) - -#define UD_SEND_WQE_U32_36_FLOW_LABEL_S 0 -#define UD_SEND_WQE_U32_36_FLOW_LABEL_M \ - (((1UL << 20) - 1) << UD_SEND_WQE_U32_36_FLOW_LABEL_S) - -#define UD_SEND_WQE_U32_36_PRIORITY_S 20 -#define UD_SEND_WQE_U32_36_PRIORITY_M \ - (((1UL << 4) - 1) << UD_SEND_WQE_U32_36_PRIORITY_S) - -#define UD_SEND_WQE_U32_36_SGID_INDEX_S 24 -#define UD_SEND_WQE_U32_36_SGID_INDEX_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_36_SGID_INDEX_S) - -#define UD_SEND_WQE_U32_40_HOP_LIMIT_S 0 -#define UD_SEND_WQE_U32_40_HOP_LIMIT_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_HOP_LIMIT_S) - -#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S 8 -#define UD_SEND_WQE_U32_40_TRAFFIC_CLASS_M \ - (((1UL << 8) - 1) << UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S) - -struct hns_roce_sqp_context { - __le32 qp1c_bytes_4; - __le32 sq_rq_bt_l; - __le32 qp1c_bytes_12; - __le32 qp1c_bytes_16; - __le32 qp1c_bytes_20; - __le32 cur_rq_wqe_ba_l; - __le32 qp1c_bytes_28; - __le32 qp1c_bytes_32; - __le32 cur_sq_wqe_ba_l; - __le32 qp1c_bytes_40; -}; - -#define QP1C_BYTES_4_QP_STATE_S 0 -#define QP1C_BYTES_4_QP_STATE_M \ - (((1UL << 3) - 1) << QP1C_BYTES_4_QP_STATE_S) - -#define QP1C_BYTES_4_SQ_WQE_SHIFT_S 8 -#define QP1C_BYTES_4_SQ_WQE_SHIFT_M \ - (((1UL << 4) - 1) << QP1C_BYTES_4_SQ_WQE_SHIFT_S) - -#define QP1C_BYTES_4_RQ_WQE_SHIFT_S 12 -#define QP1C_BYTES_4_RQ_WQE_SHIFT_M \ - (((1UL << 4) - 1) << QP1C_BYTES_4_RQ_WQE_SHIFT_S) - -#define QP1C_BYTES_4_PD_S 16 -#define QP1C_BYTES_4_PD_M (((1UL << 16) - 1) << QP1C_BYTES_4_PD_S) - -#define QP1C_BYTES_12_SQ_RQ_BT_H_S 0 -#define QP1C_BYTES_12_SQ_RQ_BT_H_M \ - (((1UL << 17) - 1) << QP1C_BYTES_12_SQ_RQ_BT_H_S) - -#define QP1C_BYTES_16_RQ_HEAD_S 0 -#define QP1C_BYTES_16_RQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_16_RQ_HEAD_S) - -#define QP1C_BYTES_16_PORT_NUM_S 16 -#define QP1C_BYTES_16_PORT_NUM_M \ - (((1UL << 3) - 1) << QP1C_BYTES_16_PORT_NUM_S) - -#define QP1C_BYTES_16_SIGNALING_TYPE_S 27 -#define QP1C_BYTES_16_LOCAL_ENABLE_E2E_CREDIT_S 28 -#define QP1C_BYTES_16_RQ_BA_FLG_S 29 -#define QP1C_BYTES_16_SQ_BA_FLG_S 30 -#define QP1C_BYTES_16_QP1_ERR_S 31 - -#define QP1C_BYTES_20_SQ_HEAD_S 0 -#define QP1C_BYTES_20_SQ_HEAD_M (((1UL << 15) - 1) << QP1C_BYTES_20_SQ_HEAD_S) - -#define QP1C_BYTES_20_PKEY_IDX_S 16 -#define QP1C_BYTES_20_PKEY_IDX_M \ - (((1UL << 16) - 1) << QP1C_BYTES_20_PKEY_IDX_S) - -#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S 0 
-#define QP1C_BYTES_28_CUR_RQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP1C_BYTES_28_CUR_RQ_WQE_BA_H_S) - -#define QP1C_BYTES_28_RQ_CUR_IDX_S 16 -#define QP1C_BYTES_28_RQ_CUR_IDX_M \ - (((1UL << 15) - 1) << QP1C_BYTES_28_RQ_CUR_IDX_S) - -#define QP1C_BYTES_32_TX_CQ_NUM_S 0 -#define QP1C_BYTES_32_TX_CQ_NUM_M \ - (((1UL << 16) - 1) << QP1C_BYTES_32_TX_CQ_NUM_S) - -#define QP1C_BYTES_32_RX_CQ_NUM_S 16 -#define QP1C_BYTES_32_RX_CQ_NUM_M \ - (((1UL << 16) - 1) << QP1C_BYTES_32_RX_CQ_NUM_S) - -#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S 0 -#define QP1C_BYTES_40_CUR_SQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP1C_BYTES_40_CUR_SQ_WQE_BA_H_S) - -#define QP1C_BYTES_40_SQ_CUR_IDX_S 16 -#define QP1C_BYTES_40_SQ_CUR_IDX_M \ - (((1UL << 15) - 1) << QP1C_BYTES_40_SQ_CUR_IDX_S) - -#define HNS_ROCE_WQE_INLINE (1UL<<31) -#define HNS_ROCE_WQE_SE (1UL<<30) - -#define HNS_ROCE_WQE_SGE_NUM_BIT 24 -#define HNS_ROCE_WQE_IMM (1UL<<23) -#define HNS_ROCE_WQE_FENCE (1UL<<21) -#define HNS_ROCE_WQE_CQ_NOTIFY (1UL<<20) - -#define HNS_ROCE_WQE_OPCODE_SEND (0<<16) -#define HNS_ROCE_WQE_OPCODE_RDMA_READ (1<<16) -#define HNS_ROCE_WQE_OPCODE_RDMA_WRITE (2<<16) -#define HNS_ROCE_WQE_OPCODE_LOCAL_INV (4<<16) -#define HNS_ROCE_WQE_OPCODE_UD_SEND (7<<16) -#define HNS_ROCE_WQE_OPCODE_MASK (15<<16) - -struct hns_roce_qp_context { - __le32 qpc_bytes_4; - __le32 qpc_bytes_8; - __le32 qpc_bytes_12; - __le32 qpc_bytes_16; - __le32 sq_rq_bt_l; - __le32 qpc_bytes_24; - __le32 irrl_ba_l; - __le32 qpc_bytes_32; - __le32 qpc_bytes_36; - __le32 dmac_l; - __le32 qpc_bytes_44; - __le32 qpc_bytes_48; - u8 dgid[16]; - __le32 qpc_bytes_68; - __le32 cur_rq_wqe_ba_l; - __le32 qpc_bytes_76; - __le32 rx_rnr_time; - __le32 qpc_bytes_84; - __le32 qpc_bytes_88; - union { - __le32 rx_sge_len; - __le32 dma_length; - }; - union { - __le32 rx_sge_num; - __le32 rx_send_pktn; - __le32 r_key; - }; - __le32 va_l; - __le32 va_h; - __le32 qpc_bytes_108; - __le32 qpc_bytes_112; - __le32 rx_cur_sq_wqe_ba_l; - __le32 qpc_bytes_120; - __le32 qpc_bytes_124; - __le32 qpc_bytes_128; - __le32 qpc_bytes_132; - __le32 qpc_bytes_136; - __le32 qpc_bytes_140; - __le32 qpc_bytes_144; - __le32 qpc_bytes_148; - union { - __le32 rnr_retry; - __le32 ack_time; - }; - __le32 qpc_bytes_156; - __le32 pkt_use_len; - __le32 qpc_bytes_164; - __le32 qpc_bytes_168; - union { - __le32 sge_use_len; - __le32 pa_use_len; - }; - __le32 qpc_bytes_176; - __le32 qpc_bytes_180; - __le32 tx_cur_sq_wqe_ba_l; - __le32 qpc_bytes_188; - __le32 rvd21; -}; - -#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S 0 -#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S) - -#define QP_CONTEXT_QPC_BYTE_4_ENABLE_FPMR_S 3 -#define QP_CONTEXT_QPC_BYTE_4_RDMA_READ_ENABLE_S 4 -#define QP_CONTEXT_QPC_BYTE_4_RDMA_WRITE_ENABLE_S 5 -#define QP_CONTEXT_QPC_BYTE_4_ATOMIC_OPERATION_ENABLE_S 6 -#define QP_CONTEXT_QPC_BYTE_4_RDMAR_USE_S 7 - -#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S 8 -#define QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_M \ - (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_SQ_WQE_SHIFT_S) - -#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S 12 -#define QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_M \ - (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_4_RQ_WQE_SHIFT_S) - -#define QP_CONTEXT_QPC_BYTES_4_PD_S 16 -#define QP_CONTEXT_QPC_BYTES_4_PD_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_4_PD_S) - -#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S 0 -#define QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_TX_COMPLETION_S) - 
-#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S 16 -#define QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_8_RX_COMPLETION_S) - -#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S 0 -#define QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_SRQ_NUMBER_S) - -#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S 16 -#define QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_12_P_KEY_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_S 0 -#define QP_CONTEXT_QPC_BYTES_16_QP_NUM_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_16_QP_NUM_S) - -#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S 0 -#define QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_M \ - (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_24_SQ_RQ_BT_H_S) - -#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S 18 -#define QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_24_MINIMUM_RNR_NAK_TIMER_S) - -#define QP_CONTEXT_QPC_BYTE_24_REMOTE_ENABLE_E2E_CREDITS_S 23 - -#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S 0 -#define QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_M \ - (((1UL << 17) - 1) << QP_CONTEXT_QPC_BYTES_32_IRRL_BA_H_S) - -#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S 18 -#define QP_CONTEXT_QPC_BYTES_32_MIG_STATE_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_32_MIG_STATE_S) - -#define QP_CONTEXT_QPC_BYTE_32_LOCAL_ENABLE_E2E_CREDITS_S 20 -#define QP_CONTEXT_QPC_BYTE_32_SIGNALING_TYPE_S 21 -#define QP_CONTEXT_QPC_BYTE_32_LOOPBACK_INDICATOR_S 22 -#define QP_CONTEXT_QPC_BYTE_32_GLOBAL_HEADER_S 23 - -#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S 24 -#define QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_32_RESPONDER_RESOURCES_S) - -#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_S 0 -#define QP_CONTEXT_QPC_BYTES_36_DEST_QP_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_36_DEST_QP_S) - -#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S 24 -#define QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_36_SGID_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_S 0 -#define QP_CONTEXT_QPC_BYTES_44_DMAC_H_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_44_DMAC_H_S) - -#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S 16 -#define QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_MAXIMUM_STATIC_RATE_S) - -#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_S 24 -#define QP_CONTEXT_QPC_BYTES_44_HOPLMT_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_44_HOPLMT_S) - -#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S 0 -#define QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_M \ - (((1UL << 20) - 1) << QP_CONTEXT_QPC_BYTES_48_FLOWLABEL_S) - -#define QP_CONTEXT_QPC_BYTES_48_TCLASS_S 20 -#define QP_CONTEXT_QPC_BYTES_48_TCLASS_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_48_TCLASS_S) - -#define QP_CONTEXT_QPC_BYTES_48_MTU_S 28 -#define QP_CONTEXT_QPC_BYTES_48_MTU_M \ - (((1UL << 4) - 1) << QP_CONTEXT_QPC_BYTES_48_MTU_S) - -#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S 0 -#define QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_HEAD_S) - -#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S 16 -#define QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_68_RQ_CUR_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S 0 -#define QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_76_CUR_RQ_WQE_BA_H_S) - -#define 
QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S 8 -#define QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_76_RX_REQ_MSN_S) - -#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_84_LAST_ACK_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S 24 -#define QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_84_TRRL_HEAD_S) - -#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S 0 -#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_88_RX_REQ_EPSN_S) - -#define QP_CONTEXT_QPC_BYTES_88_RX_REQ_PSN_ERR_FLAG_S 24 -#define QP_CONTEXT_QPC_BYTES_88_RX_LAST_OPCODE_FLG_S 25 - -#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S 26 -#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_M \ - (((1UL << 2) - 1) << \ - QP_CONTEXT_QPC_BYTES_88_RQ_REQ_LAST_OPERATION_TYPE_S) - -#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S 29 -#define QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_88_RQ_REQ_RDMA_WR_FLAG_S) - -#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_108_TRRL_SDB_PSN_FLG_S 24 -#define QP_CONTEXT_QPC_BYTES_108_TRRL_TDB_PSN_FLG_S 25 - -#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TDB_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S 24 -#define QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_112_TRRL_TAIL_S) - -#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S 0 -#define QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_120_RX_CUR_SQ_WQE_BA_H_S) - -#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S 0 -#define QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_RX_ACK_MSN_S) - -#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S 16 -#define QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_124_IRRL_MSG_IDX_S) - -#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S 0 -#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_128_RX_ACK_EPSN_S) - -#define QP_CONTEXT_QPC_BYTES_128_RX_ACK_PSN_ERR_FLG_S 24 - -#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S 25 -#define QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_128_ACK_LAST_OPERATION_TYPE_S) - -#define QP_CONTEXT_QPC_BYTES_128_IRRL_PSN_VLD_FLG_S 27 - -#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S 24 -#define QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_132_IRRL_TAIL_S) - -#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S 24 -#define QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_136_RETRY_MSG_FPKT_PSN_L_S) - -#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S 0 -#define 
QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_FPKT_PSN_H_S) - -#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S 16 -#define QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_140_RETRY_MSG_MSN_S) - -#define QP_CONTEXT_QPC_BYTES_140_RNR_RETRY_FLG_S 31 - -#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_S 0 -#define QP_CONTEXT_QPC_BYTES_144_QP_STATE_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_144_QP_STATE_S) - -#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S 0 -#define QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_148_CHECK_FLAG_S) - -#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S 2 -#define QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RETRY_COUNT_S) - -#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S 5 -#define QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_148_RNR_RETRY_COUNT_S) - -#define QP_CONTEXT_QPC_BYTES_148_LSN_S 8 -#define QP_CONTEXT_QPC_BYTES_148_LSN_M \ - (((1UL << 16) - 1) << QP_CONTEXT_QPC_BYTES_148_LSN_S) - -#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S 0 -#define QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RETRY_COUNT_INIT_S) - -#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S 3 -#define QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_TIMEOUT_S) - -#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S 8 -#define QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_RNR_RETRY_COUNT_INIT_S) - -#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S 11 -#define QP_CONTEXT_QPC_BYTES_156_PORT_NUM_M \ - (((1UL << 3) - 1) << QP_CONTEXT_QPC_BYTES_156_PORT_NUM_S) - -#define QP_CONTEXT_QPC_BYTES_156_SL_S 14 -#define QP_CONTEXT_QPC_BYTES_156_SL_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_SL_S) - -#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S 16 -#define QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_156_INITIATOR_DEPTH_S) - -#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S 24 -#define QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_156_ACK_REQ_IND_S) - -#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_164_SQ_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_164_SQ_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S 24 -#define QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_M \ - (((1UL << 8) - 1) << QP_CONTEXT_QPC_BYTES_164_IRRL_HEAD_S) - -#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S 0 -#define QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_M \ - (((1UL << 24) - 1) << QP_CONTEXT_QPC_BYTES_168_RETRY_SQ_PSN_S) - -#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S 24 -#define QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_SGE_USE_FLA_S) - -#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S 26 -#define QP_CONTEXT_QPC_BYTES_168_DB_TYPE_M \ - (((1UL << 2) - 1) << QP_CONTEXT_QPC_BYTES_168_DB_TYPE_S) - -#define QP_CONTEXT_QPC_BYTES_168_MSG_LP_IND_S 28 -#define QP_CONTEXT_QPC_BYTES_168_CSDB_LP_IND_S 29 -#define QP_CONTEXT_QPC_BYTES_168_QP_ERR_FLG_S 30 - -#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S 0 -#define QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_DB_CUR_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S 16 -#define 
QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_176_RETRY_DB_CUR_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S 0 -#define QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_HEAD_S) - -#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S 16 -#define QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_180_SQ_CUR_INDEX_S) - -#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S 0 -#define QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_M \ - (((1UL << 5) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_CUR_SQ_WQE_BA_H_S) - -#define QP_CONTEXT_QPC_BYTES_188_PKT_RETRY_FLG_S 8 - -#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S 16 -#define QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_M \ - (((1UL << 15) - 1) << QP_CONTEXT_QPC_BYTES_188_TX_RETRY_CUR_INDEX_S) - -#define STATUS_MASK 0xff -#define GO_BIT_TIMEOUT_MSECS 10000 -#define HCR_STATUS_OFFSET 0x18 -#define HCR_GO_BIT 15 - -struct hns_roce_rq_db { - __le32 u32_4; - __le32 u32_8; -}; - -#define RQ_DOORBELL_U32_4_RQ_HEAD_S 0 -#define RQ_DOORBELL_U32_4_RQ_HEAD_M \ - (((1UL << 15) - 1) << RQ_DOORBELL_U32_4_RQ_HEAD_S) - -#define RQ_DOORBELL_U32_8_QPN_S 0 -#define RQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << RQ_DOORBELL_U32_8_QPN_S) - -#define RQ_DOORBELL_U32_8_CMD_S 28 -#define RQ_DOORBELL_U32_8_CMD_M (((1UL << 3) - 1) << RQ_DOORBELL_U32_8_CMD_S) - -#define RQ_DOORBELL_U32_8_HW_SYNC_S 31 - -struct hns_roce_sq_db { - __le32 u32_4; - __le32 u32_8; -}; - -#define SQ_DOORBELL_U32_4_SQ_HEAD_S 0 -#define SQ_DOORBELL_U32_4_SQ_HEAD_M \ - (((1UL << 15) - 1) << SQ_DOORBELL_U32_4_SQ_HEAD_S) - -#define SQ_DOORBELL_U32_4_SL_S 16 -#define SQ_DOORBELL_U32_4_SL_M \ - (((1UL << 2) - 1) << SQ_DOORBELL_U32_4_SL_S) - -#define SQ_DOORBELL_U32_4_PORT_S 18 -#define SQ_DOORBELL_U32_4_PORT_M (((1UL << 3) - 1) << SQ_DOORBELL_U32_4_PORT_S) - -#define SQ_DOORBELL_U32_8_QPN_S 0 -#define SQ_DOORBELL_U32_8_QPN_M (((1UL << 24) - 1) << SQ_DOORBELL_U32_8_QPN_S) - -#define SQ_DOORBELL_HW_SYNC_S 31 - -struct hns_roce_ext_db { - int esdb_dep; - int eodb_dep; - struct hns_roce_buf_list *sdb_buf_list; - struct hns_roce_buf_list *odb_buf_list; -}; - -struct hns_roce_db_table { - int sdb_ext_mod; - int odb_ext_mod; - struct hns_roce_ext_db *ext_db; -}; - -#define HW_SYNC_SLEEP_TIME_INTERVAL 20 -#define HW_SYNC_TIMEOUT_MSECS (25 * HW_SYNC_SLEEP_TIME_INTERVAL) -#define BT_CMD_SYNC_SHIFT 31 -#define HNS_ROCE_BA_SIZE (32 * 4096) - -struct hns_roce_bt_table { - struct hns_roce_buf_list qpc_buf; - struct hns_roce_buf_list mtpt_buf; - struct hns_roce_buf_list cqc_buf; -}; - -struct hns_roce_tptr_table { - struct hns_roce_buf_list tptr_buf; -}; - -struct hns_roce_qp_work { - struct work_struct work; - struct ib_device *ib_dev; - struct hns_roce_qp *qp; - u32 db_wait_stage; - u32 sdb_issue_ptr; - u32 sdb_inv_cnt; - u32 sche_cnt; -}; - -struct hns_roce_mr_free_work { - struct work_struct work; - struct ib_device *ib_dev; - struct completion *comp; - int comp_flag; - void *mr; -}; - -struct hns_roce_recreate_lp_qp_work { - struct work_struct work; - struct ib_device *ib_dev; - struct completion *comp; - int comp_flag; -}; - -struct hns_roce_free_mr { - struct workqueue_struct *free_mr_wq; - struct hns_roce_qp *mr_free_qp[HNS_ROCE_V1_RESV_QP]; - struct hns_roce_cq *mr_free_cq; - struct hns_roce_pd *mr_free_pd; -}; - -struct hns_roce_v1_priv { - struct hns_roce_db_table db_table; - struct hns_roce_raq_table raq_table; - struct hns_roce_bt_table bt_table; - struct hns_roce_tptr_table 
tptr_table;
- struct hns_roce_free_mr free_mr;
-};
-
-int hns_dsaf_roce_reset(struct fwnode_handle *dsaf_fwnode, bool dereset);
-int hns_roce_v1_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc);
-int hns_roce_v1_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata);
-
-#endif
diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
index d5f3faa1627a..b33e948fd060 100644
--- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
+++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.c
@@ -33,6 +33,7 @@
 #include <linux/acpi.h>
 #include <linux/etherdevice.h>
 #include <linux/interrupt.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
 #include <net/addrconf.h>
@@ -677,6 +678,7 @@ static void hns_roce_write512(struct hns_roce_dev *hr_dev, u64 *val,
 static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
 void *wqe)
 {
+#define HNS_ROCE_SL_SHIFT 2
 struct hns_roce_v2_rc_send_wqe *rc_sq_wqe = wqe;
 /* All kinds of DirectWQE have the same header field layout */
@@ -684,7 +686,8 @@ static void write_dwqe(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
 roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_L_M,
 V2_RC_SEND_WQE_BYTE_4_DB_SL_L_S, qp->sl);
 roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_DB_SL_H_M,
- V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S, qp->sl >> 2);
+ V2_RC_SEND_WQE_BYTE_4_DB_SL_H_S,
+ qp->sl >> HNS_ROCE_SL_SHIFT);
 roce_set_field(rc_sq_wqe->byte_4, V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_M,
 V2_RC_SEND_WQE_BYTE_4_WQE_INDEX_S, qp->sq.head);
@@ -1050,9 +1053,14 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
 unsigned long instance_stage,
 unsigned long reset_stage)
 {
+#define HW_RESET_TIMEOUT_US 1000000
+#define HW_RESET_SLEEP_US 1000
+
 struct hns_roce_v2_priv *priv = hr_dev->priv;
 struct hnae3_handle *handle = priv->handle;
 const struct hnae3_ae_ops *ops = handle->ae_algo->ops;
+ unsigned long val;
+ int ret;
 /* When hardware reset is detected, we should stop sending mailbox&cmq&
 * doorbell to hardware. If now in .init_instance() function, we should
@@ -1064,7 +1072,11 @@ static u32 hns_roce_v2_cmd_hw_resetting(struct hns_roce_dev *hr_dev,
 * again.
 */
 hr_dev->dis_db = true;
- if (!ops->get_hw_reset_stat(handle))
+
+ ret = read_poll_timeout(ops->ae_dev_reset_cnt, val,
+ val > hr_dev->reset_cnt, HW_RESET_SLEEP_US,
+ HW_RESET_TIMEOUT_US, false, handle);
+ if (!ret)
 hr_dev->is_reset = true;
 if (!hr_dev->is_reset || reset_stage == HNS_ROCE_STATE_RST_INIT ||
@@ -1165,32 +1177,22 @@ static int hns_roce_alloc_cmq_desc(struct hns_roce_dev *hr_dev,
 {
 int size = ring->desc_num * sizeof(struct hns_roce_cmq_desc);
- ring->desc = kzalloc(size, GFP_KERNEL);
+ ring->desc = dma_alloc_coherent(hr_dev->dev, size,
+ &ring->desc_dma_addr, GFP_KERNEL);
 if (!ring->desc)
 return -ENOMEM;
- ring->desc_dma_addr = dma_map_single(hr_dev->dev, ring->desc, size,
- DMA_BIDIRECTIONAL);
- if (dma_mapping_error(hr_dev->dev, ring->desc_dma_addr)) {
- ring->desc_dma_addr = 0;
- kfree(ring->desc);
- ring->desc = NULL;
-
- return -ENOMEM;
- }
-
 return 0;
 }
 static void hns_roce_free_cmq_desc(struct hns_roce_dev *hr_dev,
 struct hns_roce_v2_cmq_ring *ring)
 {
- dma_unmap_single(hr_dev->dev, ring->desc_dma_addr,
- ring->desc_num * sizeof(struct hns_roce_cmq_desc),
- DMA_BIDIRECTIONAL);
+ dma_free_coherent(hr_dev->dev,
+ ring->desc_num * sizeof(struct hns_roce_cmq_desc),
+ ring->desc, ring->desc_dma_addr);
 ring->desc_dma_addr = 0;
- kfree(ring->desc);
 }
 static int init_csq(struct hns_roce_dev *hr_dev,
@@ -1305,14 +1307,14 @@ static int __hns_roce_cmq_send(struct hns_roce_dev *hr_dev,
 continue;
 dev_err_ratelimited(hr_dev->dev,
- "Cmdq IO error, opcode = %x, return = %x\n",
+ "Cmdq IO error, opcode = 0x%x, return = 0x%x.\n",
 desc->opcode, desc_ret);
 ret = -EIO;
 }
 } else {
 /* FW/HW reset or incorrect number of desc */
 tail = roce_read(hr_dev, ROCEE_TX_CMQ_CI_REG);
- dev_warn(hr_dev->dev, "CMDQ move tail from %d to %d\n",
+ dev_warn(hr_dev->dev, "CMDQ move tail from %u to %u.\n",
 csq->head, tail);
 csq->head = tail;
@@ -1571,7 +1573,7 @@ static int hns_roce_query_func_info(struct hns_roce_dev *hr_dev)
 struct hns_roce_cmq_desc desc;
 int ret;
- if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09) {
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) {
 hr_dev->func_num = 1;
 return 0;
 }
@@ -1594,11 +1596,17 @@ static int hns_roce_config_global_param(struct hns_roce_dev *hr_dev)
 {
 struct hns_roce_cmq_desc desc;
 struct hns_roce_cmq_req *req = (struct hns_roce_cmq_req *)desc.data;
+ u32 clock_cycles_of_1us;
 hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_CFG_GLOBAL_PARAM,
 false);
- hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, 0x3e8);
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
+ clock_cycles_of_1us = HNS_ROCE_1NS_CFG;
+ else
+ clock_cycles_of_1us = HNS_ROCE_1US_CFG;
+
+ hr_reg_write(req, CFG_GLOBAL_PARAM_1US_CYCLES, clock_cycles_of_1us);
 hr_reg_write(req, CFG_GLOBAL_PARAM_UDP_PORT, ROCE_V2_UDP_DPORT);
 return hns_roce_cmq_send(hr_dev, &desc, 1);
@@ -1997,7 +2005,8 @@ static void set_default_caps(struct hns_roce_dev *hr_dev)
 caps->gid_table_len[0] = HNS_ROCE_V2_GID_INDEX_NUM;
 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
- caps->flags |= HNS_ROCE_CAP_FLAG_STASH;
+ caps->flags |= HNS_ROCE_CAP_FLAG_STASH |
+ HNS_ROCE_CAP_FLAG_DIRECT_WQE;
 caps->max_sq_inline = HNS_ROCE_V3_MAX_SQ_INLINE;
 } else {
 caps->max_sq_inline = HNS_ROCE_V2_MAX_SQ_INLINE;
@@ -2138,7 +2147,6 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
 caps->cqc_timer_entry_sz = HNS_ROCE_V2_CQC_TIMER_ENTRY_SZ;
 caps->mtt_entry_sz = HNS_ROCE_V2_MTT_ENTRY_SZ;
- caps->eqe_hop_num = HNS_ROCE_EQE_HOP_NUM;
 caps->pbl_hop_num = HNS_ROCE_PBL_HOP_NUM;
 caps->qpc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
 caps->cqc_timer_hop_num = HNS_ROCE_HOP_NUM_0;
@@ -2155,6 +2163,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
 (u32)priv->handle->rinfo.num_vectors - 2);
 if (hr_dev->pci_dev->revision >= PCI_REVISION_ID_HIP09) {
+ caps->eqe_hop_num = HNS_ROCE_V3_EQE_HOP_NUM;
 caps->ceqe_size = HNS_ROCE_V3_EQE_SIZE;
 caps->aeqe_size = HNS_ROCE_V3_EQE_SIZE;
@@ -2175,6 +2184,7 @@ static void apply_func_caps(struct hns_roce_dev *hr_dev)
 } else {
 u32 func_num = max_t(u32, 1, hr_dev->func_num);
+ caps->eqe_hop_num = HNS_ROCE_V2_EQE_HOP_NUM;
 caps->ceqe_size = HNS_ROCE_CEQE_SIZE;
 caps->aeqe_size = HNS_ROCE_AEQE_SIZE;
 caps->gid_table_len[0] /= func_num;
@@ -2387,7 +2397,7 @@ static int hns_roce_config_entry_size(struct hns_roce_dev *hr_dev)
 struct hns_roce_caps *caps = &hr_dev->caps;
 int ret;
- if (hr_dev->pci_dev->revision < PCI_REVISION_ID_HIP09)
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08)
 return 0;
 ret = config_hem_entry_size(hr_dev, HNS_ROCE_CFG_QPC_SIZE,
@@ -2961,8 +2971,8 @@ static int config_gmv_table(struct hns_roce_dev *hr_dev,
 return hns_roce_cmq_send(hr_dev, desc, 2);
 }
-static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port,
- int gid_index, const union ib_gid *gid,
+static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, int gid_index,
+ const union ib_gid *gid,
 const struct ib_gid_attr *attr)
 {
 enum hns_roce_sgid_type sgid_type = GID_TYPE_FLAG_ROCE_V1;
@@ -2992,7 +3002,7 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u32 port,
 }
 static int hns_roce_v2_set_mac(struct hns_roce_dev *hr_dev, u8 phy_port,
- u8 *addr)
+ const u8 *addr)
 {
 struct hns_roce_cmq_desc desc;
 struct hns_roce_cfg_smac_tb *smac_tb =
@@ -3057,8 +3067,7 @@ static int set_mtpt_pbl(struct hns_roce_dev *hr_dev,
 }
 static int hns_roce_v2_write_mtpt(struct hns_roce_dev *hr_dev,
- void *mb_buf, struct hns_roce_mr *mr,
- unsigned long mtpt_idx)
+ void *mb_buf, struct hns_roce_mr *mr)
 {
 struct hns_roce_v2_mpt_entry *mpt_entry;
 int ret;
@@ -3328,7 +3337,7 @@ static void hns_roce_v2_write_cqc(struct hns_roce_dev *hr_dev,
 memset(cq_context, 0, sizeof(*cq_context));
 hr_reg_write(cq_context, CQC_CQ_ST, V2_CQ_STATE_VALID);
- hr_reg_write(cq_context, CQC_ARM_ST, REG_NXT_CEQE);
+ hr_reg_write(cq_context, CQC_ARM_ST, NO_ARMED);
 hr_reg_write(cq_context, CQC_SHIFT, ilog2(hr_cq->cq_depth));
 hr_reg_write(cq_context, CQC_CEQN, hr_cq->vector);
 hr_reg_write(cq_context, CQC_CQN, hr_cq->cqn);
@@ -4318,10 +4327,10 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 dma_addr_t trrl_ba;
 dma_addr_t irrl_ba;
 enum ib_mtu ib_mtu;
+ const u8 *smac;
 u8 lp_pktn_ini;
 u64 *mtts;
 u8 *dmac;
- u8 *smac;
 u32 port;
 int mtu;
 int ret;
@@ -4374,7 +4383,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
 port = (attr_mask & IB_QP_PORT) ?
(attr->port_num - 1) : hr_qp->port; - smac = (u8 *)hr_dev->dev_addr[port]; + smac = (const u8 *)hr_dev->dev_addr[port]; dmac = (u8 *)attr->ah_attr.roce.dmac; /* when dmac equals smac or loop_idc is 1, it should loopback */ if (ether_addr_equal_unaligned(dmac, smac) || @@ -4399,8 +4408,8 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, mtu = ib_mtu_enum_to_int(ib_mtu); if (WARN_ON(mtu <= 0)) return -EINVAL; -#define MAX_LP_MSG_LEN 65536 - /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 64KB */ +#define MAX_LP_MSG_LEN 16384 + /* MTU * (2 ^ LP_PKTN_INI) shouldn't be bigger than 16KB */ lp_pktn_ini = ilog2(MAX_LP_MSG_LEN / mtu); if (WARN_ON(lp_pktn_ini >= 0xF)) return -EINVAL; @@ -4482,14 +4491,6 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, return 0; } -static inline u16 get_udp_sport(u32 fl, u32 lqpn, u32 rqpn) -{ - if (!fl) - fl = rdma_calc_flow_label(lqpn, rqpn); - - return rdma_flow_label_to_udp_sport(fl); -} - static int get_dip_ctx_idx(struct ib_qp *ibqp, const struct ib_qp_attr *attr, u32 *dip_idx) { @@ -4706,8 +4707,9 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, } hr_reg_write(context, QPC_UDPSPN, - is_udp ? get_udp_sport(grh->flow_label, ibqp->qp_num, - attr->dest_qp_num) : 0); + is_udp ? rdma_get_udp_sport(grh->flow_label, ibqp->qp_num, + attr->dest_qp_num) : + 0); hr_reg_clear(qpc_mask, QPC_UDPSPN); @@ -4733,7 +4735,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, hr_qp->sl = rdma_ah_get_sl(&attr->ah_attr); if (unlikely(hr_qp->sl > MAX_SERVICE_LEVEL)) { ibdev_err(ibdev, - "failed to fill QPC, sl (%d) shouldn't be larger than %d.\n", + "failed to fill QPC, sl (%u) shouldn't be larger than %d.\n", hr_qp->sl, MAX_SERVICE_LEVEL); return -EINVAL; } @@ -4762,7 +4764,8 @@ static bool check_qp_state(enum ib_qp_state cur_state, [IB_QPS_ERR] = true }, [IB_QPS_SQD] = {}, [IB_QPS_SQE] = {}, - [IB_QPS_ERR] = { [IB_QPS_RESET] = true, [IB_QPS_ERR] = true } + [IB_QPS_ERR] = { [IB_QPS_RESET] = true, + [IB_QPS_ERR] = true } }; return sm[cur_state][new_state]; @@ -4802,6 +4805,30 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, return ret; } +static bool check_qp_timeout_cfg_range(struct hns_roce_dev *hr_dev, u8 *timeout) +{ +#define QP_ACK_TIMEOUT_MAX_HIP08 20 +#define QP_ACK_TIMEOUT_OFFSET 10 +#define QP_ACK_TIMEOUT_MAX 31 + + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { + if (*timeout > QP_ACK_TIMEOUT_MAX_HIP08) { + ibdev_warn(&hr_dev->ib_dev, + "Local ACK timeout shall be 0 to 20.\n"); + return false; + } + *timeout += QP_ACK_TIMEOUT_OFFSET; + } else if (hr_dev->pci_dev->revision > PCI_REVISION_ID_HIP08) { + if (*timeout > QP_ACK_TIMEOUT_MAX) { + ibdev_warn(&hr_dev->ib_dev, + "Local ACK timeout shall be 0 to 31.\n"); + return false; + } + } + + return true; +} + static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp, const struct ib_qp_attr *attr, int attr_mask, @@ -4811,6 +4838,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp, struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); int ret = 0; + u8 timeout; if (attr_mask & IB_QP_AV) { ret = hns_roce_v2_set_path(ibqp, attr, attr_mask, context, @@ -4820,12 +4848,10 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp, } if (attr_mask & IB_QP_TIMEOUT) { - if (attr->timeout < 31) { - hr_reg_write(context, QPC_AT, attr->timeout); + timeout = attr->timeout; + if (check_qp_timeout_cfg_range(hr_dev, &timeout)) { + hr_reg_write(context, QPC_AT, timeout); hr_reg_clear(qpc_mask, QPC_AT); - } else { - ibdev_warn(&hr_dev->ib_dev, - 
"Local ACK timeout shall be 0 to 30.\n"); } } @@ -4882,7 +4908,9 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp, set_access_flags(hr_qp, context, qpc_mask, attr, attr_mask); if (attr_mask & IB_QP_MIN_RNR_TIMER) { - hr_reg_write(context, QPC_MIN_RNR_TIME, attr->min_rnr_timer); + hr_reg_write(context, QPC_MIN_RNR_TIME, + hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 ? + HNS_ROCE_RNR_TIMER_10NS : attr->min_rnr_timer); hr_reg_clear(qpc_mask, QPC_MIN_RNR_TIME); } @@ -5499,6 +5527,16 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) hr_reg_write(cq_context, CQC_CQ_MAX_CNT, cq_count); hr_reg_clear(cqc_mask, CQC_CQ_MAX_CNT); + + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { + if (cq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) { + dev_info(hr_dev->dev, + "cq_period(%u) reached the upper limit, adjusted to 65.\n", + cq_period); + cq_period = HNS_ROCE_MAX_CQ_PERIOD; + } + cq_period *= HNS_ROCE_CLOCK_ADJUST; + } hr_reg_write(cq_context, CQC_CQ_PERIOD, cq_period); hr_reg_clear(cqc_mask, CQC_CQ_PERIOD); @@ -5827,7 +5865,7 @@ static void hns_roce_v2_int_mask_enable(struct hns_roce_dev *hr_dev, roce_write(hr_dev, ROCEE_VF_ABN_INT_CFG_REG, enable_flag); } -static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) +static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, u32 eqn) { struct device *dev = hr_dev->dev; int ret; @@ -5841,7 +5879,7 @@ static void hns_roce_v2_destroy_eqc(struct hns_roce_dev *hr_dev, int eqn) 0, HNS_ROCE_CMD_DESTROY_AEQC, HNS_ROCE_CMD_TIMEOUT_MSECS); if (ret) - dev_err(dev, "[mailbox cmd] destroy eqc(%d) failed.\n", eqn); + dev_err(dev, "[mailbox cmd] destroy eqc(%u) failed.\n", eqn); } static void free_eq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq) @@ -5894,6 +5932,15 @@ static int config_eqc(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq, hr_reg_write(eqc, EQC_EQ_PROD_INDX, HNS_ROCE_EQ_INIT_PROD_IDX); hr_reg_write(eqc, EQC_EQ_MAX_CNT, eq->eq_max_cnt); + if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08) { + if (eq->eq_period * HNS_ROCE_CLOCK_ADJUST > USHRT_MAX) { + dev_info(hr_dev->dev, "eq_period(%u) reached the upper limit, adjusted to 65.\n", + eq->eq_period); + eq->eq_period = HNS_ROCE_MAX_EQ_PERIOD; + } + eq->eq_period *= HNS_ROCE_CLOCK_ADJUST; + } + hr_reg_write(eqc, EQC_EQ_PERIOD, eq->eq_period); hr_reg_write(eqc, EQC_EQE_REPORT_TIMER, HNS_ROCE_EQ_INIT_REPORT_TIMER); hr_reg_write(eqc, EQC_EQE_BA_L, bt_ba >> 3); @@ -6344,7 +6391,7 @@ static int hns_roce_hw_v2_init_instance(struct hnae3_handle *handle) if (!id) return 0; - if (id->driver_data && handle->pdev->revision < PCI_REVISION_ID_HIP09) + if (id->driver_data && handle->pdev->revision == PCI_REVISION_ID_HIP08) return 0; ret = __hns_roce_hw_v2_init_instance(handle); @@ -6397,10 +6444,8 @@ static int hns_roce_hw_v2_reset_notify_down(struct hnae3_handle *handle) if (!hr_dev) return 0; - hr_dev->is_reset = true; hr_dev->active = false; hr_dev->dis_db = true; - hr_dev->state = HNS_ROCE_DEVICE_STATE_RST_DOWN; return 0; diff --git a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h index 4d904d5e82be..12be85f0986e 100644 --- a/drivers/infiniband/hw/hns/hns_roce_hw_v2.h +++ b/drivers/infiniband/hw/hns/hns_roce_hw_v2.h @@ -35,26 +35,15 @@ #include <linux/bitops.h> -#define HNS_ROCE_VF_QPC_BT_NUM 256 -#define HNS_ROCE_VF_SCCC_BT_NUM 64 -#define HNS_ROCE_VF_SRQC_BT_NUM 64 -#define HNS_ROCE_VF_CQC_BT_NUM 64 -#define HNS_ROCE_VF_MPT_BT_NUM 64 -#define HNS_ROCE_VF_SMAC_NUM 32 -#define 
HNS_ROCE_VF_SL_NUM 8 -#define HNS_ROCE_VF_GMV_BT_NUM 256 - #define HNS_ROCE_V2_MAX_QP_NUM 0x1000 #define HNS_ROCE_V2_MAX_QPC_TIMER_NUM 0x200 #define HNS_ROCE_V2_MAX_WQE_NUM 0x8000 -#define HNS_ROCE_V2_MAX_SRQ 0x100000 #define HNS_ROCE_V2_MAX_SRQ_WR 0x8000 #define HNS_ROCE_V2_MAX_SRQ_SGE 64 #define HNS_ROCE_V2_MAX_CQ_NUM 0x100000 #define HNS_ROCE_V2_MAX_CQC_TIMER_NUM 0x100 #define HNS_ROCE_V2_MAX_SRQ_NUM 0x100000 #define HNS_ROCE_V2_MAX_CQE_NUM 0x400000 -#define HNS_ROCE_V2_MAX_SRQWQE_NUM 0x8000 #define HNS_ROCE_V2_MAX_RQ_SGE_NUM 64 #define HNS_ROCE_V2_MAX_SQ_SGE_NUM 64 #define HNS_ROCE_V2_MAX_EXTEND_SGE_NUM 0x200000 @@ -63,13 +52,10 @@ #define HNS_ROCE_V2_MAX_RC_INL_INN_SZ 32 #define HNS_ROCE_V2_UAR_NUM 256 #define HNS_ROCE_V2_PHY_UAR_NUM 1 -#define HNS_ROCE_V2_MAX_IRQ_NUM 65 -#define HNS_ROCE_V2_COMP_VEC_NUM 63 #define HNS_ROCE_V2_AEQE_VEC_NUM 1 #define HNS_ROCE_V2_ABNORMAL_VEC_NUM 1 #define HNS_ROCE_V2_MAX_MTPT_NUM 0x100000 #define HNS_ROCE_V2_MAX_MTT_SEGS 0x1000000 -#define HNS_ROCE_V2_MAX_CQE_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_SRQWQE_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_IDX_SEGS 0x1000000 #define HNS_ROCE_V2_MAX_PD_NUM 0x1000000 @@ -81,7 +67,6 @@ #define HNS_ROCE_V2_MAX_RQ_DESC_SZ 16 #define HNS_ROCE_V2_MAX_SRQ_DESC_SZ 64 #define HNS_ROCE_V2_IRRL_ENTRY_SZ 64 -#define HNS_ROCE_V2_TRRL_ENTRY_SZ 48 #define HNS_ROCE_V2_EXT_ATOMIC_TRRL_ENTRY_SZ 100 #define HNS_ROCE_V2_CQC_ENTRY_SZ 64 #define HNS_ROCE_V2_SRQC_ENTRY_SZ 64 @@ -103,7 +88,6 @@ #define HNS_ROCE_INVALID_LKEY 0x0 #define HNS_ROCE_INVALID_SGE_LENGTH 0x80000000 #define HNS_ROCE_CMQ_TX_TIMEOUT 30000 -#define HNS_ROCE_V2_UC_RC_SGE_NUM_IN_WQE 2 #define HNS_ROCE_V2_RSV_QPS 8 #define HNS_ROCE_V2_HW_RST_TIMEOUT 1000 @@ -117,12 +101,14 @@ #define HNS_ROCE_CQE_HOP_NUM 1 #define HNS_ROCE_SRQWQE_HOP_NUM 1 #define HNS_ROCE_PBL_HOP_NUM 2 -#define HNS_ROCE_EQE_HOP_NUM 2 #define HNS_ROCE_IDX_HOP_NUM 1 #define HNS_ROCE_SQWQE_HOP_NUM 2 #define HNS_ROCE_EXT_SGE_HOP_NUM 1 #define HNS_ROCE_RQWQE_HOP_NUM 2 +#define HNS_ROCE_V2_EQE_HOP_NUM 2 +#define HNS_ROCE_V3_EQE_HOP_NUM 1 + #define HNS_ROCE_BA_PG_SZ_SUPPORTED_256K 6 #define HNS_ROCE_BA_PG_SZ_SUPPORTED_16K 2 #define HNS_ROCE_V2_GID_INDEX_NUM 16 @@ -1441,9 +1427,17 @@ struct hns_roce_v2_priv { struct hns_roce_dip { u8 dgid[GID_LEN_V2]; u32 dip_idx; - struct list_head node; /* all dips are on a list */ + struct list_head node; /* all dips are on a list */ }; +/* only for RNR timeout issue of HIP08 */ +#define HNS_ROCE_CLOCK_ADJUST 1000 +#define HNS_ROCE_MAX_CQ_PERIOD 65 +#define HNS_ROCE_MAX_EQ_PERIOD 65 +#define HNS_ROCE_RNR_TIMER_10NS 1 +#define HNS_ROCE_1US_CFG 999 +#define HNS_ROCE_1NS_CFG 0 + #define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0 #define HNS_ROCE_AEQ_DEFAULT_INTERVAL 0x0 #define HNS_ROCE_CEQ_DEFAULT_BURST_NUM 0x0 diff --git a/drivers/infiniband/hw/hns/hns_roce_main.c b/drivers/infiniband/hw/hns/hns_roce_main.c index 5d39bd08582a..f73ba619f375 100644 --- a/drivers/infiniband/hw/hns/hns_roce_main.c +++ b/drivers/infiniband/hw/hns/hns_roce_main.c @@ -31,7 +31,6 @@ * SOFTWARE. 
*/ #include <linux/acpi.h> -#include <linux/of_platform.h> #include <linux/module.h> #include <linux/pci.h> #include <rdma/ib_addr.h> @@ -42,7 +41,8 @@ #include "hns_roce_device.h" #include "hns_roce_hem.h" -static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port, u8 *addr) +static int hns_roce_set_mac(struct hns_roce_dev *hr_dev, u32 port, + const u8 *addr) { u8 phy_port; u32 i; @@ -69,7 +69,7 @@ static int hns_roce_add_gid(const struct ib_gid_attr *attr, void **context) if (port >= hr_dev->caps.num_ports) return -EINVAL; - ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, &attr->gid, attr); + ret = hr_dev->hw->set_gid(hr_dev, attr->index, &attr->gid, attr); return ret; } @@ -83,7 +83,7 @@ static int hns_roce_del_gid(const struct ib_gid_attr *attr, void **context) if (port >= hr_dev->caps.num_ports) return -EINVAL; - ret = hr_dev->hw->set_gid(hr_dev, port, attr->index, NULL, NULL); + ret = hr_dev->hw->set_gid(hr_dev, attr->index, NULL, NULL); return ret; } @@ -151,9 +151,6 @@ static int hns_roce_setup_mtu_mac(struct hns_roce_dev *hr_dev) u8 i; for (i = 0; i < hr_dev->caps.num_ports; i++) { - if (hr_dev->hw->set_mtu) - hr_dev->hw->set_mtu(hr_dev, hr_dev->iboe.phy_port[i], - hr_dev->caps.max_mtu); ret = hns_roce_set_mac(hr_dev, i, hr_dev->iboe.netdevs[i]->dev_addr); if (ret) @@ -269,6 +266,9 @@ static enum rdma_link_layer hns_roce_get_link_layer(struct ib_device *device, static int hns_roce_query_pkey(struct ib_device *ib_dev, u32 port, u16 index, u16 *pkey) { + if (index > 0) + return -EINVAL; + *pkey = PKEY_ID; return 0; @@ -291,6 +291,66 @@ static int hns_roce_modify_device(struct ib_device *ib_dev, int mask, return 0; } +struct hns_user_mmap_entry * +hns_roce_user_mmap_entry_insert(struct ib_ucontext *ucontext, u64 address, + size_t length, + enum hns_roce_mmap_type mmap_type) +{ + struct hns_user_mmap_entry *entry; + int ret; + + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + return NULL; + + entry->address = address; + entry->mmap_type = mmap_type; + + switch (mmap_type) { + /* pgoff 0 must be used by DB for compatibility */ + case HNS_ROCE_MMAP_TYPE_DB: + ret = rdma_user_mmap_entry_insert_exact( + ucontext, &entry->rdma_entry, length, 0); + break; + case HNS_ROCE_MMAP_TYPE_DWQE: + ret = rdma_user_mmap_entry_insert_range( + ucontext, &entry->rdma_entry, length, 1, + U32_MAX); + break; + default: + ret = -EINVAL; + break; + } + + if (ret) { + kfree(entry); + return NULL; + } + + return entry; +} + +static void hns_roce_dealloc_uar_entry(struct hns_roce_ucontext *context) +{ + if (context->db_mmap_entry) + rdma_user_mmap_entry_remove( + &context->db_mmap_entry->rdma_entry); +} + +static int hns_roce_alloc_uar_entry(struct ib_ucontext *uctx) +{ + struct hns_roce_ucontext *context = to_hr_ucontext(uctx); + u64 address; + + address = context->uar.pfn << PAGE_SHIFT; + context->db_mmap_entry = hns_roce_user_mmap_entry_insert( + uctx, address, PAGE_SIZE, HNS_ROCE_MMAP_TYPE_DB); + if (!context->db_mmap_entry) + return -ENOMEM; + + return 0; +} + static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata) { @@ -309,6 +369,10 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, if (ret) goto error_fail_uar_alloc; + ret = hns_roce_alloc_uar_entry(uctx); + if (ret) + goto error_fail_uar_entry; + if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB || hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) { INIT_LIST_HEAD(&context->page_list); @@ -325,6 +389,9 @@ static int hns_roce_alloc_ucontext(struct ib_ucontext *uctx, return 0; 
 error_fail_copy_to_udata:
+ hns_roce_dealloc_uar_entry(context);
+
+error_fail_uar_entry:
 ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
 error_fail_uar_alloc:
@@ -336,39 +403,48 @@ static void hns_roce_dealloc_ucontext(struct ib_ucontext *ibcontext)
 struct hns_roce_ucontext *context = to_hr_ucontext(ibcontext);
 struct hns_roce_dev *hr_dev = to_hr_dev(ibcontext->device);
+ hns_roce_dealloc_uar_entry(context);
+
 ida_free(&hr_dev->uar_ida.ida, (int)context->uar.logic_idx);
 }
-static int hns_roce_mmap(struct ib_ucontext *context,
- struct vm_area_struct *vma)
+static int hns_roce_mmap(struct ib_ucontext *uctx, struct vm_area_struct *vma)
 {
- struct hns_roce_dev *hr_dev = to_hr_dev(context->device);
-
- switch (vma->vm_pgoff) {
- case 0:
- return rdma_user_mmap_io(context, vma,
- to_hr_ucontext(context)->uar.pfn,
- PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot),
- NULL);
-
- /* vm_pgoff: 1 -- TPTR */
- case 1:
- if (!hr_dev->tptr_dma_addr || !hr_dev->tptr_size)
- return -EINVAL;
- /*
- * FIXME: using io_remap_pfn_range on the dma address returned
- * by dma_alloc_coherent is totally wrong.
- */
- return rdma_user_mmap_io(context, vma,
- hr_dev->tptr_dma_addr >> PAGE_SHIFT,
- hr_dev->tptr_size,
- vma->vm_page_prot,
- NULL);
+ struct rdma_user_mmap_entry *rdma_entry;
+ struct hns_user_mmap_entry *entry;
+ phys_addr_t pfn;
+ pgprot_t prot;
+ int ret;
+
+ rdma_entry = rdma_user_mmap_entry_get_pgoff(uctx, vma->vm_pgoff);
+ if (!rdma_entry)
+ return -EINVAL;
+
+ entry = to_hns_mmap(rdma_entry);
+ pfn = entry->address >> PAGE_SHIFT;
+ switch (entry->mmap_type) {
+ case HNS_ROCE_MMAP_TYPE_DB:
+ case HNS_ROCE_MMAP_TYPE_DWQE:
+ prot = pgprot_device(vma->vm_page_prot);
+ break;
 default:
 return -EINVAL;
 }
+
+ ret = rdma_user_mmap_io(uctx, vma, pfn, rdma_entry->npages * PAGE_SIZE,
+ prot, rdma_entry);
+
+ rdma_user_mmap_entry_put(rdma_entry);
+
+ return ret;
+}
+
+static void hns_roce_free_mmap(struct rdma_user_mmap_entry *rdma_entry)
+{
+ struct hns_user_mmap_entry *entry = to_hns_mmap(rdma_entry);
+
+ kfree(entry);
 }
 static int hns_roce_port_immutable(struct ib_device *ib_dev, u32 port_num,
@@ -444,6 +520,7 @@ static const struct ib_device_ops hns_roce_dev_ops = {
 .get_link_layer = hns_roce_get_link_layer,
 .get_port_immutable = hns_roce_port_immutable,
 .mmap = hns_roce_mmap,
+ .mmap_free = hns_roce_free_mmap,
 .modify_device = hns_roce_modify_device,
 .modify_qp = hns_roce_modify_qp,
 .query_ah = hns_roce_query_ah,
@@ -730,7 +807,6 @@ static int hns_roce_setup_hca(struct hns_roce_dev *hr_dev)
 int ret;
 spin_lock_init(&hr_dev->sm_lock);
- spin_lock_init(&hr_dev->bt_cmd_lock);
 if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_CQ_RECORD_DB ||
 hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_QP_RECORD_DB) {
@@ -821,20 +897,13 @@ int hns_roce_init(struct hns_roce_dev *hr_dev)
 struct device *dev = hr_dev->dev;
 int ret;
- if (hr_dev->hw->reset) {
- ret = hr_dev->hw->reset(hr_dev, true);
- if (ret) {
- dev_err(dev, "Reset RoCE engine failed!\n");
- return ret;
- }
- }
 hr_dev->is_reset = false;
 if (hr_dev->hw->cmq_init) {
 ret = hr_dev->hw->cmq_init(hr_dev);
 if (ret) {
 dev_err(dev, "Init RoCE Command Queue failed!\n");
- goto error_failed_cmq_init;
+ return ret;
 }
 }
@@ -917,12 +986,6 @@ error_failed_cmd_init:
 if (hr_dev->hw->cmq_exit)
 hr_dev->hw->cmq_exit(hr_dev);
-error_failed_cmq_init:
- if (hr_dev->hw->reset) {
- if (hr_dev->hw->reset(hr_dev, false))
- dev_err(dev, "Dereset RoCE engine failed!\n");
- }
-
 return ret;
 }
@@ -942,8 +1005,6 @@ void hns_roce_exit(struct hns_roce_dev *hr_dev)
 hns_roce_cmd_cleanup(hr_dev);
 if (hr_dev->hw->cmq_exit)
 hr_dev->hw->cmq_exit(hr_dev);
- if (hr_dev->hw->reset)
- hr_dev->hw->reset(hr_dev, false);
 }
 MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/infiniband/hw/hns/hns_roce_mr.c b/drivers/infiniband/hw/hns/hns_roce_mr.c
index 7089ac780291..2ee06b906b60 100644
--- a/drivers/infiniband/hw/hns/hns_roce_mr.c
+++ b/drivers/infiniband/hw/hns/hns_roce_mr.c
@@ -31,7 +31,6 @@
 * SOFTWARE.
 */
-#include <linux/platform_device.h>
 #include <linux/vmalloc.h>
 #include <rdma/ib_umem.h>
 #include "hns_roce_device.h"
@@ -81,7 +80,7 @@ static int alloc_mr_key(struct hns_roce_dev *hr_dev, struct hns_roce_mr *mr)
 return -ENOMEM;
 }
- mr->key = hw_index_to_key(id); /* MR key */
+ mr->key = hw_index_to_key(id); /* MR key */
 err = hns_roce_table_get(hr_dev, &hr_dev->mr_table.mtpt_table, (unsigned long)id);
@@ -173,8 +172,7 @@ static int hns_roce_mr_enable(struct hns_roce_dev *hr_dev,
 }
 if (mr->type != MR_TYPE_FRMR)
- ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr,
- mtpt_idx);
+ ret = hr_dev->hw->write_mtpt(hr_dev, mailbox->buf, mr);
 else
 ret = hr_dev->hw->frmr_write_mtpt(hr_dev, mailbox->buf, mr);
 if (ret) {
@@ -363,12 +361,8 @@ int hns_roce_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata)
 struct hns_roce_mr *mr = to_hr_mr(ibmr);
 int ret = 0;
- if (hr_dev->hw->dereg_mr) {
- ret = hr_dev->hw->dereg_mr(hr_dev, mr, udata);
- } else {
- hns_roce_mr_free(hr_dev, mr);
- kfree(mr);
- }
+ hns_roce_mr_free(hr_dev, mr);
+ kfree(mr);
 return ret;
 }
@@ -614,10 +608,7 @@ static int mtr_map_region(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 return -ENOBUFS;
 for (i = 0; i < count && npage < max_count; i++) {
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- addr = to_hr_hw_page_addr(pages[npage]);
- else
- addr = pages[npage];
+ addr = pages[npage];
 mtts[i] = cpu_to_le64(addr);
 npage++;
@@ -824,11 +815,11 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 }
 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
- int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
+ u32 offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
 {
 struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
 int mtt_count, left;
- int start_index;
+ u32 start_index;
 int total = 0;
 __le64 *mtts;
 u32 npage;
@@ -847,10 +838,7 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 continue;
 addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- mtt_buf[total] = to_hr_hw_page_addr(addr);
- else
- mtt_buf[total] = addr;
+ mtt_buf[total] = addr;
 total++;
 }
@@ -884,10 +872,10 @@ done:
 static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev, struct hns_roce_buf_attr *attr, struct hns_roce_hem_cfg *cfg,
- unsigned int *buf_page_shift, int unalinged_size)
+ unsigned int *buf_page_shift, u64 unalinged_size)
 {
 struct hns_roce_buf_region *r;
- int first_region_padding;
+ u64 first_region_padding;
 int page_cnt, region_cnt;
 unsigned int page_shift;
 size_t buf_size;
diff --git a/drivers/infiniband/hw/hns/hns_roce_pd.c b/drivers/infiniband/hw/hns/hns_roce_pd.c
index 81ffad77ae42..783e71852c50 100644
--- a/drivers/infiniband/hw/hns/hns_roce_pd.c
+++ b/drivers/infiniband/hw/hns/hns_roce_pd.c
@@ -30,7 +30,6 @@
 * SOFTWARE.
 */
-#include <linux/platform_device.h>
 #include <linux/pci.h>
 #include "hns_roce_device.h"
@@ -86,7 +85,6 @@ int hns_roce_dealloc_pd(struct ib_pd *pd, struct ib_udata *udata)
 int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 {
 struct hns_roce_ida *uar_ida = &hr_dev->uar_ida;
- struct resource *res;
 int id;
 /* Using bitmap to manager UAR index */
@@ -104,18 +102,9 @@ int hns_roce_uar_alloc(struct hns_roce_dev *hr_dev, struct hns_roce_uar *uar)
 else
 uar->index = 0;
- if (!dev_is_pci(hr_dev->dev)) {
- res = platform_get_resource(hr_dev->pdev, IORESOURCE_MEM, 0);
- if (!res) {
- ida_free(&uar_ida->ida, id);
- dev_err(&hr_dev->pdev->dev, "memory resource not found!\n");
- return -EINVAL;
- }
- uar->pfn = ((res->start) >> PAGE_SHIFT) + uar->index;
- } else {
- uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2))
- >> PAGE_SHIFT);
- }
+ uar->pfn = ((pci_resource_start(hr_dev->pci_dev, 2)) >> PAGE_SHIFT);
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
+ hr_dev->dwqe_page = pci_resource_start(hr_dev->pci_dev, 4);
 return 0;
 }
diff --git a/drivers/infiniband/hw/hns/hns_roce_qp.c b/drivers/infiniband/hw/hns/hns_roce_qp.c
index 9af4509894e6..d78373e10aab 100644
--- a/drivers/infiniband/hw/hns/hns_roce_qp.c
+++ b/drivers/infiniband/hw/hns/hns_roce_qp.c
@@ -32,7 +32,6 @@
 */
 #include <linux/pci.h>
-#include <linux/platform_device.h>
 #include <rdma/ib_addr.h>
 #include <rdma/ib_umem.h>
 #include <rdma/uverbs_ioctl.h>
@@ -110,12 +109,11 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 return;
 }
- if (hr_dev->hw_rev != HNS_ROCE_HW_VER1 &&
- (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
- event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
- event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
+ if (event_type == HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR ||
+ event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
+ event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH) {
 qp->state = IB_QPS_ERR;
 flush_cqe(hr_dev, qp);
@@ -219,13 +217,7 @@ static int alloc_qpn(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 int ret;
 if (hr_qp->ibqp.qp_type == IB_QPT_GSI) {
- /* when hw version is v1, the sqpn is allocated */
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- num = HNS_ROCE_MAX_PORTS +
- hr_dev->iboe.phy_port[hr_qp->port];
- else
- num = 1;
-
+ num = 1;
 hr_qp->doorbell_qpn = 1;
 } else {
 mutex_lock(&qp_table->bank_mutex);
@@ -324,11 +316,6 @@ static int alloc_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 if (!hr_qp->qpn)
 return -EINVAL;
- /* In v1 engine, GSI QP context is saved in the RoCE hw's register */
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
- hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- return 0;
-
 /* Alloc memory for QPC */
 ret = hns_roce_table_get(hr_dev, &qp_table->qp_table, hr_qp->qpn);
 if (ret) {
@@ -379,6 +366,11 @@ err_out:
 return ret;
 }
+static void qp_user_mmap_entry_remove(struct hns_roce_qp *hr_qp)
+{
+ rdma_user_mmap_entry_remove(&hr_qp->dwqe_mmap_entry->rdma_entry);
+}
+
 void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 {
 struct xarray *xa = &hr_dev->qp_table_xa;
@@ -402,11 +394,6 @@ static void free_qpc(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
 {
 struct hns_roce_qp_table *qp_table = &hr_dev->qp_table;
- /* In v1 engine, GSI QP context is saved in the RoCE hw's register */
- if (hr_qp->ibqp.qp_type == IB_QPT_GSI &&
- hr_dev->hw_rev == HNS_ROCE_HW_VER1)
- return;
-
 if (hr_dev->caps.trrl_entry_sz)
 hns_roce_table_put(hr_dev, &qp_table->trrl_table, hr_qp->qpn);
 hns_roce_table_put(hr_dev, &qp_table->irrl_table, hr_qp->qpn);
@@ -535,11 +522,6 @@ static void set_ext_sge_param(struct hns_roce_dev *hr_dev, u32 sq_wqe_cnt,
 hr_qp->sge.sge_shift = HNS_ROCE_SGE_SHIFT;
- if (hr_dev->hw_rev == HNS_ROCE_HW_VER1) {
- hr_qp->sq.max_gs = HNS_ROCE_SGE_IN_WQE;
- return;
- }
-
 hr_qp->sq.max_gs = max(1U, cap->max_send_sge);
 wqe_sge_cnt = get_wqe_ext_sge_cnt(hr_qp);
@@ -780,7 +762,11 @@ static int alloc_qp_buf(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 goto err_inline;
 }
+ if (hr_dev->caps.flags & HNS_ROCE_CAP_FLAG_DIRECT_WQE)
+ hr_qp->en_flags |= HNS_ROCE_QP_CAP_DIRECT_WQE;
+
 return 0;
+
 err_inline:
 free_rq_inline_buf(hr_qp);
@@ -822,6 +808,35 @@ static inline bool kernel_qp_has_rdb(struct hns_roce_dev *hr_dev,
 hns_roce_qp_has_rq(init_attr));
 }
+static int qp_mmap_entry(struct hns_roce_qp *hr_qp,
+ struct hns_roce_dev *hr_dev,
+ struct ib_udata *udata,
+ struct hns_roce_ib_create_qp_resp *resp)
+{
+ struct hns_roce_ucontext *uctx =
+ rdma_udata_to_drv_context(udata,
+ struct hns_roce_ucontext, ibucontext);
+ struct rdma_user_mmap_entry *rdma_entry;
+ u64 address;
+
+ address = hr_dev->dwqe_page + hr_qp->qpn * HNS_ROCE_DWQE_SIZE;
+
+ hr_qp->dwqe_mmap_entry =
+ hns_roce_user_mmap_entry_insert(&uctx->ibucontext, address,
+ HNS_ROCE_DWQE_SIZE,
+ HNS_ROCE_MMAP_TYPE_DWQE);
+
+ if (!hr_qp->dwqe_mmap_entry) {
+ ibdev_err(&hr_dev->ib_dev, "failed to get dwqe mmap entry.\n");
+ return -ENOMEM;
+ }
+
+ rdma_entry = &hr_qp->dwqe_mmap_entry->rdma_entry;
+ resp->dwqe_mmap_key = rdma_user_mmap_get_offset(rdma_entry);
+
+ return 0;
+}
+
 static int alloc_user_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, struct ib_qp_init_attr *init_attr,
@@ -909,10 +924,16 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 hr_qp->en_flags |= HNS_ROCE_QP_CAP_OWNER_DB;
 if (udata) {
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE) {
+ ret = qp_mmap_entry(hr_qp, hr_dev, udata, resp);
+ if (ret)
+ return ret;
+ }
+
 ret = alloc_user_qp_db(hr_dev, hr_qp, init_attr, udata, ucmd, resp);
 if (ret)
- return ret;
+ goto err_remove_qp;
 } else {
 ret = alloc_kernel_qp_db(hr_dev, hr_qp, init_attr);
 if (ret)
@@ -920,6 +941,12 @@ static int alloc_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 }
 return 0;
+
+err_remove_qp:
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
+ qp_user_mmap_entry_remove(hr_qp);
+
+ return ret;
 }
 static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
@@ -933,6 +960,8 @@ static void free_qp_db(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 hns_roce_db_unmap_user(uctx, &hr_qp->rdb);
 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_SQ_RECORD_DB)
 hns_roce_db_unmap_user(uctx, &hr_qp->sdb);
+ if (hr_qp->en_flags & HNS_ROCE_QP_CAP_DIRECT_WQE)
+ qp_user_mmap_entry_remove(hr_qp);
 } else {
 if (hr_qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)
 hns_roce_free_db(hr_dev, &hr_qp->rdb);
@@ -1158,7 +1187,7 @@ static int check_qp_type(struct hns_roce_dev *hr_dev, enum ib_qp_type type,
 goto out;
 break;
 case IB_QPT_UD:
- if (hr_dev->pci_dev->revision <= PCI_REVISION_ID_HIP08 &&
+ if (hr_dev->pci_dev->revision == PCI_REVISION_ID_HIP08 &&
 is_user)
 goto out;
 break;
@@ -1391,7 +1420,7 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
 }
 }
-static inline void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
+static inline void *get_wqe(struct hns_roce_qp *hr_qp, u32 offset)
 {
 return hns_roce_buf_offset(hr_qp->mtr.kmem, offset);
 }
diff --git a/drivers/infiniband/hw/hns/hns_roce_srq.c b/drivers/infiniband/hw/hns/hns_roce_srq.c
index 6eee9deadd12..e64ef6903fb4 100644
--- a/drivers/infiniband/hw/hns/hns_roce_srq.c
+++ b/drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -259,7 +259,7 @@ static int alloc_srq_wrid(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 static void free_srq_wrid(struct hns_roce_srq *srq)
 {
- kfree(srq->wrid);
+ kvfree(srq->wrid);
 srq->wrid = NULL;
 }
diff --git a/drivers/infiniband/hw/irdma/cm.h b/drivers/infiniband/hw/irdma/cm.h
index d03cd29333ea..3bf42728e9b7 100644
--- a/drivers/infiniband/hw/irdma/cm.h
+++ b/drivers/infiniband/hw/irdma/cm.h
@@ -159,14 +159,6 @@ enum irdma_cm_event_type {
 IRDMA_CM_EVENT_ABORTED,
 };
-struct irdma_bth { /* Base Trasnport Header */
- u8 opcode;
- u8 flags;
- __be16 pkey;
- __be32 qpn;
- __be32 apsn;
-};
-
 struct ietf_mpa_v1 {
 u8 key[IETF_MPA_KEY_SIZE];
 u8 flags;
@@ -397,7 +389,7 @@ int irdma_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len);
 int irdma_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param);
 int irdma_create_listen(struct iw_cm_id *cm_id, int backlog);
 int irdma_destroy_listen(struct iw_cm_id *cm_id);
-int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, u8 *mac);
+int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac);
 void irdma_cm_teardown_connections(struct irdma_device *iwdev, u32 *ipaddr, struct irdma_cm_info *nfo, bool disconnect_all);
@@ -406,7 +398,7 @@ int irdma_cm_stop(struct irdma_device *dev);
 bool irdma_ipv4_is_lpb(u32 loc_addr, u32 rem_addr);
 bool irdma_ipv6_is_lpb(u32 *loc_addr, u32 *rem_addr);
 int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
- u8 *mac_addr, u32 action);
+ const u8 *mac_addr, u32 action);
 void irdma_if_notify(struct irdma_device *iwdev, struct net_device *netdev, u32 *ipaddr, bool ipv4, bool ifup);
 bool irdma_port_in_use(struct irdma_cm_core *cm_core, u16 port);
diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c
index f1e5515256e0..3141a9c85de5 100644
--- a/drivers/infiniband/hw/irdma/ctrl.c
+++ b/drivers/infiniband/hw/irdma/ctrl.c
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 /* Copyright (c) 2015 - 2021 Intel Corporation */
+#include <linux/etherdevice.h>
+
 #include "osdep.h"
 #include "status.h"
 #include "hmc.h"
@@ -1420,44 +1422,6 @@ void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size,
 }
 /**
- * irdma_sc_send_lsmm_nostag - for privilege qp
- * @qp: sc qp struct
- * @lsmm_buf: buffer with lsmm message
- * @size: size of lsmm buffer
- */
-void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size)
-{
- __le64 *wqe;
- u64 hdr;
- struct irdma_qp_uk *qp_uk;
-
- qp_uk = &qp->qp_uk;
- wqe = qp_uk->sq_base->elem;
-
- set_64bit_val(wqe, 0, (uintptr_t)lsmm_buf);
-
- if (qp->qp_uk.uk_attrs->hw_rev == IRDMA_GEN_1)
- set_64bit_val(wqe, 8,
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, size));
- else
- set_64bit_val(wqe, 8,
- FIELD_PREP(IRDMAQPSQ_FRAG_LEN, size) |
- FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity));
- set_64bit_val(wqe, 16, 0);
-
- hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMAQP_OP_RDMA_SEND) |
- FIELD_PREP(IRDMAQPSQ_STREAMMODE, 1) |
- FIELD_PREP(IRDMAQPSQ_WAITFORRCVPDU, 1) |
- FIELD_PREP(IRDMAQPSQ_VALID, qp->qp_uk.swqe_polarity);
- dma_wmb(); /* make sure WQE is written before valid bit is set */
-
- set_64bit_val(wqe, 24, hdr);
-
- print_hex_dump_debug("WQE: SEND_LSMM_NOSTAG WQE", DUMP_PREFIX_OFFSET,
- 16, 8, wqe, IRDMA_QP_WQE_MIN_SIZE, false);
-}
-
-/**
 * irdma_sc_send_rtt - send last read0 or write0
 * @qp: sc qp struct
 * @read: Do read0 or write0
@@ -2501,7 +2465,6 @@ static inline void irdma_sc_cq_ack(struct irdma_sc_cq *cq)
 enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq, struct irdma_cq_init_info *info)
 {
- enum irdma_status_code ret_code;
 u32 pble_obj_cnt;
 pble_obj_cnt = info->dev->hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt;
@@ -2513,9 +2476,7 @@ enum irdma_status_code irdma_sc_cq_init(struct irdma_sc_cq *cq,
 cq->ceq_id = info->ceq_id;
 info->cq_uk_init_info.cqe_alloc_db = cq->dev->cq_arm_db;
 info->cq_uk_init_info.cq_ack_db = cq->dev->cq_ack_db;
- ret_code = irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
- if (ret_code)
- return ret_code;
+ irdma_uk_cq_init(&cq->cq_uk, &info->cq_uk_init_info);
 cq->virtual_map = info->virtual_map;
 cq->pbl_chunk_size = info->pbl_chunk_size;
diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c
index 7de525a5ccf8..89234d04cc65 100644
--- a/drivers/infiniband/hw/irdma/hw.c
+++ b/drivers/infiniband/hw/irdma/hw.c
@@ -60,6 +60,8 @@ static void irdma_iwarp_ce_handler(struct irdma_sc_cq *iwcq)
 {
 struct irdma_cq *cq = iwcq->back_cq;
+ if (!cq->user_mode)
+ cq->armed = false;
 if (cq->ibcq.comp_handler)
 cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context);
 }
@@ -146,6 +148,7 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
 qp->flush_code = FLUSH_PROT_ERR;
 break;
 case IRDMA_AE_AMP_BAD_QP:
+ case IRDMA_AE_WQE_UNEXPECTED_OPCODE:
 qp->flush_code = FLUSH_LOC_QP_OP_ERR;
 break;
 case IRDMA_AE_AMP_BAD_STAG_KEY:
@@ -156,7 +159,6 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
 case IRDMA_AE_PRIV_OPERATION_DENIED:
 case IRDMA_AE_IB_INVALID_REQUEST:
 case IRDMA_AE_IB_REMOTE_ACCESS_ERROR:
- case IRDMA_AE_IB_REMOTE_OP_ERROR:
 qp->flush_code = FLUSH_REM_ACCESS_ERR;
 qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
 break;
@@ -184,6 +186,9 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp,
 case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS:
 qp->flush_code = FLUSH_MW_BIND_ERR;
 break;
+ case IRDMA_AE_IB_REMOTE_OP_ERROR:
+ qp->flush_code = FLUSH_REM_OP_ERR;
+ break;
 default:
 qp->flush_code = FLUSH_FATAL_ERR;
 break;
@@ -545,7 +550,7 @@ static void irdma_destroy_irq(struct irdma_pci_f *rf,
 struct irdma_sc_dev *dev = &rf->sc_dev;
 dev->irq_ops->irdma_dis_irq(dev, msix_vec->idx);
- irq_set_affinity_hint(msix_vec->irq, NULL);
+ irq_update_affinity_hint(msix_vec->irq, NULL);
 free_irq(msix_vec->irq, dev_id);
 }
@@ -1057,7 +1062,7 @@ static enum irdma_status_code irdma_alloc_set_mac(struct irdma_device *iwdev)
 &iwdev->mac_ip_table_idx);
 if (!status) {
 status = irdma_add_local_mac_entry(iwdev->rf,
- (u8 *)iwdev->netdev->dev_addr,
+ (const u8 *)iwdev->netdev->dev_addr,
 (u8)iwdev->mac_ip_table_idx);
 if (status)
 irdma_del_local_mac_entry(iwdev->rf,
@@ -1095,7 +1100,7 @@ irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq,
 }
 cpumask_clear(&msix_vec->mask);
 cpumask_set_cpu(msix_vec->cpu_affinity, &msix_vec->mask);
- irq_set_affinity_hint(msix_vec->irq, &msix_vec->mask);
+ irq_update_affinity_hint(msix_vec->irq, &msix_vec->mask);
 if (status) {
 ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n");
 return IRDMA_ERR_CFG;
@@ -1704,14 +1709,14 @@ clean_msixtbl:
 */
 static void irdma_get_used_rsrc(struct irdma_device *iwdev)
 {
- iwdev->rf->used_pds = find_next_zero_bit(iwdev->rf->allocated_pds,
- iwdev->rf->max_pd, 0);
- iwdev->rf->used_qps = find_next_zero_bit(iwdev->rf->allocated_qps,
- iwdev->rf->max_qp, 0);
- iwdev->rf->used_cqs = find_next_zero_bit(iwdev->rf->allocated_cqs,
- iwdev->rf->max_cq, 0);
- iwdev->rf->used_mrs = find_next_zero_bit(iwdev->rf->allocated_mrs,
- iwdev->rf->max_mr, 0);
+ iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds,
+ iwdev->rf->max_pd);
+ iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps,
+ iwdev->rf->max_qp);
+ iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs,
+ iwdev->rf->max_cq);
+ iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs,
+ iwdev->rf->max_mr);
 }
 void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf)
@@ -2191,7 +2196,7 @@ void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx)
 * @mac_addr: pointer to mac address
 * @idx: the index of the mac ip address to add
 */
-int irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx)
+int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx)
 {
 struct irdma_local_mac_entry_info *info;
 struct irdma_cqp *iwcqp = &rf->cqp;
@@ -2362,7 +2367,8 @@ void irdma_del_apbvt(struct irdma_device *iwdev,
 * @ipv4: flag inicating IPv4
 * @action: add, delete or modify
 */
-void irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
+void irdma_manage_arp_cache(struct irdma_pci_f *rf,
+ const unsigned char *mac_addr,
 u32 *ip_addr, bool ipv4, u32 action)
 {
 struct irdma_add_arp_cache_entry_info *info;
diff --git a/drivers/infiniband/hw/irdma/i40iw_if.c b/drivers/infiniband/hw/irdma/i40iw_if.c
index d219f64b2c3d..43e962b97d6a 100644
--- a/drivers/infiniband/hw/irdma/i40iw_if.c
+++ b/drivers/infiniband/hw/irdma/i40iw_if.c
@@ -198,7 +198,7 @@ static void i40iw_remove(struct auxiliary_device *aux_dev)
 aux_dev);
 struct i40e_info *cdev_info = i40e_adev->ldev;
- return i40e_client_device_unregister(cdev_info);
+ i40e_client_device_unregister(cdev_info);
 }
 static const struct auxiliary_device_id i40iw_auxiliary_id_table[] = {
diff --git a/drivers/infiniband/hw/irdma/main.c b/drivers/infiniband/hw/irdma/main.c
index 51a41359e0b4..9fab29039f1c 100644
--- a/drivers/infiniband/hw/irdma/main.c
+++ b/drivers/infiniband/hw/irdma/main.c
@@ -207,7 +207,7 @@ static void irdma_remove(struct auxiliary_device *aux_dev)
 struct iidc_auxiliary_dev, adev);
 struct ice_pf *pf = iidc_adev->pf;
- struct irdma_device *iwdev = dev_get_drvdata(&aux_dev->dev);
+ struct irdma_device *iwdev = auxiliary_get_drvdata(aux_dev);
 irdma_ib_unregister_device(iwdev);
 ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, false);
@@ -228,7 +228,8 @@ static void irdma_fill_device_info(struct irdma_device *iwdev, struct ice_pf *pf
 rf->msix_count = pf->num_rdma_msix;
 rf->msix_entries = &pf->msix_entries[pf->rdma_base_vector];
 rf->default_vsi.vsi_idx = vsi->vsi_num;
- rf->protocol_used = IRDMA_ROCE_PROTOCOL_ONLY;
+ rf->protocol_used = pf->rdma_mode & IIDC_RDMA_PROTOCOL_ROCEV2 ?
+ IRDMA_ROCE_PROTOCOL_ONLY : IRDMA_IWARP_PROTOCOL_ONLY;
 rf->rdma_ver = IRDMA_GEN_2;
 rf->rsrc_profile = IRDMA_HMC_PROFILE_DEFAULT;
 rf->rst_to = IRDMA_RST_TIMEOUT_HZ;
@@ -294,7 +295,7 @@ static int irdma_probe(struct auxiliary_device *aux_dev, const struct auxiliary_
 ice_rdma_update_vsi_filter(pf, iwdev->vsi_num, true);
 ibdev_dbg(&iwdev->ibdev, "INIT: Gen2 PF[%d] device probe success\n", PCI_FUNC(rf->pcidev->devfn));
- dev_set_drvdata(&aux_dev->dev, iwdev);
+ auxiliary_set_drvdata(aux_dev, iwdev);
 return 0;
diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h
index b678fe712447..cb218cab79ac 100644
--- a/drivers/infiniband/hw/irdma/main.h
+++ b/drivers/infiniband/hw/irdma/main.h
@@ -467,7 +467,8 @@ void irdma_qp_rem_ref(struct ib_qp *ibqp);
 void irdma_free_lsmm_rsrc(struct irdma_qp *iwqp);
 struct ib_qp *irdma_get_qp(struct ib_device *ibdev, int qpn);
 void irdma_flush_wqes(struct irdma_qp *iwqp, u32 flush_mask);
-void irdma_manage_arp_cache(struct irdma_pci_f *rf, unsigned char *mac_addr,
+void irdma_manage_arp_cache(struct irdma_pci_f *rf,
+ const unsigned char *mac_addr,
 u32 *ip_addr, bool ipv4, u32 action);
 struct irdma_apbvt_entry *irdma_add_apbvt(struct irdma_device *iwdev, u16 port);
 void irdma_del_apbvt(struct irdma_device *iwdev,
@@ -479,7 +480,7 @@ void irdma_free_cqp_request(struct irdma_cqp *cqp,
 void irdma_put_cqp_request(struct irdma_cqp *cqp, struct irdma_cqp_request *cqp_request);
 int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx);
-int irdma_add_local_mac_entry(struct irdma_pci_f *rf, u8 *mac_addr, u16 idx);
+int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx);
 void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx);
 u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf);
@@ -541,6 +542,7 @@ int irdma_ah_cqp_op(struct irdma_pci_f *rf, struct irdma_sc_ah *sc_ah, u8 cmd,
 void (*callback_fcn)(struct irdma_cqp_request *cqp_request), void *cb_param);
 void irdma_gsi_ud_qp_ah_cb(struct irdma_cqp_request *cqp_request);
+bool irdma_cq_empty(struct irdma_cq *iwcq);
 int irdma_inetaddr_event(struct notifier_block *notifier, unsigned long event, void *ptr);
 int irdma_inet6addr_event(struct notifier_block *notifier, unsigned long event,
diff --git a/drivers/infiniband/hw/irdma/osdep.h b/drivers/infiniband/hw/irdma/osdep.h
index b2ab52335ca6..63d8bb3a6903 100644
--- a/drivers/infiniband/hw/irdma/osdep.h
+++ b/drivers/infiniband/hw/irdma/osdep.h
@@ -37,7 +37,6 @@ struct irdma_hw;
 struct irdma_pci_f;
 struct ib_device *to_ibdev(struct irdma_sc_dev *dev);
-u8 __iomem *irdma_get_hw_addr(void *dev);
 void irdma_ieq_mpa_crc_ae(struct irdma_sc_dev *dev, struct irdma_sc_qp *qp);
 enum irdma_status_code irdma_vf_wait_vchnl_resp(struct irdma_sc_dev *dev);
 bool irdma_vf_clear_to_send(struct irdma_sc_dev *dev);
diff --git a/drivers/infiniband/hw/irdma/pble.c b/drivers/infiniband/hw/irdma/pble.c
index aeeb1c310965..fed49da770f3 100644
--- a/drivers/infiniband/hw/irdma/pble.c
+++ b/drivers/infiniband/hw/irdma/pble.c
@@ -25,8 +25,7 @@ void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
 list_del(&chunk->list);
 if (chunk->type == PBLE_SD_PAGED)
 irdma_pble_free_paged_mem(chunk);
- if (chunk->bitmapbuf)
- kfree(chunk->bitmapmem.va);
+ bitmap_free(chunk->bitmapbuf);
 kfree(chunk->chunkmem.va);
 }
 }
@@ -283,7 +282,6 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
 "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n", pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
 pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
- list_add(&chunk->list, &pble_rsrc->pinfo.clist);
 sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ? sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
@@ -295,12 +293,12 @@ add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
 goto error;
 }
+ list_add(&chunk->list, &pble_rsrc->pinfo.clist);
 sd_entry->valid = true;
 return 0;
 error:
- if (chunk->bitmapbuf)
- kfree(chunk->bitmapmem.va);
+ bitmap_free(chunk->bitmapbuf);
 kfree(chunk->chunkmem.va);
 return ret_code;
diff --git a/drivers/infiniband/hw/irdma/pble.h b/drivers/infiniband/hw/irdma/pble.h
index e1b3b8118a2c..d0d4f2b77d34 100644
--- a/drivers/infiniband/hw/irdma/pble.h
+++ b/drivers/infiniband/hw/irdma/pble.h
@@ -69,7 +69,7 @@ struct irdma_add_page_info {
 struct irdma_chunk {
 struct list_head list;
 struct irdma_dma_info dmainfo;
- void *bitmapbuf;
+ unsigned long *bitmapbuf;
 u32 sizeofbitmap;
 u64 size;
@@ -78,7 +78,6 @@ struct irdma_chunk {
 u32 pg_cnt;
 enum irdma_alloc_type type;
 struct irdma_sc_dev *dev;
- struct irdma_virt_mem bitmapmem;
 struct irdma_virt_mem chunkmem;
 };
diff --git a/drivers/infiniband/hw/irdma/protos.h b/drivers/infiniband/hw/irdma/protos.h
index 78f598fdbccf..a17c0ffb0cc8 100644
--- a/drivers/infiniband/hw/irdma/protos.h
+++ b/drivers/infiniband/hw/irdma/protos.h
@@ -37,8 +37,6 @@ void irdma_hw_stats_read_all(struct irdma_vsi_pestat *stats,
 enum irdma_status_code irdma_cqp_ws_node_cmd(struct irdma_sc_dev *dev, u8 cmd, struct irdma_ws_node_info *node_info);
-enum irdma_status_code irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
- struct irdma_up_info *map_info);
 enum irdma_status_code irdma_cqp_ceq_cmd(struct irdma_sc_dev *dev, struct irdma_sc_ceq *sc_ceq, u8 op);
 enum irdma_status_code irdma_cqp_aeq_cmd(struct irdma_sc_dev *dev,
diff --git a/drivers/infiniband/hw/irdma/trace_cm.h b/drivers/infiniband/hw/irdma/trace_cm.h
index bcf10ec427d6..f633fb343328 100644
--- a/drivers/infiniband/hw/irdma/trace_cm.h
+++ b/drivers/infiniband/hw/irdma/trace_cm.h
@@ -144,7 +144,7 @@ DEFINE_EVENT(tos_template, irdma_dcb_tos,
 DECLARE_EVENT_CLASS(qhash_template,
 TP_PROTO(struct irdma_device *iwdev,
 struct irdma_cm_listener *listener,
- char *dev_addr),
+ const char *dev_addr),
 TP_ARGS(iwdev, listener, dev_addr),
 TP_STRUCT__entry(__field(struct irdma_device *, iwdev)
 __field(u16, lport)
@@ -173,12 +173,14 @@ DECLARE_EVENT_CLASS(qhash_template,
 DEFINE_EVENT(qhash_template, irdma_add_mqh_6,
 TP_PROTO(struct irdma_device *iwdev,
- struct irdma_cm_listener *listener, char *dev_addr),
+ struct irdma_cm_listener *listener,
+ const char *dev_addr),
 TP_ARGS(iwdev, listener, dev_addr));
 DEFINE_EVENT(qhash_template, irdma_add_mqh_4,
 TP_PROTO(struct irdma_device *iwdev,
- struct irdma_cm_listener *listener, char *dev_addr),
+ struct irdma_cm_listener *listener,
+ const char *dev_addr),
 TP_ARGS(iwdev, listener, dev_addr));
 TRACE_EVENT(irdma_addr_resolve,
diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h
index 874bc25a938b..9483bb3e10ea 100644
--- a/drivers/infiniband/hw/irdma/type.h
+++ b/drivers/infiniband/hw/irdma/type.h
@@ -852,7 +852,6 @@ struct irdma_roce_offload_info {
 u16 err_rq_idx;
 u32 qkey;
 u32 dest_qp;
- u32 local_qp;
 u8 roce_tver;
 u8 ack_credits;
 u8 err_rq_idx_valid;
@@ -1256,7 +1255,7 @@ enum irdma_status_code irdma_sc_qp_modify(struct irdma_sc_qp *qp,
 u64 scratch, bool post_sq);
 void irdma_sc_send_lsmm(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size, irdma_stag stag);
-void irdma_sc_send_lsmm_nostag(struct irdma_sc_qp *qp, void *lsmm_buf, u32 size);
+
 void irdma_sc_send_rtt(struct irdma_sc_qp *qp, bool read);
 void irdma_sc_qp_setctx(struct irdma_sc_qp *qp, __le64 *qp_ctx, struct irdma_qp_host_ctx_info *info);
diff --git a/drivers/infiniband/hw/irdma/uda.c b/drivers/infiniband/hw/irdma/uda.c
index f5b1b6150cdc..7a9988ddbd01 100644
--- a/drivers/infiniband/hw/irdma/uda.c
+++ b/drivers/infiniband/hw/irdma/uda.c
@@ -1,5 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 or Linux-OpenIB
 /* Copyright (c) 2016 - 2021 Intel Corporation */
+#include <linux/etherdevice.h>
+
 #include "osdep.h"
 #include "status.h"
 #include "hmc.h"
diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c
index 9b544a3b1288..57a9444e9ea7 100644
--- a/drivers/infiniband/hw/irdma/uk.c
+++ b/drivers/infiniband/hw/irdma/uk.c
@@ -13,16 +13,16 @@
 * @sge: sge length and stag
 * @valid: The wqe valid
 */
-static void irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
+static void irdma_set_fragment(__le64 *wqe, u32 offset, struct ib_sge *sge,
 u8 valid)
 {
 if (sge) {
 set_64bit_val(wqe, offset,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
 set_64bit_val(wqe, offset + 8,
 FIELD_PREP(IRDMAQPSQ_VALID, valid) |
- FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->len) |
- FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->stag));
+ FIELD_PREP(IRDMAQPSQ_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_STAG, sge->lkey));
 } else {
 set_64bit_val(wqe, offset, 0);
 set_64bit_val(wqe, offset + 8,
@@ -38,14 +38,14 @@ static void irdma_set_fragment(__le64 *wqe, u32 offset, struct irdma_sge *sge,
 * @valid: wqe valid flag
 */
 static void irdma_set_fragment_gen_1(__le64 *wqe, u32 offset,
- struct irdma_sge *sge, u8 valid)
+ struct ib_sge *sge, u8 valid)
 {
 if (sge) {
 set_64bit_val(wqe, offset,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, sge->addr));
 set_64bit_val(wqe, offset + 8,
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->len) |
- FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->stag));
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_LEN, sge->length) |
+ FIELD_PREP(IRDMAQPSQ_GEN1_FRAG_STAG, sge->lkey));
 } else {
 set_64bit_val(wqe, offset, 0);
 set_64bit_val(wqe, offset + 8, 0);
@@ -289,7 +289,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
 return IRDMA_ERR_INVALID_FRAG_COUNT;
 for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
+ total_size += op_info->lo_sg_list[i].length;
 read_fence |= info->read_fence;
@@ -310,7 +310,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
 irdma_clr_wqes(qp, wqe_idx);
 set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
 if (info->imm_data_valid) {
 set_64bit_val(wqe, 0,
@@ -339,7 +339,7 @@ enum irdma_status_code irdma_uk_rdma_write(struct irdma_qp_uk *qp,
 ++addl_frag_cnt;
 }
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
 FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
 FIELD_PREP(IRDMAQPSQ_IMMDATAFLAG, info->imm_data_valid) |
 FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt) |
@@ -391,7 +391,7 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
 return IRDMA_ERR_INVALID_FRAG_COUNT;
 for (i = 0; i < op_info->num_lo_sges; i++)
- total_size += op_info->lo_sg_list[i].len;
+ total_size += op_info->lo_sg_list[i].length;
 ret_code = irdma_fragcnt_to_quanta_sq(op_info->num_lo_sges, &quanta);
 if (ret_code)
@@ -426,8 +426,8 @@ enum irdma_status_code irdma_uk_rdma_read(struct irdma_qp_uk *qp,
 ++addl_frag_cnt;
 }
 set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
 FIELD_PREP(IRDMAQPSQ_REPORTRTT, (info->report_rtt ? 1 : 0)) |
 FIELD_PREP(IRDMAQPSQ_ADDFRAGCNT, addl_frag_cnt) |
 FIELD_PREP(IRDMAQPSQ_OPCODE,
@@ -477,7 +477,7 @@ enum irdma_status_code irdma_uk_send(struct irdma_qp_uk *qp,
 return IRDMA_ERR_INVALID_FRAG_COUNT;
 for (i = 0; i < op_info->num_sges; i++)
- total_size += op_info->sg_list[i].len;
+ total_size += op_info->sg_list[i].length;
 if (info->imm_data_valid)
 frag_cnt = op_info->num_sges + 1;
@@ -705,9 +705,9 @@ irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp, struct irdma_post_sq_info *in
 read_fence |= info->read_fence;
 set_64bit_val(wqe, 16,
- FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.tag_off));
+ FIELD_PREP(IRDMAQPSQ_FRAG_TO, op_info->rem_addr.addr));
- hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.stag) |
+ hdr = FIELD_PREP(IRDMAQPSQ_REMSTAG, op_info->rem_addr.lkey) |
 FIELD_PREP(IRDMAQPSQ_OPCODE, info->op_type) |
 FIELD_PREP(IRDMAQPSQ_INLINEDATALEN, op_info->len) |
 FIELD_PREP(IRDMAQPSQ_REPORTRTT, info->report_rtt ? 1 : 0) |
@@ -826,7 +826,7 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
 u64 hdr;
 u32 wqe_idx;
 bool local_fence = false;
- struct irdma_sge sge = {};
+ struct ib_sge sge = {};
 info->push_wqe = qp->push_db ? true : false;
 op_info = &info->op.inv_local_stag;
@@ -839,7 +839,7 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
 irdma_clr_wqes(qp, wqe_idx);
- sge.stag = op_info->target_stag;
+ sge.lkey = op_info->target_stag;
 qp->wqe_ops.iw_set_fragment(wqe, 0, &sge, 0);
 set_64bit_val(wqe, 16, 0);
@@ -867,63 +867,6 @@ irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
 }
 /**
- * irdma_uk_mw_bind - bind Memory Window
- * @qp: hw qp ptr
- * @info: post sq information
- * @post_sq: flag to post sq
- */
-enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq)
-{
- __le64 *wqe;
- struct irdma_bind_window *op_info;
- u64 hdr;
- u32 wqe_idx;
- bool local_fence = false;
-
- info->push_wqe = qp->push_db ? true : false;
- op_info = &info->op.bind_window;
- local_fence |= info->local_fence;
-
- wqe = irdma_qp_get_next_send_wqe(qp, &wqe_idx, IRDMA_QP_WQE_MIN_QUANTA,
- 0, info);
- if (!wqe)
- return IRDMA_ERR_QP_TOOMANY_WRS_POSTED;
-
- irdma_clr_wqes(qp, wqe_idx);
-
- qp->wqe_ops.iw_set_mw_bind_wqe(wqe, op_info);
-
- hdr = FIELD_PREP(IRDMAQPSQ_OPCODE, IRDMA_OP_TYPE_BIND_MW) |
- FIELD_PREP(IRDMAQPSQ_STAGRIGHTS,
- ((op_info->ena_reads << 2) | (op_info->ena_writes << 3))) |
- FIELD_PREP(IRDMAQPSQ_VABASEDTO,
- (op_info->addressing_type == IRDMA_ADDR_TYPE_VA_BASED ? 1 : 0)) |
- FIELD_PREP(IRDMAQPSQ_MEMWINDOWTYPE,
- (op_info->mem_window_type_1 ? 1 : 0)) |
- FIELD_PREP(IRDMAQPSQ_PUSHWQE, info->push_wqe) |
- FIELD_PREP(IRDMAQPSQ_READFENCE, info->read_fence) |
- FIELD_PREP(IRDMAQPSQ_LOCALFENCE, local_fence) |
- FIELD_PREP(IRDMAQPSQ_SIGCOMPL, info->signaled) |
- FIELD_PREP(IRDMAQPSQ_VALID, qp->swqe_polarity);
-
- dma_wmb(); /* make sure WQE is populated before valid bit is set */
-
- set_64bit_val(wqe, 24, hdr);
-
- if (info->push_wqe) {
- irdma_qp_push_wqe(qp, wqe, IRDMA_QP_WQE_MIN_QUANTA, wqe_idx,
- post_sq);
- } else {
- if (post_sq)
- irdma_uk_qp_post_wr(qp);
- }
-
- return 0;
-}
-
-/**
 * irdma_uk_post_receive - post receive wqe
 * @qp: hw qp ptr
 * @info: post rq information
@@ -1503,8 +1446,8 @@ enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp,
 * @cq: hw cq
 * @info: hw cq initialization info
 */
-enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
- struct irdma_cq_uk_init_info *info)
+void irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ struct irdma_cq_uk_init_info *info)
 {
 cq->cq_base = info->cq_base;
 cq->cq_id = info->cq_id;
@@ -1515,8 +1458,6 @@ enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
 cq->avoid_mem_cflct = info->avoid_mem_cflct;
 IRDMA_RING_INIT(cq->cq_ring, cq->cq_size);
 cq->polarity = 1;
-
- return 0;
 }
 /**
diff --git a/drivers/infiniband/hw/irdma/user.h b/drivers/infiniband/hw/irdma/user.h
index 3dcbb1fbf2c6..3c811fb88404 100644
--- a/drivers/infiniband/hw/irdma/user.h
+++ b/drivers/infiniband/hw/irdma/user.h
@@ -16,7 +16,6 @@
 #define irdma_access_privileges u32
 #define irdma_physical_fragment u64
 #define irdma_address_list u64 *
-#define irdma_sgl struct irdma_sge *
 #define IRDMA_MAX_MR_SIZE 0x200000000000ULL
@@ -151,12 +150,6 @@ struct irdma_cq_uk;
 struct irdma_qp_uk_init_info;
 struct irdma_cq_uk_init_info;
-struct irdma_sge {
- irdma_tagged_offset tag_off;
- u32 len;
- irdma_stag stag;
-};
-
 struct irdma_ring {
 u32 head;
 u32 tail;
@@ -172,7 +165,7 @@ struct irdma_extended_cqe {
 };
 struct irdma_post_send {
- irdma_sgl sg_list;
+ struct ib_sge *sg_list;
 u32 num_sges;
 u32 qkey;
 u32 dest_qp;
@@ -189,26 +182,26 @@ struct irdma_post_inline_send {
 struct irdma_post_rq_info {
 u64 wr_id;
- irdma_sgl sg_list;
+ struct ib_sge *sg_list;
 u32 num_sges;
 };
 struct irdma_rdma_write {
- irdma_sgl lo_sg_list;
+ struct ib_sge *lo_sg_list;
 u32 num_lo_sges;
- struct irdma_sge rem_addr;
+ struct ib_sge rem_addr;
 };
 struct irdma_inline_rdma_write {
 void *data;
 u32 len;
- struct irdma_sge rem_addr;
+ struct ib_sge rem_addr;
 };
 struct irdma_rdma_read {
- irdma_sgl lo_sg_list;
+ struct ib_sge *lo_sg_list;
 u32 num_lo_sges;
- struct irdma_sge rem_addr;
+ struct ib_sge rem_addr;
 };
 struct irdma_bind_window {
@@ -283,9 +276,7 @@ enum irdma_status_code irdma_uk_inline_rdma_write(struct irdma_qp_uk *qp,
 enum irdma_status_code irdma_uk_inline_send(struct irdma_qp_uk *qp, struct irdma_post_sq_info *info, bool post_sq);
-enum irdma_status_code irdma_uk_mw_bind(struct irdma_qp_uk *qp,
- struct irdma_post_sq_info *info,
- bool post_sq);
+
 enum irdma_status_code irdma_uk_post_nop(struct irdma_qp_uk *qp, u64 wr_id, bool signaled, bool post_sq);
 enum irdma_status_code irdma_uk_post_receive(struct irdma_qp_uk *qp,
@@ -306,7 +297,7 @@ enum irdma_status_code irdma_uk_stag_local_invalidate(struct irdma_qp_uk *qp,
 struct irdma_wqe_uk_ops {
 void (*iw_copy_inline_data)(u8 *dest, u8 *src, u32 len, u8 polarity);
 u16 (*iw_inline_data_size_to_quanta)(u32 data_size);
- void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct irdma_sge *sge,
+ void (*iw_set_fragment)(__le64 *wqe, u32 offset, struct ib_sge *sge,
 u8 valid);
 void (*iw_set_mw_bind_wqe)(__le64 *wqe, struct irdma_bind_window *op_info);
@@ -318,8 +309,8 @@ void irdma_uk_cq_request_notification(struct irdma_cq_uk *cq,
 enum irdma_cmpl_notify cq_notify);
 void irdma_uk_cq_resize(struct irdma_cq_uk *cq, void *cq_base, int size);
 void irdma_uk_cq_set_resized_cnt(struct irdma_cq_uk *qp, u16 cnt);
-enum irdma_status_code irdma_uk_cq_init(struct irdma_cq_uk *cq,
- struct irdma_cq_uk_init_info *info);
+void irdma_uk_cq_init(struct irdma_cq_uk *cq,
+ struct irdma_cq_uk_init_info *info);
 enum irdma_status_code irdma_uk_qp_init(struct irdma_qp_uk *qp, struct irdma_qp_uk_init_info *info);
 struct irdma_sq_uk_wr_trk_info {
@@ -369,7 +360,6 @@ struct irdma_qp_uk {
 bool rq_flush_complete:1; /* Indicates flush was seen and RQ was empty after the flush */
 bool destroy_pending:1; /* Indicates the QP is being destroyed */
 void *back_qp;
- spinlock_t *lock;
 u8 dbg_rq_flushed;
 u8 sq_flush_seen;
 u8 rq_flush_seen;
diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c
index ac91ea5296db..398736d8c78a 100644
--- a/drivers/infiniband/hw/irdma/utils.c
+++ b/drivers/infiniband/hw/irdma/utils.c
@@ -11,7 +11,7 @@
 * @action: modify, delete or add
 */
 int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
- u8 *mac_addr, u32 action)
+ const u8 *mac_addr, u32 action)
 {
 unsigned long flags;
 int arp_index;
@@ -77,7 +77,7 @@ int irdma_arp_table(struct irdma_pci_f *rf, u32 *ip_addr, bool ipv4,
 * @ipv4: IPv4 flag
 * @mac: MAC address
 */
-int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, u8 *mac)
+int irdma_add_arp(struct irdma_pci_f *rf, u32 *ip, bool ipv4, const u8 *mac)
 {
 int arpidx;
@@ -768,17 +768,6 @@ struct ib_qp *irdma_get_qp(struct ib_device *device, int qpn)
 }
 /**
- * irdma_get_hw_addr - return hw addr
- * @par: points to shared dev
- */
-u8 __iomem *irdma_get_hw_addr(void *par)
-{
- struct irdma_sc_dev *dev = par;
-
- return dev->hw->hw_addr;
-}
-
-/**
 * irdma_remove_cqp_head - return head entry and remove
 * @dev: device
 */
@@ -2060,40 +2049,6 @@ exit:
 }
 /**
- * irdma_cqp_up_map_cmd - Set the up-up mapping
- * @dev: pointer to device structure
- * @cmd: map command
- * @map_info: pointer to up map info
- */
-enum irdma_status_code irdma_cqp_up_map_cmd(struct irdma_sc_dev *dev, u8 cmd,
- struct irdma_up_info *map_info)
-{
- struct irdma_pci_f *rf = dev_to_rf(dev);
- struct irdma_cqp *iwcqp = &rf->cqp;
- struct irdma_sc_cqp *cqp = &iwcqp->sc_cqp;
- struct irdma_cqp_request *cqp_request;
- struct cqp_cmds_info *cqp_info;
- enum irdma_status_code status;
-
- cqp_request = irdma_alloc_and_get_cqp_request(iwcqp, false);
- if (!cqp_request)
- return IRDMA_ERR_NO_MEMORY;
-
- cqp_info = &cqp_request->info;
- memset(cqp_info, 0, sizeof(*cqp_info));
- cqp_info->cqp_cmd = cmd;
- cqp_info->post_sq = 1;
- cqp_info->in.u.up_map.info = *map_info;
- cqp_info->in.u.up_map.cqp = cqp;
- cqp_info->in.u.up_map.scratch = (uintptr_t)cqp_request;
-
- status = irdma_handle_cqp_op(rf, cqp_request);
- irdma_put_cqp_request(&rf->cqp, cqp_request);
-
- return status;
-}
-
-/**
 * irdma_ah_cqp_op - perform an AH cqp operation
 * @rf: RDMA PCI function
 * @sc_ah: address handle
@@ -2284,15 +2239,10 @@ enum irdma_status_code irdma_prm_add_pble_mem(struct irdma_pble_prm *pprm,
 sizeofbitmap = (u64)pchunk->size >> pprm->pble_shift;
- pchunk->bitmapmem.size = sizeofbitmap >> 3;
- pchunk->bitmapmem.va = kzalloc(pchunk->bitmapmem.size, GFP_KERNEL);
-
- if (!pchunk->bitmapmem.va)
+ pchunk->bitmapbuf = bitmap_zalloc(sizeofbitmap, GFP_KERNEL);
+ if (!pchunk->bitmapbuf)
 return IRDMA_ERR_NO_MEMORY;
- pchunk->bitmapbuf = pchunk->bitmapmem.va;
- bitmap_zero(pchunk->bitmapbuf, sizeofbitmap);
-
 pchunk->sizeofbitmap = sizeofbitmap;
 /* each pble is 8 bytes hence shift by 3 */
 pprm->total_pble_alloc += pchunk->size >> 3;
@@ -2536,3 +2486,18 @@ void irdma_ib_qp_event(struct irdma_qp *iwqp, enum irdma_qp_event_type event)
 ibevent.element.qp = &iwqp->ibqp;
 iwqp->ibqp.event_handler(&ibevent, iwqp->ibqp.qp_context);
 }
+
+bool irdma_cq_empty(struct irdma_cq *iwcq)
+{
+ struct irdma_cq_uk *ukcq;
+ u64 qword3;
+ __le64 *cqe;
+ u8 polarity;
+
+ ukcq = &iwcq->sc_cq.cq_uk;
+ cqe = IRDMA_GET_CURRENT_CQ_ELEM(ukcq);
+ get_64bit_val(cqe, 24, &qword3);
+ polarity = (u8)FIELD_GET(IRDMA_CQ_VALID, qword3);
+
+ return polarity != ukcq->polarity;
+}
diff --git a/drivers/infiniband/hw/irdma/verbs.c b/drivers/infiniband/hw/irdma/verbs.c
index 102dc9342f2a..460e757d3fe6 100644
--- a/drivers/infiniband/hw/irdma/verbs.c
+++ b/drivers/infiniband/hw/irdma/verbs.c
@@ -21,7 +21,8 @@ static int irdma_query_device(struct ib_device *ibdev,
 return -EINVAL;
 memset(props, 0, sizeof(*props));
- ether_addr_copy((u8 *)&props->sys_image_guid, iwdev->netdev->dev_addr);
+ addrconf_addr_eui48((u8 *)&props->sys_image_guid,
+ iwdev->netdev->dev_addr);
 props->fw_ver = (u64)irdma_fw_major_ver(&rf->sc_dev) << 32 | irdma_fw_minor_ver(&rf->sc_dev);
 props->device_cap_flags = iwdev->device_cap_flags;
@@ -833,7 +834,6 @@ static int irdma_create_qp(struct ib_qp *ibqp,
 qp = &iwqp->sc_qp;
 qp->qp_uk.back_qp = iwqp;
- qp->qp_uk.lock = &iwqp->lock;
 qp->push_idx = IRDMA_INVALID_PUSH_PAGE_INDEX;
 iwqp->iwdev = iwdev;
@@ -1171,6 +1171,10 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 udp_info->ttl = attr->ah_attr.grh.hop_limit;
 udp_info->flow_label = attr->ah_attr.grh.flow_label;
 udp_info->tos = attr->ah_attr.grh.traffic_class;
+ udp_info->src_port =
+ rdma_get_udp_sport(udp_info->flow_label,
+ ibqp->qp_num,
+ roce_info->dest_qp);
 irdma_qp_rem_qos(&iwqp->sc_qp);
 dev->ws_remove(iwqp->sc_qp.vsi, ctx_info->user_pri);
 ctx_info->user_pri = rt_tos2priority(udp_info->tos);
@@ -1198,7 +1202,6 @@ int irdma_modify_qp_roce(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 av->attrs = attr->ah_attr;
 rdma_gid2ip((struct sockaddr *)&av->sgid_addr, &sgid_attr->gid);
 rdma_gid2ip((struct sockaddr *)&av->dgid_addr, &attr->ah_attr.grh.dgid);
- roce_info->local_qp = ibqp->qp_num;
 if (av->sgid_addr.saddr.sa_family == AF_INET6) {
 __be32 *daddr = av->dgid_addr.saddr_in6.sin6_addr.in6_u.u6_addr32;
@@ -3041,24 +3044,6 @@ done:
 }
 /**
- * irdma_copy_sg_list - copy sg list for qp
- * @sg_list: copied into sg_list
- * @sgl: copy from sgl
- * @num_sges: count of sg entries
- */
-static void irdma_copy_sg_list(struct irdma_sge *sg_list, struct ib_sge *sgl,
- int num_sges)
-{
- unsigned int i;
-
- for (i = 0; (i < num_sges) && (i < IRDMA_MAX_WQ_FRAGMENT_COUNT); i++) {
- sg_list[i].tag_off = sgl[i].addr;
- sg_list[i].len = sgl[i].length;
- sg_list[i].stag = sgl[i].lkey;
- }
-}
-
-/**
 * irdma_post_send - kernel application wr
 * @ibqp: qp ptr for wr
 * @ib_wr: work request ptr
@@ -3134,8 +3119,7 @@ static int irdma_post_send(struct ib_qp *ibqp,
 ret = irdma_uk_inline_send(ukqp, &info, false);
 } else {
 info.op.send.num_sges = ib_wr->num_sge;
- info.op.send.sg_list = (struct irdma_sge *)
- ib_wr->sg_list;
+ info.op.send.sg_list = ib_wr->sg_list;
 if (iwqp->ibqp.qp_type == IB_QPT_UD ||
 iwqp->ibqp.qp_type == IB_QPT_GSI) {
 ah = to_iwah(ud_wr(ib_wr)->ah);
@@ -3170,15 +3154,18 @@ static int irdma_post_send(struct ib_qp *ibqp,
 if (ib_wr->send_flags & IB_SEND_INLINE) {
 info.op.inline_rdma_write.data = (void *)(uintptr_t)ib_wr->sg_list[0].addr;
- info.op.inline_rdma_write.len = ib_wr->sg_list[0].length;
- info.op.inline_rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.inline_rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.inline_rdma_write.len =
+ ib_wr->sg_list[0].length;
+ info.op.inline_rdma_write.rem_addr.addr =
+ rdma_wr(ib_wr)->remote_addr;
+ info.op.inline_rdma_write.rem_addr.lkey =
+ rdma_wr(ib_wr)->rkey;
 ret = irdma_uk_inline_rdma_write(ukqp, &info, false);
 } else {
 info.op.rdma_write.lo_sg_list = (void *)ib_wr->sg_list;
 info.op.rdma_write.num_lo_sges = ib_wr->num_sge;
- info.op.rdma_write.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.rdma_write.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_write.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_write.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
 ret = irdma_uk_rdma_write(ukqp, &info, false);
 }
@@ -3199,8 +3186,8 @@ static int irdma_post_send(struct ib_qp *ibqp,
 break;
 }
 info.op_type = IRDMA_OP_TYPE_RDMA_READ;
- info.op.rdma_read.rem_addr.tag_off = rdma_wr(ib_wr)->remote_addr;
- info.op.rdma_read.rem_addr.stag = rdma_wr(ib_wr)->rkey;
+ info.op.rdma_read.rem_addr.addr = rdma_wr(ib_wr)->remote_addr;
+ info.op.rdma_read.rem_addr.lkey = rdma_wr(ib_wr)->rkey;
 info.op.rdma_read.lo_sg_list = (void *)ib_wr->sg_list;
 info.op.rdma_read.num_lo_sges = ib_wr->num_sge;
@@ -3287,7 +3274,6 @@ static int irdma_post_recv(struct ib_qp *ibqp,
 struct irdma_qp *iwqp;
 struct irdma_qp_uk *ukqp;
 struct irdma_post_rq_info post_recv = {};
- struct irdma_sge sg_list[IRDMA_MAX_WQ_FRAGMENT_COUNT];
 enum irdma_status_code ret = 0;
 unsigned long flags;
 int err = 0;
@@ -3302,8 +3288,7 @@ static int irdma_post_recv(struct ib_qp *ibqp,
 while (ib_wr) {
 post_recv.num_sges = ib_wr->num_sge;
 post_recv.wr_id = ib_wr->wr_id;
- irdma_copy_sg_list(sg_list, ib_wr->sg_list, ib_wr->num_sge);
- post_recv.sg_list = sg_list;
+ post_recv.sg_list = ib_wr->sg_list;
 ret = irdma_uk_post_receive(ukqp, &post_recv);
 if (ret) {
 ibdev_dbg(&iwqp->iwdev->ibdev,
@@ -3604,18 +3589,31 @@ static int irdma_req_notify_cq(struct ib_cq *ibcq,
 struct irdma_cq *iwcq;
 struct irdma_cq_uk *ukcq;
 unsigned long flags;
- enum irdma_cmpl_notify cq_notify = IRDMA_CQ_COMPL_EVENT;
+ enum irdma_cmpl_notify cq_notify;
+ bool promo_event = false;
+ int ret = 0;
+ cq_notify = notify_flags == IB_CQ_SOLICITED ?
+ IRDMA_CQ_COMPL_SOLICITED : IRDMA_CQ_COMPL_EVENT;
 iwcq = to_iwcq(ibcq);
 ukcq = &iwcq->sc_cq.cq_uk;
- if (notify_flags == IB_CQ_SOLICITED)
- cq_notify = IRDMA_CQ_COMPL_SOLICITED;
 spin_lock_irqsave(&iwcq->lock, flags);
- irdma_uk_cq_request_notification(ukcq, cq_notify);
+ /* Only promote to arm the CQ for any event if the last arm event was solicited. */
+ if (iwcq->last_notify == IRDMA_CQ_COMPL_SOLICITED && notify_flags != IB_CQ_SOLICITED)
+ promo_event = true;
+
+ if (!iwcq->armed || promo_event) {
+ iwcq->armed = true;
+ iwcq->last_notify = cq_notify;
+ irdma_uk_cq_request_notification(ukcq, cq_notify);
+ }
+
+ if ((notify_flags & IB_CQ_REPORT_MISSED_EVENTS) && !irdma_cq_empty(iwcq))
+ ret = 1;
 spin_unlock_irqrestore(&iwcq->lock, flags);
- return 0;
+ return ret;
 }
 static int irdma_roce_port_immutable(struct ib_device *ibdev, u32 port_num,
@@ -3651,89 +3649,89 @@ static int irdma_iw_port_immutable(struct ib_device *ibdev, u32 port_num,
 return 0;
 }
-static const char *const irdma_hw_stat_names[] = {
+static const struct rdma_stat_desc irdma_hw_stat_descs[] = {
 /* 32bit names */
- [IRDMA_HW_STAT_INDEX_RXVLANERR] = "rxVlanErrors",
- [IRDMA_HW_STAT_INDEX_IP4RXDISCARD] = "ip4InDiscards",
- [IRDMA_HW_STAT_INDEX_IP4RXTRUNC] = "ip4InTruncatedPkts",
- [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE] = "ip4OutNoRoutes",
- [IRDMA_HW_STAT_INDEX_IP6RXDISCARD] = "ip6InDiscards",
- [IRDMA_HW_STAT_INDEX_IP6RXTRUNC] = "ip6InTruncatedPkts",
- [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE] = "ip6OutNoRoutes",
- [IRDMA_HW_STAT_INDEX_TCPRTXSEG] = "tcpRetransSegs",
- [IRDMA_HW_STAT_INDEX_TCPRXOPTERR] = "tcpInOptErrors",
- [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR] = "tcpInProtoErrors",
- [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED] = "cnpHandled",
- [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED] = "cnpIgnored",
- [IRDMA_HW_STAT_INDEX_TXNPCNPSENT] = "cnpSent",
+ [IRDMA_HW_STAT_INDEX_RXVLANERR].name = "rxVlanErrors",
+ [IRDMA_HW_STAT_INDEX_IP4RXDISCARD].name = "ip4InDiscards",
+ [IRDMA_HW_STAT_INDEX_IP4RXTRUNC].name = "ip4InTruncatedPkts",
+ [IRDMA_HW_STAT_INDEX_IP4TXNOROUTE].name = "ip4OutNoRoutes",
+ [IRDMA_HW_STAT_INDEX_IP6RXDISCARD].name = "ip6InDiscards",
+ [IRDMA_HW_STAT_INDEX_IP6RXTRUNC].name = "ip6InTruncatedPkts",
+ [IRDMA_HW_STAT_INDEX_IP6TXNOROUTE].name = "ip6OutNoRoutes",
+ [IRDMA_HW_STAT_INDEX_TCPRTXSEG].name = "tcpRetransSegs",
+ [IRDMA_HW_STAT_INDEX_TCPRXOPTERR].name = "tcpInOptErrors",
+ [IRDMA_HW_STAT_INDEX_TCPRXPROTOERR].name = "tcpInProtoErrors",
+ [IRDMA_HW_STAT_INDEX_RXRPCNPHANDLED].name = "cnpHandled",
+ [IRDMA_HW_STAT_INDEX_RXRPCNPIGNORED].name = "cnpIgnored",
+ [IRDMA_HW_STAT_INDEX_TXNPCNPSENT].name = "cnpSent",
 /* 64bit names */
- [IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip4InOctets",
- [IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip4InPkts",
- [IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip4InReasmRqd",
- [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip4InMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip4InMcastPkts",
- [IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip4OutOctets",
- [IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip4OutPkts",
- [IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip4OutSegRqd",
- [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip4OutMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP4TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip4OutMcastPkts",
- [IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip6InOctets",
- [IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip6InPkts",
- [IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip6InReasmRqd",
- [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip6InMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6RXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip6InMcastPkts",
- [IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip6OutOctets",
- [IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip6OutPkts",
- [IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXFRAGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip6OutSegRqd",
- [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXMCOCTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip6OutMcastOctets",
- [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_IP6TXMCPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "ip6OutMcastPkts",
- [IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_TCPRXSEGS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "tcpInSegs",
- [IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_TCPTXSEG + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "tcpOutSegs",
- [IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMARXRDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "iwInRdmaReads",
- [IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMARXSNDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "iwInRdmaSends",
- [IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMARXWRS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "iwInRdmaWrites",
- [IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMATXRDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "iwOutRdmaReads",
- [IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMATXSNDS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "iwOutRdmaSends",
- [IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMATXWRS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "iwOutRdmaWrites",
- [IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMAVBND + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "iwRdmaBnd",
- [IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_RDMAVINV + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "iwRdmaInv",
- [IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_UDPRXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "RxUDP",
- [IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
+ [IRDMA_HW_STAT_INDEX_UDPTXPKTS + IRDMA_HW_STAT_INDEX_MAX_32].name =
 "TxUDP",
- [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32] =
- "RxECNMrkd",
+ [IRDMA_HW_STAT_INDEX_RXNPECNMARKEDPKTS + IRDMA_HW_STAT_INDEX_MAX_32]
+ .name = "RxECNMrkd",
 };
 static void irdma_get_dev_fw_str(struct ib_device *dev, char *str)
@@ -3757,10 +3755,10 @@ static struct rdma_hw_stats *irdma_alloc_hw_port_stats(struct ib_device *ibdev,
 IRDMA_HW_STAT_INDEX_MAX_64;
 unsigned long lifespan = RDMA_HW_STATS_DEFAULT_LIFESPAN;
- BUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_names) !=
+ BUILD_BUG_ON(ARRAY_SIZE(irdma_hw_stat_descs) !=
 (IRDMA_HW_STAT_INDEX_MAX_32 + IRDMA_HW_STAT_INDEX_MAX_64));
- return rdma_alloc_hw_stats_struct(irdma_hw_stat_names, num_counters,
+ return rdma_alloc_hw_stats_struct(irdma_hw_stat_descs, num_counters,
 lifespan);
 }
@@ -4328,24 +4326,6 @@ static enum rdma_link_layer irdma_get_link_layer(struct ib_device *ibdev,
 return IB_LINK_LAYER_ETHERNET;
 }
-static __be64 irdma_mac_to_guid(struct net_device *ndev)
-{
- unsigned char *mac = ndev->dev_addr;
- __be64 guid;
- unsigned char *dst = (unsigned char *)&guid;
-
- dst[0] = mac[0] ^ 2;
- dst[1] = mac[1];
- dst[2] = mac[2];
- dst[3] = 0xff;
- dst[4] = 0xfe;
- dst[5] = mac[3];
- dst[6] = mac[4];
- dst[7] = mac[5];
-
- return guid;
-}
-
 static const struct ib_device_ops irdma_roce_dev_ops = {
 .attach_mcast = irdma_attach_mcast,
 .create_ah = irdma_create_ah,
@@ -4415,7 +4395,8 @@ static const struct ib_device_ops irdma_dev_ops = {
 static void irdma_init_roce_device(struct irdma_device *iwdev)
 {
 iwdev->ibdev.node_type = RDMA_NODE_IB_CA;
- iwdev->ibdev.node_guid = irdma_mac_to_guid(iwdev->netdev);
+ addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+ iwdev->netdev->dev_addr);
 ib_set_device_ops(&iwdev->ibdev, &irdma_roce_dev_ops);
 }
@@ -4428,7 +4409,8 @@ static int irdma_init_iw_device(struct irdma_device *iwdev)
 struct net_device *netdev = iwdev->netdev;
 iwdev->ibdev.node_type = RDMA_NODE_RNIC;
- ether_addr_copy((u8 *)&iwdev->ibdev.node_guid, netdev->dev_addr);
+ addrconf_addr_eui48((u8 *)&iwdev->ibdev.node_guid,
+ netdev->dev_addr);
 iwdev->ibdev.ops.iw_add_ref = irdma_qp_add_ref;
 iwdev->ibdev.ops.iw_rem_ref = irdma_qp_rem_ref;
 iwdev->ibdev.ops.iw_get_qp = irdma_get_qp;
diff --git a/drivers/infiniband/hw/irdma/verbs.h b/drivers/infiniband/hw/irdma/verbs.h
index 5c244cd321a3..d0fdef8d09ea 100644
--- a/drivers/infiniband/hw/irdma/verbs.h
+++ b/drivers/infiniband/hw/irdma/verbs.h
@@ -110,6 +110,8 @@ struct irdma_cq {
 u16 cq_size;
 u16 cq_num;
 bool user_mode;
+ bool armed;
+ enum irdma_cmpl_notify last_notify;
 u32 polled_cmpls;
 u32 cq_mem_size;
 struct irdma_dma_mem kmem;
diff --git a/drivers/infiniband/hw/mlx4/alias_GUID.c b/drivers/infiniband/hw/mlx4/alias_GUID.c
index 571d9c542024..e2e1f5daddc4 100644
--- a/drivers/infiniband/hw/mlx4/alias_GUID.c
+++ b/drivers/infiniband/hw/mlx4/alias_GUID.c
@@ -822,10 +822,8 @@ void mlx4_ib_destroy_alias_guid_service(struct mlx4_ib_dev *dev)
 }
 spin_unlock_irqrestore(&sriov->alias_guid.ag_work_lock, flags);
 }
- for (i = 0 ; i < dev->num_ports; i++) {
- flush_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
+ for (i = 0 ; i < dev->num_ports; i++)
 destroy_workqueue(dev->sriov.alias_guid.ports_guid[i].wq);
- }
 ib_sa_unregister_client(dev->sriov.alias_guid.sa_client);
 kfree(dev->sriov.alias_guid.sa_client);
 }
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index f367f4a4abff..93b1650eacfa 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -85,14 +85,6 @@ static enum rdma_link_layer mlx4_ib_port_link_layer(struct ib_device *device,
 static struct workqueue_struct *wq;
-static void init_query_mad(struct ib_smp *mad)
-{
- mad->base_version = 1;
- mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
- mad->class_version = 1;
- mad->method = IB_MGMT_METHOD_GET;
-}
-
 static int check_flow_steering_support(struct mlx4_dev *dev)
 {
 int eth_num_ports = 0;
@@ -471,7 +463,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
 if (!in_mad || !out_mad)
 goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;
 err = mlx4_MAD_IFC(to_mdev(ibdev), MLX4_MAD_IFC_IGNORE_KEYS,
@@ -669,7 +661,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u32 port,
 if (!in_mad || !out_mad)
 goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
 in_mad->attr_mod = cpu_to_be32(port);
@@ -721,7 +713,7 @@ static int ib_link_query_port(struct ib_device *ibdev, u32 port,
 /* If reported active speed is QDR, check if is FDR-10 */
 if (props->active_speed == IB_SPEED_QDR) {
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
 in_mad->attr_id = MLX4_ATTR_EXTENDED_PORT_INFO;
 in_mad->attr_mod = cpu_to_be32(port);
@@ -848,7 +840,7 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
 if (!in_mad || !out_mad)
 goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
 in_mad->attr_mod = cpu_to_be32(port);
@@ -870,7 +862,7 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u32 port, int index,
 }
 }
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
 in_mad->attr_mod = cpu_to_be32(index / 8);
@@ -917,7 +909,7 @@ static int mlx4_ib_query_sl2vl(struct ib_device *ibdev, u32 port,
 if (!in_mad || !out_mad)
 goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_SL_TO_VL_TABLE;
 in_mad->attr_mod = 0;
@@ -971,7 +963,7 @@ int __mlx4_ib_query_pkey(struct ib_device *ibdev, u32 port, u16 index,
 if (!in_mad || !out_mad)
 goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
 in_mad->attr_mod = cpu_to_be32(index / 32);
@@ -1990,7 +1982,7 @@ static int init_node_data(struct mlx4_ib_dev *dev)
 if (!in_mad || !out_mad)
 goto out;
- init_query_mad(in_mad);
+ ib_init_query_mad(in_mad);
 in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;
 if (mlx4_is_master(dev->dev))
 mad_ifc_flags |= MLX4_MAD_IFC_NET_VIEW;
@@ -2105,10 +2097,10 @@ mlx4_ib_alloc_hw_device_stats(struct ib_device *ibdev)
 struct mlx4_ib_dev *dev = to_mdev(ibdev);
 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[0].name)
+ if (!diag[0].descs)
 return NULL;
- return rdma_alloc_hw_stats_struct(diag[0].name, diag[0].num_counters,
+ return rdma_alloc_hw_stats_struct(diag[0].descs, diag[0].num_counters,
 RDMA_HW_STATS_DEFAULT_LIFESPAN);
 }
@@ -2118,10 +2110,10 @@ mlx4_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num)
 struct mlx4_ib_dev *dev = to_mdev(ibdev);
 struct mlx4_ib_diag_counters *diag = dev->diag_counters;
- if (!diag[1].name)
+ if (!diag[1].descs)
 return NULL;
- return rdma_alloc_hw_stats_struct(diag[1].name, diag[1].num_counters,
+ return rdma_alloc_hw_stats_struct(diag[1].descs, diag[1].num_counters,
 RDMA_HW_STATS_DEFAULT_LIFESPAN);
 }
@@ -2151,10 +2143,8 @@ static int mlx4_ib_get_hw_stats(struct ib_device *ibdev,
 }
 static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev,
- const char ***name,
- u32
**offset, - u32 *num, - bool port) + struct rdma_stat_desc **pdescs, + u32 **offset, u32 *num, bool port) { u32 num_counters; @@ -2166,46 +2156,46 @@ static int __mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev, if (!port) num_counters += ARRAY_SIZE(diag_device_only); - *name = kcalloc(num_counters, sizeof(**name), GFP_KERNEL); - if (!*name) + *pdescs = kcalloc(num_counters, sizeof(struct rdma_stat_desc), + GFP_KERNEL); + if (!*pdescs) return -ENOMEM; *offset = kcalloc(num_counters, sizeof(**offset), GFP_KERNEL); if (!*offset) - goto err_name; + goto err; *num = num_counters; return 0; -err_name: - kfree(*name); +err: + kfree(*pdescs); return -ENOMEM; } static void mlx4_ib_fill_diag_counters(struct mlx4_ib_dev *ibdev, - const char **name, - u32 *offset, - bool port) + struct rdma_stat_desc *descs, + u32 *offset, bool port) { int i; int j; for (i = 0, j = 0; i < ARRAY_SIZE(diag_basic); i++, j++) { - name[i] = diag_basic[i].name; + descs[i].name = diag_basic[i].name; offset[i] = diag_basic[i].offset; } if (ibdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_DIAG_PER_PORT) { for (i = 0; i < ARRAY_SIZE(diag_ext); i++, j++) { - name[j] = diag_ext[i].name; + descs[j].name = diag_ext[i].name; offset[j] = diag_ext[i].offset; } } if (!port) { for (i = 0; i < ARRAY_SIZE(diag_device_only); i++, j++) { - name[j] = diag_device_only[i].name; + descs[j].name = diag_device_only[i].name; offset[j] = diag_device_only[i].offset; } } @@ -2217,6 +2207,11 @@ static const struct ib_device_ops mlx4_ib_hw_stats_ops = { .get_hw_stats = mlx4_ib_get_hw_stats, }; +static const struct ib_device_ops mlx4_ib_hw_stats_ops1 = { + .alloc_hw_device_stats = mlx4_ib_alloc_hw_device_stats, + .get_hw_stats = mlx4_ib_get_hw_stats, +}; + static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev) { struct mlx4_ib_diag_counters *diag = ibdev->diag_counters; @@ -2229,17 +2224,24 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev) return 0; for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { - /* i == 1 means we are building port counters */ - if (i && !per_port) - continue; + /* + * i == 1 means we are building port counters, set a different + * stats ops without port stats callback. 
+ */ + if (i && !per_port) { + ib_set_device_ops(&ibdev->ib_dev, + &mlx4_ib_hw_stats_ops1); + + return 0; + } - ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].name, + ret = __mlx4_ib_alloc_diag_counters(ibdev, &diag[i].descs, &diag[i].offset, &diag[i].num_counters, i); if (ret) goto err_alloc; - mlx4_ib_fill_diag_counters(ibdev, diag[i].name, + mlx4_ib_fill_diag_counters(ibdev, diag[i].descs, diag[i].offset, i); } @@ -2249,7 +2251,7 @@ static int mlx4_ib_alloc_diag_counters(struct mlx4_ib_dev *ibdev) err_alloc: if (i) { - kfree(diag[i - 1].name); + kfree(diag[i - 1].descs); kfree(diag[i - 1].offset); } @@ -2262,7 +2264,7 @@ static void mlx4_ib_diag_cleanup(struct mlx4_ib_dev *ibdev) for (i = 0; i < MLX4_DIAG_COUNTERS_TYPES; i++) { kfree(ibdev->diag_counters[i].offset); - kfree(ibdev->diag_counters[i].name); + kfree(ibdev->diag_counters[i].descs); } } @@ -2275,7 +2277,7 @@ static void mlx4_ib_update_qps(struct mlx4_ib_dev *ibdev, u64 release_mac = MLX4_IB_INVALID_MAC; struct mlx4_ib_qp *qp; - new_smac = mlx4_mac_to_u64(dev->dev_addr); + new_smac = ether_addr_to_u64(dev->dev_addr); atomic64_set(&ibdev->iboe.mac[port - 1], new_smac); /* no need for update QP1 and mac registration in non-SRIOV */ @@ -2774,10 +2776,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev) if (err) goto err_counter; - ibdev->ib_uc_qpns_bitmap = - kmalloc_array(BITS_TO_LONGS(ibdev->steer_qpn_count), - sizeof(long), - GFP_KERNEL); + ibdev->ib_uc_qpns_bitmap = bitmap_alloc(ibdev->steer_qpn_count, + GFP_KERNEL); if (!ibdev->ib_uc_qpns_bitmap) goto err_steer_qp_release; @@ -2865,7 +2865,7 @@ err_diag_counters: mlx4_ib_diag_cleanup(ibdev); err_steer_free_bitmap: - kfree(ibdev->ib_uc_qpns_bitmap); + bitmap_free(ibdev->ib_uc_qpns_bitmap); err_steer_qp_release: mlx4_qp_release_range(dev, ibdev->steer_qpn_base, @@ -2978,7 +2978,7 @@ static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr) mlx4_qp_release_range(dev, ibdev->steer_qpn_base, ibdev->steer_qpn_count); - kfree(ibdev->ib_uc_qpns_bitmap); + bitmap_free(ibdev->ib_uc_qpns_bitmap); iounmap(ibdev->uar_map); for (p = 0; p < ibdev->num_ports; ++p) @@ -3237,7 +3237,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: ew = kmalloc(sizeof *ew, GFP_ATOMIC); if (!ew) - break; + return; INIT_WORK(&ew->work, handle_port_mgmt_change_event); memcpy(&ew->ib_eqe, eqe, sizeof *eqe); diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index c60f6e9ac640..d84023b4b1b8 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -601,7 +601,7 @@ struct mlx4_ib_counters { #define MLX4_DIAG_COUNTERS_TYPES 2 struct mlx4_ib_diag_counters { - const char **name; + struct rdma_stat_desc *descs; u32 *offset; u32 num_counters; }; diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 8662f462e2a5..b17d6ebc5b70 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -1099,8 +1099,10 @@ static int create_qp_common(struct ib_pd *pd, struct ib_qp_init_attr *init_attr, if (dev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) qp->flags |= MLX4_IB_QP_NETIF; - else + else { + err = -EINVAL; goto err; + } } err = set_kernel_sq_size(dev, &init_attr->cap, qp_type, qp); @@ -1853,7 +1855,7 @@ static int mlx4_set_path(struct mlx4_ib_dev *dev, const struct ib_qp_attr *qp, u16 vlan_id, u8 *smac) { return _mlx4_set_path(dev, &qp->ah_attr, - mlx4_mac_to_u64(smac), + ether_addr_to_u64(smac), vlan_id, path, 
&mqp->pri, port); } diff --git a/drivers/infiniband/hw/mlx5/cmd.c b/drivers/infiniband/hw/mlx5/cmd.c index a8db8a051170..ff3742b0460a 100644 --- a/drivers/infiniband/hw/mlx5/cmd.c +++ b/drivers/infiniband/hw/mlx5/cmd.c @@ -206,3 +206,29 @@ out: kfree(in); return err; } + +int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid) +{ + u32 out[MLX5_ST_SZ_DW(alloc_uar_out)] = {}; + u32 in[MLX5_ST_SZ_DW(alloc_uar_in)] = {}; + int err; + + MLX5_SET(alloc_uar_in, in, opcode, MLX5_CMD_OP_ALLOC_UAR); + MLX5_SET(alloc_uar_in, in, uid, uid); + err = mlx5_cmd_exec_inout(dev, alloc_uar, in, out); + if (err) + return err; + + *uarn = MLX5_GET(alloc_uar_out, out, uar); + return 0; +} + +int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid) +{ + u32 in[MLX5_ST_SZ_DW(dealloc_uar_in)] = {}; + + MLX5_SET(dealloc_uar_in, in, opcode, MLX5_CMD_OP_DEALLOC_UAR); + MLX5_SET(dealloc_uar_in, in, uar, uarn); + MLX5_SET(dealloc_uar_in, in, uid, uid); + return mlx5_cmd_exec_in(dev, dealloc_uar, in); +} diff --git a/drivers/infiniband/hw/mlx5/cmd.h b/drivers/infiniband/hw/mlx5/cmd.h index 66c96292ed43..ee46638db5de 100644 --- a/drivers/infiniband/hw/mlx5/cmd.h +++ b/drivers/infiniband/hw/mlx5/cmd.h @@ -57,4 +57,6 @@ int mlx5_cmd_xrcd_alloc(struct mlx5_core_dev *dev, u32 *xrcdn, u16 uid); int mlx5_cmd_xrcd_dealloc(struct mlx5_core_dev *dev, u32 xrcdn, u16 uid); int mlx5_cmd_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb, u16 opmod, u8 port); +int mlx5_cmd_uar_alloc(struct mlx5_core_dev *dev, u32 *uarn, u16 uid); +int mlx5_cmd_uar_dealloc(struct mlx5_core_dev *dev, u32 uarn, u16 uid); #endif /* MLX5_IB_CMD_H */ diff --git a/drivers/infiniband/hw/mlx5/counters.c b/drivers/infiniband/hw/mlx5/counters.c index 224ba36f2946..945758f39523 100644 --- a/drivers/infiniband/hw/mlx5/counters.c +++ b/drivers/infiniband/hw/mlx5/counters.c @@ -12,6 +12,7 @@ struct mlx5_ib_counter { const char *name; size_t offset; + u32 type; }; #define INIT_Q_COUNTER(_name) \ @@ -75,6 +76,21 @@ static const struct mlx5_ib_counter ext_ppcnt_cnts[] = { INIT_EXT_PPCNT_COUNTER(rx_icrc_encapsulated), }; +#define INIT_OP_COUNTER(_name, _type) \ + { .name = #_name, .type = MLX5_IB_OPCOUNTER_##_type} + +static const struct mlx5_ib_counter basic_op_cnts[] = { + INIT_OP_COUNTER(cc_rx_ce_pkts, CC_RX_CE_PKTS), +}; + +static const struct mlx5_ib_counter rdmarx_cnp_op_cnts[] = { + INIT_OP_COUNTER(cc_rx_cnp_pkts, CC_RX_CNP_PKTS), +}; + +static const struct mlx5_ib_counter rdmatx_cnp_op_cnts[] = { + INIT_OP_COUNTER(cc_tx_cnp_pkts, CC_TX_CNP_PKTS), +}; + static int mlx5_ib_read_counters(struct ib_counters *counters, struct ib_counters_read_attr *read_attr, struct uverbs_attr_bundle *attrs) @@ -161,17 +177,34 @@ u16 mlx5_ib_get_counters_id(struct mlx5_ib_dev *dev, u32 port_num) return cnts->set_id; } +static struct rdma_hw_stats *do_alloc_stats(const struct mlx5_ib_counters *cnts) +{ + struct rdma_hw_stats *stats; + u32 num_hw_counters; + int i; + + num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters + + cnts->num_ext_ppcnt_counters; + stats = rdma_alloc_hw_stats_struct(cnts->descs, + num_hw_counters + + cnts->num_op_counters, + RDMA_HW_STATS_DEFAULT_LIFESPAN); + if (!stats) + return NULL; + + for (i = 0; i < cnts->num_op_counters; i++) + set_bit(num_hw_counters + i, stats->is_disabled); + + return stats; +} + static struct rdma_hw_stats * mlx5_ib_alloc_hw_device_stats(struct ib_device *ibdev) { struct mlx5_ib_dev *dev = to_mdev(ibdev); const struct mlx5_ib_counters *cnts = &dev->port[0].cnts; - return 
rdma_alloc_hw_stats_struct(cnts->names, - cnts->num_q_counters + - cnts->num_cong_counters + - cnts->num_ext_ppcnt_counters, - RDMA_HW_STATS_DEFAULT_LIFESPAN); + return do_alloc_stats(cnts); } static struct rdma_hw_stats * @@ -180,11 +213,7 @@ mlx5_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) struct mlx5_ib_dev *dev = to_mdev(ibdev); const struct mlx5_ib_counters *cnts = &dev->port[port_num - 1].cnts; - return rdma_alloc_hw_stats_struct(cnts->names, - cnts->num_q_counters + - cnts->num_cong_counters + - cnts->num_ext_ppcnt_counters, - RDMA_HW_STATS_DEFAULT_LIFESPAN); + return do_alloc_stats(cnts); } static int mlx5_ib_query_q_counters(struct mlx5_core_dev *mdev, @@ -241,9 +270,9 @@ free: return ret; } -static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, - struct rdma_hw_stats *stats, - u32 port_num, int index) +static int do_get_hw_stats(struct ib_device *ibdev, + struct rdma_hw_stats *stats, + u32 port_num, int index) { struct mlx5_ib_dev *dev = to_mdev(ibdev); const struct mlx5_ib_counters *cnts = get_counters(dev, port_num - 1); @@ -295,6 +324,88 @@ done: return num_counters; } +static int do_get_op_stat(struct ib_device *ibdev, + struct rdma_hw_stats *stats, + u32 port_num, int index) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + const struct mlx5_ib_counters *cnts; + const struct mlx5_ib_op_fc *opfcs; + u64 packets = 0, bytes; + u32 type; + int ret; + + cnts = get_counters(dev, port_num - 1); + opfcs = cnts->opfcs; + type = *(u32 *)cnts->descs[index].priv; + if (type >= MLX5_IB_OPCOUNTER_MAX) + return -EINVAL; + + if (!opfcs[type].fc) + goto out; + + ret = mlx5_fc_query(dev->mdev, opfcs[type].fc, + &packets, &bytes); + if (ret) + return ret; + +out: + stats->value[index] = packets; + return index; +} + +static int do_get_op_stats(struct ib_device *ibdev, + struct rdma_hw_stats *stats, + u32 port_num) +{ + struct mlx5_ib_dev *dev = to_mdev(ibdev); + const struct mlx5_ib_counters *cnts; + int index, ret, num_hw_counters; + + cnts = get_counters(dev, port_num - 1); + num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters + + cnts->num_ext_ppcnt_counters; + for (index = num_hw_counters; + index < (num_hw_counters + cnts->num_op_counters); index++) { + ret = do_get_op_stat(ibdev, stats, port_num, index); + if (ret != index) + return ret; + } + + return cnts->num_op_counters; +} + +static int mlx5_ib_get_hw_stats(struct ib_device *ibdev, + struct rdma_hw_stats *stats, + u32 port_num, int index) +{ + int num_counters, num_hw_counters, num_op_counters; + struct mlx5_ib_dev *dev = to_mdev(ibdev); + const struct mlx5_ib_counters *cnts; + + cnts = get_counters(dev, port_num - 1); + num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters + + cnts->num_ext_ppcnt_counters; + num_counters = num_hw_counters + cnts->num_op_counters; + + if (index < 0 || index > num_counters) + return -EINVAL; + else if (index > 0 && index < num_hw_counters) + return do_get_hw_stats(ibdev, stats, port_num, index); + else if (index >= num_hw_counters && index < num_counters) + return do_get_op_stat(ibdev, stats, port_num, index); + + num_hw_counters = do_get_hw_stats(ibdev, stats, port_num, index); + if (num_hw_counters < 0) + return num_hw_counters; + + num_op_counters = do_get_op_stats(ibdev, stats, port_num); + if (num_op_counters < 0) + return num_op_counters; + + return num_hw_counters + num_op_counters; +} + static struct rdma_hw_stats * mlx5_ib_counter_alloc_stats(struct rdma_counter *counter) { @@ -302,11 +413,7 @@ mlx5_ib_counter_alloc_stats(struct rdma_counter 
*counter) const struct mlx5_ib_counters *cnts = get_counters(dev, counter->port - 1); - return rdma_alloc_hw_stats_struct(cnts->names, - cnts->num_q_counters + - cnts->num_cong_counters + - cnts->num_ext_ppcnt_counters, - RDMA_HW_STATS_DEFAULT_LIFESPAN); + return do_alloc_stats(cnts); } static int mlx5_ib_counter_update_stats(struct rdma_counter *counter) @@ -371,67 +478,89 @@ static int mlx5_ib_counter_unbind_qp(struct ib_qp *qp) return mlx5_ib_qp_set_counter(qp, NULL); } - static void mlx5_ib_fill_counters(struct mlx5_ib_dev *dev, - const char **names, - size_t *offsets) + struct rdma_stat_desc *descs, size_t *offsets) { int i; int j = 0; for (i = 0; i < ARRAY_SIZE(basic_q_cnts); i++, j++) { - names[j] = basic_q_cnts[i].name; + descs[j].name = basic_q_cnts[i].name; offsets[j] = basic_q_cnts[i].offset; } if (MLX5_CAP_GEN(dev->mdev, out_of_seq_cnt)) { for (i = 0; i < ARRAY_SIZE(out_of_seq_q_cnts); i++, j++) { - names[j] = out_of_seq_q_cnts[i].name; + descs[j].name = out_of_seq_q_cnts[i].name; offsets[j] = out_of_seq_q_cnts[i].offset; } } if (MLX5_CAP_GEN(dev->mdev, retransmission_q_counters)) { for (i = 0; i < ARRAY_SIZE(retrans_q_cnts); i++, j++) { - names[j] = retrans_q_cnts[i].name; + descs[j].name = retrans_q_cnts[i].name; offsets[j] = retrans_q_cnts[i].offset; } } if (MLX5_CAP_GEN(dev->mdev, enhanced_error_q_counters)) { for (i = 0; i < ARRAY_SIZE(extended_err_cnts); i++, j++) { - names[j] = extended_err_cnts[i].name; + descs[j].name = extended_err_cnts[i].name; offsets[j] = extended_err_cnts[i].offset; } } if (MLX5_CAP_GEN(dev->mdev, roce_accl)) { for (i = 0; i < ARRAY_SIZE(roce_accl_cnts); i++, j++) { - names[j] = roce_accl_cnts[i].name; + descs[j].name = roce_accl_cnts[i].name; offsets[j] = roce_accl_cnts[i].offset; } } if (MLX5_CAP_GEN(dev->mdev, cc_query_allowed)) { for (i = 0; i < ARRAY_SIZE(cong_cnts); i++, j++) { - names[j] = cong_cnts[i].name; + descs[j].name = cong_cnts[i].name; offsets[j] = cong_cnts[i].offset; } } if (MLX5_CAP_PCAM_FEATURE(dev->mdev, rx_icrc_encapsulated_counter)) { for (i = 0; i < ARRAY_SIZE(ext_ppcnt_cnts); i++, j++) { - names[j] = ext_ppcnt_cnts[i].name; + descs[j].name = ext_ppcnt_cnts[i].name; offsets[j] = ext_ppcnt_cnts[i].offset; } } + + for (i = 0; i < ARRAY_SIZE(basic_op_cnts); i++, j++) { + descs[j].name = basic_op_cnts[i].name; + descs[j].flags |= IB_STAT_FLAG_OPTIONAL; + descs[j].priv = &basic_op_cnts[i].type; + } + + if (MLX5_CAP_FLOWTABLE(dev->mdev, + ft_field_support_2_nic_receive_rdma.bth_opcode)) { + for (i = 0; i < ARRAY_SIZE(rdmarx_cnp_op_cnts); i++, j++) { + descs[j].name = rdmarx_cnp_op_cnts[i].name; + descs[j].flags |= IB_STAT_FLAG_OPTIONAL; + descs[j].priv = &rdmarx_cnp_op_cnts[i].type; + } + } + + if (MLX5_CAP_FLOWTABLE(dev->mdev, + ft_field_support_2_nic_transmit_rdma.bth_opcode)) { + for (i = 0; i < ARRAY_SIZE(rdmatx_cnp_op_cnts); i++, j++) { + descs[j].name = rdmatx_cnp_op_cnts[i].name; + descs[j].flags |= IB_STAT_FLAG_OPTIONAL; + descs[j].priv = &rdmatx_cnp_op_cnts[i].type; + } + } } static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev, struct mlx5_ib_counters *cnts) { - u32 num_counters; + u32 num_counters, num_op_counters; num_counters = ARRAY_SIZE(basic_q_cnts); @@ -457,20 +586,34 @@ static int __mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev, cnts->num_ext_ppcnt_counters = ARRAY_SIZE(ext_ppcnt_cnts); num_counters += ARRAY_SIZE(ext_ppcnt_cnts); } - cnts->names = kcalloc(num_counters, sizeof(*cnts->names), GFP_KERNEL); - if (!cnts->names) + + num_op_counters = ARRAY_SIZE(basic_op_cnts); + + if 
(MLX5_CAP_FLOWTABLE(dev->mdev, + ft_field_support_2_nic_receive_rdma.bth_opcode)) + num_op_counters += ARRAY_SIZE(rdmarx_cnp_op_cnts); + + if (MLX5_CAP_FLOWTABLE(dev->mdev, + ft_field_support_2_nic_transmit_rdma.bth_opcode)) + num_op_counters += ARRAY_SIZE(rdmatx_cnp_op_cnts); + + cnts->num_op_counters = num_op_counters; + num_counters += num_op_counters; + cnts->descs = kcalloc(num_counters, + sizeof(struct rdma_stat_desc), GFP_KERNEL); + if (!cnts->descs) return -ENOMEM; cnts->offsets = kcalloc(num_counters, sizeof(*cnts->offsets), GFP_KERNEL); if (!cnts->offsets) - goto err_names; + goto err; return 0; -err_names: - kfree(cnts->names); - cnts->names = NULL; +err: + kfree(cnts->descs); + cnts->descs = NULL; return -ENOMEM; } @@ -478,7 +621,7 @@ static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) { u32 in[MLX5_ST_SZ_DW(dealloc_q_counter_in)] = {}; int num_cnt_ports; - int i; + int i, j; num_cnt_ports = is_mdev_switchdev_mode(dev->mdev) ? 1 : dev->num_ports; @@ -491,8 +634,20 @@ static void mlx5_ib_dealloc_counters(struct mlx5_ib_dev *dev) dev->port[i].cnts.set_id); mlx5_cmd_exec_in(dev->mdev, dealloc_q_counter, in); } - kfree(dev->port[i].cnts.names); + kfree(dev->port[i].cnts.descs); kfree(dev->port[i].cnts.offsets); + + for (j = 0; j < MLX5_IB_OPCOUNTER_MAX; j++) { + if (!dev->port[i].cnts.opfcs[j].fc) + continue; + + if (IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)) + mlx5_ib_fs_remove_op_fc(dev, + &dev->port[i].cnts.opfcs[j], j); + mlx5_fc_destroy(dev->mdev, + dev->port[i].cnts.opfcs[j].fc); + dev->port[i].cnts.opfcs[j].fc = NULL; + } } } @@ -514,7 +669,7 @@ static int mlx5_ib_alloc_counters(struct mlx5_ib_dev *dev) if (err) goto err_alloc; - mlx5_ib_fill_counters(dev, dev->port[i].cnts.names, + mlx5_ib_fill_counters(dev, dev->port[i].cnts.descs, dev->port[i].cnts.offsets); MLX5_SET(alloc_q_counter_in, in, uid, @@ -672,6 +827,56 @@ void mlx5_ib_counters_clear_description(struct ib_counters *counters) mutex_unlock(&mcounters->mcntrs_mutex); } +static int mlx5_ib_modify_stat(struct ib_device *device, u32 port, + unsigned int index, bool enable) +{ + struct mlx5_ib_dev *dev = to_mdev(device); + struct mlx5_ib_counters *cnts; + struct mlx5_ib_op_fc *opfc; + u32 num_hw_counters, type; + int ret; + + cnts = &dev->port[port - 1].cnts; + num_hw_counters = cnts->num_q_counters + cnts->num_cong_counters + + cnts->num_ext_ppcnt_counters; + if (index < num_hw_counters || + index >= (num_hw_counters + cnts->num_op_counters)) + return -EINVAL; + + if (!(cnts->descs[index].flags & IB_STAT_FLAG_OPTIONAL)) + return -EINVAL; + + type = *(u32 *)cnts->descs[index].priv; + if (type >= MLX5_IB_OPCOUNTER_MAX) + return -EINVAL; + + opfc = &cnts->opfcs[type]; + + if (enable) { + if (opfc->fc) + return -EEXIST; + + opfc->fc = mlx5_fc_create(dev->mdev, false); + if (IS_ERR(opfc->fc)) + return PTR_ERR(opfc->fc); + + ret = mlx5_ib_fs_add_op_fc(dev, port, opfc, type); + if (ret) { + mlx5_fc_destroy(dev->mdev, opfc->fc); + opfc->fc = NULL; + } + return ret; + } + + if (!opfc->fc) + return -EINVAL; + + mlx5_ib_fs_remove_op_fc(dev, opfc, type); + mlx5_fc_destroy(dev->mdev, opfc->fc); + opfc->fc = NULL; + return 0; +} + static const struct ib_device_ops hw_stats_ops = { .alloc_hw_port_stats = mlx5_ib_alloc_hw_port_stats, .get_hw_stats = mlx5_ib_get_hw_stats, @@ -680,6 +885,8 @@ static const struct ib_device_ops hw_stats_ops = { .counter_dealloc = mlx5_ib_counter_dealloc, .counter_alloc_stats = mlx5_ib_counter_alloc_stats, .counter_update_stats = mlx5_ib_counter_update_stats, + .modify_hw_stat = 
IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS) ? + mlx5_ib_modify_stat : NULL, }; static const struct ib_device_ops hw_switchdev_stats_ops = { diff --git a/drivers/infiniband/hw/mlx5/cq.c b/drivers/infiniband/hw/mlx5/cq.c index a190fb581591..08371a80fdc2 100644 --- a/drivers/infiniband/hw/mlx5/cq.c +++ b/drivers/infiniband/hw/mlx5/cq.c @@ -328,8 +328,11 @@ static void mlx5_handle_error_cqe(struct mlx5_ib_dev *dev, } wc->vendor_err = cqe->vendor_err_synd; - if (dump) + if (dump) { + mlx5_ib_warn(dev, "WC error: %d, Message: %s\n", wc->status, + ib_wc_status_msg(wc->status)); dump_cqe(dev, cqe); + } } static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64, diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c index e95967aefe78..08b7f6bc56c3 100644 --- a/drivers/infiniband/hw/mlx5/devx.c +++ b/drivers/infiniband/hw/mlx5/devx.c @@ -1292,21 +1292,16 @@ static int devx_handle_mkey_indirect(struct devx_obj *obj, struct mlx5_ib_dev *dev, void *in, void *out) { - struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr; - struct mlx5_core_mkey *mkey; + struct mlx5_ib_mkey *mkey = &obj->mkey; void *mkc; u8 key; - mkey = &devx_mr->mmkey; mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry); key = MLX5_GET(mkc, mkc, mkey_7_0); mkey->key = mlx5_idx_to_mkey( MLX5_GET(create_mkey_out, out, mkey_index)) | key; mkey->type = MLX5_MKEY_INDIRECT_DEVX; - mkey->iova = MLX5_GET64(mkc, mkc, start_addr); - mkey->size = MLX5_GET64(mkc, mkc, len); - mkey->pd = MLX5_GET(mkc, mkc, pd); - devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size); + mkey->ndescs = MLX5_GET(mkc, mkc, translations_octword_size); init_waitqueue_head(&mkey->wait); return mlx5r_store_odp_mkey(dev, mkey); @@ -1384,13 +1379,13 @@ static int devx_obj_cleanup(struct ib_uobject *uobject, dev = mlx5_udata_to_mdev(&attrs->driver_udata); if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY && xa_erase(&obj->ib_dev->odp_mkeys, - mlx5_base_mkey(obj->devx_mr.mmkey.key))) + mlx5_base_mkey(obj->mkey.key))) /* * The pagefault_single_data_segment() does commands against * the mmkey, we must wait for that to stop before freeing the * mkey, as another allocation could get the same mkey #. 
*/ - mlx5r_deref_wait_odp_mkey(&obj->devx_mr.mmkey); + mlx5r_deref_wait_odp_mkey(&obj->mkey); if (obj->flags & DEVX_OBJ_FLAGS_DCT) ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct); diff --git a/drivers/infiniband/hw/mlx5/devx.h b/drivers/infiniband/hw/mlx5/devx.h index 1f69866aed16..ee2213275fd6 100644 --- a/drivers/infiniband/hw/mlx5/devx.h +++ b/drivers/infiniband/hw/mlx5/devx.h @@ -16,7 +16,7 @@ struct devx_obj { u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW]; u32 flags; union { - struct mlx5_ib_devx_mr devx_mr; + struct mlx5_ib_mkey mkey; struct mlx5_core_dct core_dct; struct mlx5_core_cq core_cq; u32 flow_counter_bulk_size; diff --git a/drivers/infiniband/hw/mlx5/doorbell.c b/drivers/infiniband/hw/mlx5/doorbell.c index 6398e2f48579..e32111117a5e 100644 --- a/drivers/infiniband/hw/mlx5/doorbell.c +++ b/drivers/infiniband/hw/mlx5/doorbell.c @@ -32,6 +32,7 @@ #include <linux/kref.h> #include <linux/slab.h> +#include <linux/sched/mm.h> #include <rdma/ib_umem.h> #include "mlx5_ib.h" diff --git a/drivers/infiniband/hw/mlx5/fs.c b/drivers/infiniband/hw/mlx5/fs.c index 5fbc0a8454b9..661ed2b44508 100644 --- a/drivers/infiniband/hw/mlx5/fs.c +++ b/drivers/infiniband/hw/mlx5/fs.c @@ -10,12 +10,14 @@ #include <rdma/uverbs_std_types.h> #include <rdma/mlx5_user_ioctl_cmds.h> #include <rdma/mlx5_user_ioctl_verbs.h> +#include <rdma/ib_hdrs.h> #include <rdma/ib_umem.h> #include <linux/mlx5/driver.h> #include <linux/mlx5/fs.h> #include <linux/mlx5/fs_helpers.h> #include <linux/mlx5/accel.h> #include <linux/mlx5/eswitch.h> +#include <net/inet_ecn.h> #include "mlx5_ib.h" #include "counters.h" #include "devx.h" @@ -847,6 +849,191 @@ static struct mlx5_ib_flow_prio *get_flow_table(struct mlx5_ib_dev *dev, return prio; } +enum { + RDMA_RX_ECN_OPCOUNTER_PRIO, + RDMA_RX_CNP_OPCOUNTER_PRIO, +}; + +enum { + RDMA_TX_CNP_OPCOUNTER_PRIO, +}; + +static int set_vhca_port_spec(struct mlx5_ib_dev *dev, u32 port_num, + struct mlx5_flow_spec *spec) +{ + if (!MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, + ft_field_support.source_vhca_port) || + !MLX5_CAP_FLOWTABLE_RDMA_TX(dev->mdev, + ft_field_support.source_vhca_port)) + return -EOPNOTSUPP; + + MLX5_SET_TO_ONES(fte_match_param, &spec->match_criteria, + misc_parameters.source_vhca_port); + MLX5_SET(fte_match_param, &spec->match_value, + misc_parameters.source_vhca_port, port_num); + + return 0; +} + +static int set_ecn_ce_spec(struct mlx5_ib_dev *dev, u32 port_num, + struct mlx5_flow_spec *spec, int ipv) +{ + if (!MLX5_CAP_FLOWTABLE_RDMA_RX(dev->mdev, + ft_field_support.outer_ip_version)) + return -EOPNOTSUPP; + + if (mlx5_core_mp_enabled(dev->mdev) && + set_vhca_port_spec(dev, port_num, spec)) + return -EOPNOTSUPP; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.ip_ecn); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_ecn, + INET_ECN_CE); + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + outer_headers.ip_version); + MLX5_SET(fte_match_param, spec->match_value, outer_headers.ip_version, + ipv); + + spec->match_criteria_enable = + get_match_criteria_enable(spec->match_criteria); + + return 0; +} + +static int set_cnp_spec(struct mlx5_ib_dev *dev, u32 port_num, + struct mlx5_flow_spec *spec) +{ + if (mlx5_core_mp_enabled(dev->mdev) && + set_vhca_port_spec(dev, port_num, spec)) + return -EOPNOTSUPP; + + MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, + misc_parameters.bth_opcode); + MLX5_SET(fte_match_param, spec->match_value, misc_parameters.bth_opcode, + IB_BTH_OPCODE_CNP); + + spec->match_criteria_enable = + 
get_match_criteria_enable(spec->match_criteria); + + return 0; +} + +int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num, + struct mlx5_ib_op_fc *opfc, + enum mlx5_ib_optional_counter_type type) +{ + enum mlx5_flow_namespace_type fn_type; + int priority, i, err, spec_num; + struct mlx5_flow_act flow_act = {}; + struct mlx5_flow_destination dst; + struct mlx5_flow_namespace *ns; + struct mlx5_ib_flow_prio *prio; + struct mlx5_flow_spec *spec; + + spec = kcalloc(MAX_OPFC_RULES, sizeof(*spec), GFP_KERNEL); + if (!spec) + return -ENOMEM; + + switch (type) { + case MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS: + if (set_ecn_ce_spec(dev, port_num, &spec[0], + MLX5_FS_IPV4_VERSION) || + set_ecn_ce_spec(dev, port_num, &spec[1], + MLX5_FS_IPV6_VERSION)) { + err = -EOPNOTSUPP; + goto free; + } + spec_num = 2; + fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS; + priority = RDMA_RX_ECN_OPCOUNTER_PRIO; + break; + + case MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS: + if (!MLX5_CAP_FLOWTABLE(dev->mdev, + ft_field_support_2_nic_receive_rdma.bth_opcode) || + set_cnp_spec(dev, port_num, &spec[0])) { + err = -EOPNOTSUPP; + goto free; + } + spec_num = 1; + fn_type = MLX5_FLOW_NAMESPACE_RDMA_RX_COUNTERS; + priority = RDMA_RX_CNP_OPCOUNTER_PRIO; + break; + + case MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS: + if (!MLX5_CAP_FLOWTABLE(dev->mdev, + ft_field_support_2_nic_transmit_rdma.bth_opcode) || + set_cnp_spec(dev, port_num, &spec[0])) { + err = -EOPNOTSUPP; + goto free; + } + spec_num = 1; + fn_type = MLX5_FLOW_NAMESPACE_RDMA_TX_COUNTERS; + priority = RDMA_TX_CNP_OPCOUNTER_PRIO; + break; + + default: + err = -EOPNOTSUPP; + goto free; + } + + ns = mlx5_get_flow_namespace(dev->mdev, fn_type); + if (!ns) { + err = -EOPNOTSUPP; + goto free; + } + + prio = &dev->flow_db->opfcs[type]; + if (!prio->flow_table) { + prio = _get_prio(ns, prio, priority, + dev->num_ports * MAX_OPFC_RULES, 1, 0); + if (IS_ERR(prio)) { + err = PTR_ERR(prio); + goto free; + } + } + + dst.type = MLX5_FLOW_DESTINATION_TYPE_COUNTER; + dst.counter_id = mlx5_fc_id(opfc->fc); + + flow_act.action = + MLX5_FLOW_CONTEXT_ACTION_COUNT | MLX5_FLOW_CONTEXT_ACTION_ALLOW; + + for (i = 0; i < spec_num; i++) { + opfc->rule[i] = mlx5_add_flow_rules(prio->flow_table, &spec[i], + &flow_act, &dst, 1); + if (IS_ERR(opfc->rule[i])) { + err = PTR_ERR(opfc->rule[i]); + goto del_rules; + } + } + prio->refcount += spec_num; + kfree(spec); + + return 0; + +del_rules: + for (i -= 1; i >= 0; i--) + mlx5_del_flow_rules(opfc->rule[i]); + put_flow_table(dev, prio, false); +free: + kfree(spec); + return err; +} + +void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev, + struct mlx5_ib_op_fc *opfc, + enum mlx5_ib_optional_counter_type type) +{ + int i; + + for (i = 0; i < MAX_OPFC_RULES && opfc->rule[i]; i++) { + mlx5_del_flow_rules(opfc->rule[i]); + put_flow_table(dev, &dev->flow_db->opfcs[type], true); + } +} + static void set_underlay_qp(struct mlx5_ib_dev *dev, struct mlx5_flow_spec *spec, u32 underlay_qpn) @@ -1321,7 +1508,7 @@ _get_flow_table(struct mlx5_ib_dev *dev, !esw_encap) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; break; - case MLX5_FLOW_NAMESPACE_FDB: + case MLX5_FLOW_NAMESPACE_FDB_BYPASS: max_table_size = BIT( MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, log_max_ft_size)); if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev->mdev, decap) && esw_encap) @@ -1330,7 +1517,7 @@ _get_flow_table(struct mlx5_ib_dev *dev, reformat_l3_tunnel_to_l2) && esw_encap) flags |= MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT; - priority = FDB_BYPASS_PATH; + priority = fs_matcher->priority; break; case MLX5_FLOW_NAMESPACE_RDMA_RX: 
max_table_size = BIT( @@ -1359,8 +1546,8 @@ _get_flow_table(struct mlx5_ib_dev *dev, case MLX5_FLOW_NAMESPACE_EGRESS: prio = &dev->flow_db->egress_prios[priority]; break; - case MLX5_FLOW_NAMESPACE_FDB: - prio = &dev->flow_db->fdb; + case MLX5_FLOW_NAMESPACE_FDB_BYPASS: + prio = &dev->flow_db->fdb[priority]; break; case MLX5_FLOW_NAMESPACE_RDMA_RX: prio = &dev->flow_db->rdma_rx[priority]; @@ -1750,7 +1937,7 @@ mlx5_ib_ft_type_to_namespace(enum mlx5_ib_uapi_flow_table_type table_type, *namespace = MLX5_FLOW_NAMESPACE_EGRESS; break; case MLX5_IB_UAPI_FLOW_TABLE_TYPE_FDB: - *namespace = MLX5_FLOW_NAMESPACE_FDB; + *namespace = MLX5_FLOW_NAMESPACE_FDB_BYPASS; break; case MLX5_IB_UAPI_FLOW_TABLE_TYPE_RDMA_RX: *namespace = MLX5_FLOW_NAMESPACE_RDMA_RX; @@ -1842,8 +2029,8 @@ static int get_dests(struct uverbs_attr_bundle *attrs, } /* Allow only DEVX object, drop as dest for FDB */ - if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB && !(dest_devx || - (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP))) + if (fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS && + !(dest_devx || (*flags & MLX5_IB_ATTR_CREATE_FLOW_FLAGS_DROP))) return -EINVAL; /* Allow only DEVX object or QP as dest when inserting to RDMA_RX */ @@ -1863,7 +2050,7 @@ static int get_dests(struct uverbs_attr_bundle *attrs, if (!is_flow_dest(devx_obj, dest_id, dest_type)) return -EINVAL; /* Allow only flow table as dest when inserting to FDB or RDMA_RX */ - if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB || + if ((fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS || fs_matcher->ns_type == MLX5_FLOW_NAMESPACE_RDMA_RX) && *dest_type != MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE) return -EINVAL; @@ -2133,7 +2320,7 @@ static int UVERBS_HANDLER(MLX5_IB_METHOD_FLOW_MATCHER_CREATE)( if (err) goto end; - if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB && + if (obj->ns_type == MLX5_FLOW_NAMESPACE_FDB_BYPASS && mlx5_eswitch_mode(dev->mdev) != MLX5_ESWITCH_OFFLOADS) { err = -EINVAL; goto end; diff --git a/drivers/infiniband/hw/mlx5/mad.c b/drivers/infiniband/hw/mlx5/mad.c index ec242a5a17a3..293ed709e5ed 100644 --- a/drivers/infiniband/hw/mlx5/mad.c +++ b/drivers/infiniband/hw/mlx5/mad.c @@ -291,7 +291,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, unsigned int port) if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); @@ -318,7 +318,7 @@ static int mlx5_query_mad_ifc_smp_attr_node_info(struct ib_device *ibdev, if (!in_mad) return -ENOMEM; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mlx5_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, @@ -405,7 +405,7 @@ int mlx5_query_mad_ifc_node_desc(struct mlx5_ib_dev *dev, char *node_desc) if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); @@ -430,7 +430,7 @@ int mlx5_query_mad_ifc_node_guid(struct mlx5_ib_dev *dev, __be64 *node_guid) if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mlx5_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad); @@ -456,7 +456,7 @@ int mlx5_query_mad_ifc_pkey(struct ib_device *ibdev, u32 port, u16 index, if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; in_mad->attr_mod = 
cpu_to_be32(index / 32); @@ -485,7 +485,7 @@ int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index, if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); @@ -496,7 +496,7 @@ int mlx5_query_mad_ifc_gids(struct ib_device *ibdev, u32 port, int index, memcpy(gid->raw, out_mad->data + 8, 8); - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; in_mad->attr_mod = cpu_to_be32(index / 8); @@ -530,7 +530,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port, /* props being zeroed by the caller, avoid zeroing it here */ - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); @@ -584,6 +584,11 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port, props->port_cap_flags2 & IB_PORT_LINK_SPEED_HDR_SUP) props->active_speed = IB_SPEED_HDR; break; + case 8: + if (props->port_cap_flags & IB_PORT_CAP_MASK2_SUP && + props->port_cap_flags2 & IB_PORT_LINK_SPEED_NDR_SUP) + props->active_speed = IB_SPEED_NDR; + break; } } @@ -591,7 +596,7 @@ int mlx5_query_mad_ifc_port(struct ib_device *ibdev, u32 port, if (props->active_speed == 4) { if (dev->port_caps[port - 1].ext_port_cap & MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) { - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c index 8664bcf6d3f5..85f526c861e9 100644 --- a/drivers/infiniband/hw/mlx5/main.c +++ b/drivers/infiniband/hw/mlx5/main.c @@ -1643,7 +1643,8 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte bfregi = &context->bfregi; for (i = 0; i < bfregi->num_static_sys_pages; i++) { - err = mlx5_cmd_alloc_uar(dev->mdev, &bfregi->sys_pages[i]); + err = mlx5_cmd_uar_alloc(dev->mdev, &bfregi->sys_pages[i], + context->devx_uid); if (err) goto error; @@ -1657,7 +1658,8 @@ static int allocate_uars(struct mlx5_ib_dev *dev, struct mlx5_ib_ucontext *conte error: for (--i; i >= 0; i--) - if (mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i])) + if (mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i], + context->devx_uid)) mlx5_ib_warn(dev, "failed to free uar %d\n", i); return err; @@ -1673,7 +1675,8 @@ static void deallocate_uars(struct mlx5_ib_dev *dev, for (i = 0; i < bfregi->num_sys_pages; i++) if (i < bfregi->num_static_sys_pages || bfregi->sys_pages[i] != MLX5_IB_INVALID_UAR_INDEX) - mlx5_cmd_free_uar(dev->mdev, bfregi->sys_pages[i]); + mlx5_cmd_uar_dealloc(dev->mdev, bfregi->sys_pages[i], + context->devx_uid); } int mlx5_ib_enable_lb(struct mlx5_ib_dev *dev, bool td, bool qp) @@ -1891,6 +1894,13 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, if (req.num_low_latency_bfregs > req.total_num_bfregs - 1) return -EINVAL; + if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) { + err = mlx5_ib_devx_create(dev, true); + if (err < 0) + goto out_ctx; + context->devx_uid = err; + } + lib_uar_4k = req.lib_caps & MLX5_LIB_CAP_4K_UAR; lib_uar_dyn = req.lib_caps & MLX5_LIB_CAP_DYN_UAR; bfregi = &context->bfregi; @@ -1903,7 +1913,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, /* updates req->total_num_bfregs */ err = calc_total_bfregs(dev, lib_uar_4k, &req, bfregi); if (err) - goto out_ctx; + goto out_devx; mutex_init(&bfregi->lock); bfregi->lib_uar_4k = lib_uar_4k; @@ -1911,7 
+1921,7 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, GFP_KERNEL); if (!bfregi->count) { err = -ENOMEM; - goto out_ctx; + goto out_devx; } bfregi->sys_pages = kcalloc(bfregi->num_sys_pages, @@ -1927,17 +1937,10 @@ static int mlx5_ib_alloc_ucontext(struct ib_ucontext *uctx, goto out_sys_pages; uar_done: - if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) { - err = mlx5_ib_devx_create(dev, true); - if (err < 0) - goto out_uars; - context->devx_uid = err; - } - err = mlx5_ib_alloc_transport_domain(dev, &context->tdn, context->devx_uid); if (err) - goto out_devx; + goto out_uars; INIT_LIST_HEAD(&context->db_page_list); mutex_init(&context->db_page_mutex); @@ -1972,9 +1975,6 @@ uar_done: out_mdev: mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid); -out_devx: - if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) - mlx5_ib_devx_destroy(dev, context->devx_uid); out_uars: deallocate_uars(dev, context); @@ -1985,6 +1985,10 @@ out_sys_pages: out_count: kfree(bfregi->count); +out_devx: + if (req.flags & MLX5_IB_ALLOC_UCTX_DEVX) + mlx5_ib_devx_destroy(dev, context->devx_uid); + out_ctx: return err; } @@ -2021,12 +2025,12 @@ static void mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext) bfregi = &context->bfregi; mlx5_ib_dealloc_transport_domain(dev, context->tdn, context->devx_uid); - if (context->devx_uid) - mlx5_ib_devx_destroy(dev, context->devx_uid); - deallocate_uars(dev, context); kfree(bfregi->sys_pages); kfree(bfregi->count); + + if (context->devx_uid) + mlx5_ib_devx_destroy(dev, context->devx_uid); } static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, @@ -2119,6 +2123,7 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry) struct mlx5_user_mmap_entry *mentry = to_mmmap(entry); struct mlx5_ib_dev *dev = to_mdev(entry->ucontext->device); struct mlx5_var_table *var_table = &dev->var_table; + struct mlx5_ib_ucontext *context = to_mucontext(entry->ucontext); switch (mentry->mmap_flag) { case MLX5_IB_MMAP_TYPE_MEMIC: @@ -2133,7 +2138,8 @@ static void mlx5_ib_mmap_free(struct rdma_user_mmap_entry *entry) break; case MLX5_IB_MMAP_TYPE_UAR_WC: case MLX5_IB_MMAP_TYPE_UAR_NC: - mlx5_cmd_free_uar(dev->mdev, mentry->page_idx); + mlx5_cmd_uar_dealloc(dev->mdev, mentry->page_idx, + context->devx_uid); kfree(mentry); break; default: @@ -2211,7 +2217,8 @@ static int uar_mmap(struct mlx5_ib_dev *dev, enum mlx5_ib_mmap_cmd cmd, bfregi->count[bfreg_dyn_idx]++; mutex_unlock(&bfregi->lock); - err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index); + err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, + context->devx_uid); if (err) { mlx5_ib_warn(dev, "UAR alloc failed\n"); goto free_bfreg; @@ -2240,7 +2247,7 @@ err: if (!dyn_uar) return err; - mlx5_cmd_free_uar(dev->mdev, idx); + mlx5_cmd_uar_dealloc(dev->mdev, idx, context->devx_uid); free_bfreg: mlx5_ib_free_bfreg(dev, bfregi, bfreg_dyn_idx); @@ -3489,7 +3496,7 @@ alloc_uar_entry(struct mlx5_ib_ucontext *c, return ERR_PTR(-ENOMEM); dev = to_mdev(c->ibucontext.device); - err = mlx5_cmd_alloc_uar(dev->mdev, &uar_index); + err = mlx5_cmd_uar_alloc(dev->mdev, &uar_index, c->devx_uid); if (err) goto end; @@ -3507,7 +3514,7 @@ alloc_uar_entry(struct mlx5_ib_ucontext *c, return entry; err_insert: - mlx5_cmd_free_uar(dev->mdev, uar_index); + mlx5_cmd_uar_dealloc(dev->mdev, uar_index, c->devx_uid); end: kfree(entry); return ERR_PTR(err); @@ -4415,7 +4422,7 @@ static int mlx5r_mp_probe(struct auxiliary_device *adev, } mutex_unlock(&mlx5_ib_multiport_mutex); - dev_set_drvdata(&adev->dev, mpi); + auxiliary_set_drvdata(adev, mpi); return 0; } 
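Aside, for orientation only and not part of the patch: the mlx5r_mp_probe hunk above and the mlx5r_mp_remove/mlx5r_probe/mlx5r_remove hunks below replace dev_{set,get}_drvdata(&adev->dev, ...) with the dedicated auxiliary-bus accessors. A minimal sketch of that pattern follows, assuming a hypothetical foo auxiliary driver; only auxiliary_set_drvdata()/auxiliary_get_drvdata() and the probe/remove prototypes are the real kernel API, every foo_* name is invented.

        #include <linux/auxiliary_bus.h>
        #include <linux/slab.h>

        struct foo_state {
                int opened;     /* whatever per-device state the driver keeps */
        };

        static int foo_probe(struct auxiliary_device *adev,
                             const struct auxiliary_device_id *id)
        {
                struct foo_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

                if (!st)
                        return -ENOMEM;

                /* Stash driver state on the auxiliary device itself ... */
                auxiliary_set_drvdata(adev, st);
                return 0;
        }

        static void foo_remove(struct auxiliary_device *adev)
        {
                /* ... and fetch it back without reaching into adev->dev. */
                struct foo_state *st = auxiliary_get_drvdata(adev);

                kfree(st);
        }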
@@ -4423,7 +4430,7 @@ static void mlx5r_mp_remove(struct auxiliary_device *adev) { struct mlx5_ib_multiport_info *mpi; - mpi = dev_get_drvdata(&adev->dev); + mpi = auxiliary_get_drvdata(adev); mutex_lock(&mlx5_ib_multiport_mutex); if (mpi->ibdev) mlx5_ib_unbind_slave_port(mpi->ibdev, mpi); @@ -4473,7 +4480,7 @@ static int mlx5r_probe(struct auxiliary_device *adev, return ret; } - dev_set_drvdata(&adev->dev, dev); + auxiliary_set_drvdata(adev, dev); return 0; } @@ -4481,7 +4488,7 @@ static void mlx5r_remove(struct auxiliary_device *adev) { struct mlx5_ib_dev *dev; - dev = dev_get_drvdata(&adev->dev); + dev = auxiliary_get_drvdata(adev); __mlx5_ib_remove(dev, dev->profile, MLX5_IB_STAGE_MAX); } diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h index bf20a388eabe..cbc20e400be0 100644 --- a/drivers/infiniband/hw/mlx5/mlx5_ib.h +++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h @@ -232,6 +232,7 @@ enum { #define MLX5_IB_NUM_FLOW_FT (MLX5_IB_FLOW_LEFTOVERS_PRIO + 1) #define MLX5_IB_NUM_SNIFFER_FTS 2 #define MLX5_IB_NUM_EGRESS_FTS 1 +#define MLX5_IB_NUM_FDB_FTS MLX5_BY_PASS_NUM_REGULAR_PRIOS struct mlx5_ib_flow_prio { struct mlx5_flow_table *flow_table; unsigned int refcount; @@ -263,14 +264,23 @@ struct mlx5_ib_pp { struct mlx5_core_dev *mdev; }; +enum mlx5_ib_optional_counter_type { + MLX5_IB_OPCOUNTER_CC_RX_CE_PKTS, + MLX5_IB_OPCOUNTER_CC_RX_CNP_PKTS, + MLX5_IB_OPCOUNTER_CC_TX_CNP_PKTS, + + MLX5_IB_OPCOUNTER_MAX, +}; + struct mlx5_ib_flow_db { struct mlx5_ib_flow_prio prios[MLX5_IB_NUM_FLOW_FT]; struct mlx5_ib_flow_prio egress_prios[MLX5_IB_NUM_FLOW_FT]; struct mlx5_ib_flow_prio sniffer[MLX5_IB_NUM_SNIFFER_FTS]; struct mlx5_ib_flow_prio egress[MLX5_IB_NUM_EGRESS_FTS]; - struct mlx5_ib_flow_prio fdb; + struct mlx5_ib_flow_prio fdb[MLX5_IB_NUM_FDB_FTS]; struct mlx5_ib_flow_prio rdma_rx[MLX5_IB_NUM_FLOW_FT]; struct mlx5_ib_flow_prio rdma_tx[MLX5_IB_NUM_FLOW_FT]; + struct mlx5_ib_flow_prio opfcs[MLX5_IB_OPCOUNTER_MAX]; struct mlx5_flow_table *lag_demux_ft; /* Protect flow steering bypass flow tables * when add/del flow rules. 
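Aside, for orientation only and not part of the patch: the mlx5_ib.h hunk above adds the optional-counter bookkeeping, and together with the irdma, mlx4 and mlx5 counters.c hunks earlier in this diff, every provider now describes its HW counters through struct rdma_stat_desc rather than a bare array of name strings. A minimal sketch of that descriptor pattern follows, with invented demo_* names; only rdma_stat_desc, IB_STAT_FLAG_OPTIONAL, RDMA_HW_STATS_DEFAULT_LIFESPAN and the rdma_alloc_hw_stats_struct() call mirror the API this series converts the drivers to.

        #include <rdma/ib_verbs.h>

        static u32 demo_opt_counter_type;       /* driver-private tag, e.g. a HW counter id */

        static const struct rdma_stat_desc demo_stat_descs[] = {
                { .name = "demo_rx_pkts" },
                { .name = "demo_tx_pkts" },
                /* Optional counters are flagged and may carry private data. */
                { .name  = "demo_cnp_pkts",
                  .flags = IB_STAT_FLAG_OPTIONAL,
                  .priv  = &demo_opt_counter_type },
        };

        static struct rdma_hw_stats *demo_alloc_hw_port_stats(struct ib_device *ibdev,
                                                              u32 port_num)
        {
                /* The core keeps a reference to the table and sizes stats->value[] to match. */
                return rdma_alloc_hw_stats_struct(demo_stat_descs,
                                                  ARRAY_SIZE(demo_stat_descs),
                                                  RDMA_HW_STATS_DEFAULT_LIFESPAN);
        }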
@@ -619,6 +629,20 @@ struct mlx5_user_mmap_entry { u32 page_idx; }; +enum mlx5_mkey_type { + MLX5_MKEY_MR = 1, + MLX5_MKEY_MW, + MLX5_MKEY_INDIRECT_DEVX, +}; + +struct mlx5_ib_mkey { + u32 key; + enum mlx5_mkey_type type; + unsigned int ndescs; + struct wait_queue_head wait; + refcount_t usecount; +}; + #define MLX5_IB_MTT_PRESENT (MLX5_IB_MTT_READ | MLX5_IB_MTT_WRITE) #define MLX5_IB_DM_MEMIC_ALLOWED_ACCESS (IB_ACCESS_LOCAL_WRITE |\ @@ -637,13 +661,13 @@ struct mlx5_user_mmap_entry { struct mlx5_ib_mr { struct ib_mr ibmr; - struct mlx5_core_mkey mmkey; + struct mlx5_ib_mkey mmkey; /* User MR data */ struct mlx5_cache_ent *cache_ent; + /* Everything after cache_ent is zero'd when MR allocated */ struct ib_umem *umem; - /* This is zero'd when the MR is allocated */ union { /* Used only while the MR is in the cache */ struct { @@ -659,7 +683,6 @@ struct mlx5_ib_mr { void *descs_alloc; dma_addr_t desc_map; int max_descs; - int ndescs; int desc_size; int access_mode; @@ -696,7 +719,7 @@ struct mlx5_ib_mr { /* Zero the fields in the mr that are variant depending on usage */ static inline void mlx5_clear_mr(struct mlx5_ib_mr *mr) { - memset(mr->out, 0, sizeof(*mr) - offsetof(struct mlx5_ib_mr, out)); + memset_after(mr, 0, cache_ent); } static inline bool is_odp_mr(struct mlx5_ib_mr *mr) @@ -713,13 +736,7 @@ static inline bool is_dmabuf_mr(struct mlx5_ib_mr *mr) struct mlx5_ib_mw { struct ib_mw ibmw; - struct mlx5_core_mkey mmkey; - int ndescs; -}; - -struct mlx5_ib_devx_mr { - struct mlx5_core_mkey mmkey; - int ndescs; + struct mlx5_ib_mkey mmkey; }; struct mlx5_ib_umr_context { @@ -797,15 +814,32 @@ struct mlx5_ib_resources { struct mlx5_ib_port_resources ports[2]; }; +#define MAX_OPFC_RULES 2 + +struct mlx5_ib_op_fc { + struct mlx5_fc *fc; + struct mlx5_flow_handle *rule[MAX_OPFC_RULES]; +}; + struct mlx5_ib_counters { - const char **names; + struct rdma_stat_desc *descs; size_t *offsets; u32 num_q_counters; u32 num_cong_counters; u32 num_ext_ppcnt_counters; + u32 num_op_counters; u16 set_id; + struct mlx5_ib_op_fc opfcs[MLX5_IB_OPCOUNTER_MAX]; }; +int mlx5_ib_fs_add_op_fc(struct mlx5_ib_dev *dev, u32 port_num, + struct mlx5_ib_op_fc *opfc, + enum mlx5_ib_optional_counter_type type); + +void mlx5_ib_fs_remove_op_fc(struct mlx5_ib_dev *dev, + struct mlx5_ib_op_fc *opfc, + enum mlx5_ib_optional_counter_type type); + struct mlx5_ib_multiport_info; struct mlx5_ib_multiport { @@ -1432,14 +1466,6 @@ extern const struct uapi_definition mlx5_ib_flow_defs[]; extern const struct uapi_definition mlx5_ib_qos_defs[]; extern const struct uapi_definition mlx5_ib_std_types_defs[]; -static inline void init_query_mad(struct ib_smp *mad) -{ - mad->base_version = 1; - mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; - mad->class_version = 1; - mad->method = IB_MGMT_METHOD_GET; -} - static inline int is_qp1(enum ib_qp_type qp_type) { return qp_type == MLX5_IB_QPT_HW_GSI || qp_type == IB_QPT_GSI; @@ -1579,7 +1605,7 @@ static inline bool mlx5_ib_can_reconfig_with_umr(struct mlx5_ib_dev *dev, } static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev, - struct mlx5_core_mkey *mmkey) + struct mlx5_ib_mkey *mmkey) { refcount_set(&mmkey->usecount, 1); @@ -1588,14 +1614,14 @@ static inline int mlx5r_store_odp_mkey(struct mlx5_ib_dev *dev, } /* deref an mkey that can participate in ODP flow */ -static inline void mlx5r_deref_odp_mkey(struct mlx5_core_mkey *mmkey) +static inline void mlx5r_deref_odp_mkey(struct mlx5_ib_mkey *mmkey) { if (refcount_dec_and_test(&mmkey->usecount)) wake_up(&mmkey->wait); } /* deref an 
mkey that can participate in ODP flow and wait for relese */ -static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_core_mkey *mmkey) +static inline void mlx5r_deref_wait_odp_mkey(struct mlx5_ib_mkey *mmkey) { mlx5r_deref_odp_mkey(mmkey); wait_event(mmkey->wait, refcount_read(&mmkey->usecount) == 0); diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c index 22e2f4d79743..157d862fb864 100644 --- a/drivers/infiniband/hw/mlx5/mr.c +++ b/drivers/infiniband/hw/mlx5/mr.c @@ -88,9 +88,8 @@ static void set_mkc_access_pd_addr_fields(void *mkc, int acc, u64 start_addr, MLX5_SET64(mkc, mkc, start_addr, start_addr); } -static void -assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey, - u32 *in) +static void assign_mkey_variant(struct mlx5_ib_dev *dev, + struct mlx5_ib_mkey *mkey, u32 *in) { u8 key = atomic_inc_return(&dev->mkey_var); void *mkc; @@ -100,17 +99,22 @@ assign_mkey_variant(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey, mkey->key = key; } -static int -mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, struct mlx5_core_mkey *mkey, - u32 *in, int inlen) +static int mlx5_ib_create_mkey(struct mlx5_ib_dev *dev, + struct mlx5_ib_mkey *mkey, u32 *in, int inlen) { + int ret; + assign_mkey_variant(dev, mkey, in); - return mlx5_core_create_mkey(dev->mdev, mkey, in, inlen); + ret = mlx5_core_create_mkey(dev->mdev, &mkey->key, in, inlen); + if (!ret) + init_waitqueue_head(&mkey->wait); + + return ret; } static int mlx5_ib_create_mkey_cb(struct mlx5_ib_dev *dev, - struct mlx5_core_mkey *mkey, + struct mlx5_ib_mkey *mkey, struct mlx5_async_ctx *async_ctx, u32 *in, int inlen, u32 *out, int outlen, struct mlx5_async_work *context) @@ -133,7 +137,7 @@ static int destroy_mkey(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr) { WARN_ON(xa_load(&dev->odp_mkeys, mlx5_base_mkey(mr->mmkey.key))); - return mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); + return mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); } static void create_mkey_callback(int status, struct mlx5_async_work *context) @@ -260,10 +264,11 @@ static struct mlx5_ib_mr *create_cache_mr(struct mlx5_cache_ent *ent) goto free_in; } - err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey, in, inlen); + err = mlx5_core_create_mkey(ent->dev->mdev, &mr->mmkey.key, in, inlen); if (err) goto free_mr; + init_waitqueue_head(&mr->mmkey.wait); mr->mmkey.type = MLX5_MKEY_MR; WRITE_ONCE(ent->dev->cache.last_add, jiffies); spin_lock_irq(&ent->lock); @@ -290,7 +295,7 @@ static void remove_cache_mr_locked(struct mlx5_cache_ent *ent) ent->available_mrs--; ent->total_mrs--; spin_unlock_irq(&ent->lock); - mlx5_core_destroy_mkey(ent->dev->mdev, &mr->mmkey); + mlx5_core_destroy_mkey(ent->dev->mdev, mr->mmkey.key); kfree(mr); spin_lock_irq(&ent->lock); } @@ -600,29 +605,21 @@ struct mlx5_ib_mr *mlx5_mr_cache_alloc(struct mlx5_ib_dev *dev, /* Return a MR already available in the cache */ static struct mlx5_ib_mr *get_cache_mr(struct mlx5_cache_ent *req_ent) { - struct mlx5_ib_dev *dev = req_ent->dev; struct mlx5_ib_mr *mr = NULL; struct mlx5_cache_ent *ent = req_ent; - /* Try larger MR pools from the cache to satisfy the allocation */ - for (; ent != &dev->cache.ent[MR_CACHE_LAST_STD_ENTRY + 1]; ent++) { - mlx5_ib_dbg(dev, "order %u, cache index %zu\n", ent->order, - ent - dev->cache.ent); - - spin_lock_irq(&ent->lock); - if (!list_empty(&ent->head)) { - mr = list_first_entry(&ent->head, struct mlx5_ib_mr, - list); - list_del(&mr->list); - ent->available_mrs--; - queue_adjust_cache_locked(ent); - 
spin_unlock_irq(&ent->lock); - mlx5_clear_mr(mr); - return mr; - } + spin_lock_irq(&ent->lock); + if (!list_empty(&ent->head)) { + mr = list_first_entry(&ent->head, struct mlx5_ib_mr, list); + list_del(&mr->list); + ent->available_mrs--; queue_adjust_cache_locked(ent); spin_unlock_irq(&ent->lock); + mlx5_clear_mr(mr); + return mr; } + queue_adjust_cache_locked(ent); + spin_unlock_irq(&ent->lock); req_ent->miss++; return NULL; } @@ -658,7 +655,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c) ent->available_mrs--; ent->total_mrs--; spin_unlock_irq(&ent->lock); - mlx5_core_destroy_mkey(dev->mdev, &mr->mmkey); + mlx5_core_destroy_mkey(dev->mdev, mr->mmkey.key); } list_for_each_entry_safe(mr, tmp_mr, &del_list, list) { @@ -911,12 +908,13 @@ static struct mlx5_cache_ent *mr_cache_ent_from_order(struct mlx5_ib_dev *dev, } static void set_mr_fields(struct mlx5_ib_dev *dev, struct mlx5_ib_mr *mr, - u64 length, int access_flags) + u64 length, int access_flags, u64 iova) { mr->ibmr.lkey = mr->mmkey.key; mr->ibmr.rkey = mr->mmkey.key; mr->ibmr.length = length; mr->ibmr.device = &dev->ib_dev; + mr->ibmr.iova = iova; mr->access_flags = access_flags; } @@ -974,11 +972,8 @@ static struct mlx5_ib_mr *alloc_cacheable_mr(struct ib_pd *pd, mr->ibmr.pd = pd; mr->umem = umem; - mr->mmkey.iova = iova; - mr->mmkey.size = umem->length; - mr->mmkey.pd = to_mpd(pd)->pdn; mr->page_shift = order_base_2(page_size); - set_mr_fields(dev, mr, umem->length, access_flags); + set_mr_fields(dev, mr, umem->length, access_flags, iova); return mr; } @@ -1087,8 +1082,8 @@ static void *mlx5_ib_create_xlt_wr(struct mlx5_ib_mr *mr, wr->wr.opcode = MLX5_IB_WR_UMR; wr->pd = mr->ibmr.pd; wr->mkey = mr->mmkey.key; - wr->length = mr->mmkey.size; - wr->virt_addr = mr->mmkey.iova; + wr->length = mr->ibmr.length; + wr->virt_addr = mr->ibmr.iova; wr->access_flags = mr->access_flags; wr->page_shift = mr->page_shift; wr->xlt_size = sg->length; @@ -1340,7 +1335,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem, } mr->mmkey.type = MLX5_MKEY_MR; mr->umem = umem; - set_mr_fields(dev, mr, umem->length, access_flags); + set_mr_fields(dev, mr, umem->length, access_flags, iova); kvfree(in); mlx5_ib_dbg(dev, "mkey = 0x%x\n", mr->mmkey.key); @@ -1387,7 +1382,7 @@ static struct ib_mr *mlx5_ib_get_dm_mr(struct ib_pd *pd, u64 start_addr, kfree(in); - set_mr_fields(dev, mr, length, acc); + set_mr_fields(dev, mr, length, acc, start_addr); return &mr->ibmr; @@ -1709,7 +1704,6 @@ static int umr_rereg_pd_access(struct mlx5_ib_mr *mr, struct ib_pd *pd, return err; mr->access_flags = access_flags; - mr->mmkey.pd = to_mpd(pd)->pdn; return 0; } @@ -1754,7 +1748,6 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd, if (flags & IB_MR_REREG_PD) { mr->ibmr.pd = pd; - mr->mmkey.pd = to_mpd(pd)->pdn; upd_flags |= MLX5_IB_UPD_XLT_PD; } if (flags & IB_MR_REREG_ACCESS) { @@ -1763,8 +1756,8 @@ static int umr_rereg_pas(struct mlx5_ib_mr *mr, struct ib_pd *pd, } mr->ibmr.length = new_umem->length; - mr->mmkey.iova = iova; - mr->mmkey.size = new_umem->length; + mr->ibmr.iova = iova; + mr->ibmr.length = new_umem->length; mr->page_shift = order_base_2(page_size); mr->umem = new_umem; err = mlx5_ib_update_mr_pas(mr, upd_flags); @@ -1834,7 +1827,7 @@ struct ib_mr *mlx5_ib_rereg_user_mr(struct ib_mr *ib_mr, int flags, u64 start, mr->umem = NULL; atomic_sub(ib_umem_num_pages(umem), &dev->mdev->priv.reg_pages); - return create_real_mr(new_pd, umem, mr->mmkey.iova, + return create_real_mr(new_pd, umem, mr->ibmr.iova, 
new_access_flags); } @@ -2263,9 +2256,9 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) struct mlx5_ib_dev *dev = to_mdev(ibmw->device); int inlen = MLX5_ST_SZ_BYTES(create_mkey_in); struct mlx5_ib_mw *mw = to_mmw(ibmw); + unsigned int ndescs; u32 *in = NULL; void *mkc; - int ndescs; int err; struct mlx5_ib_alloc_mw req = {}; struct { @@ -2310,7 +2303,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) mw->mmkey.type = MLX5_MKEY_MW; ibmw->rkey = mw->mmkey.key; - mw->ndescs = ndescs; + mw->mmkey.ndescs = ndescs; resp.response_length = min(offsetofend(typeof(resp), response_length), udata->outlen); @@ -2330,7 +2323,7 @@ int mlx5_ib_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) return 0; free_mkey: - mlx5_core_destroy_mkey(dev->mdev, &mw->mmkey); + mlx5_core_destroy_mkey(dev->mdev, mw->mmkey.key); free: kfree(in); return err; @@ -2349,7 +2342,7 @@ int mlx5_ib_dealloc_mw(struct ib_mw *mw) */ mlx5r_deref_wait_odp_mkey(&mmw->mmkey); - return mlx5_core_destroy_mkey(dev->mdev, &mmw->mmkey); + return mlx5_core_destroy_mkey(dev->mdev, mmw->mmkey.key); } int mlx5_ib_check_mr_status(struct ib_mr *ibmr, u32 check_mask, @@ -2406,7 +2399,7 @@ mlx5_ib_map_pa_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, mr->meta_length = 0; if (data_sg_nents == 1) { n++; - mr->ndescs = 1; + mr->mmkey.ndescs = 1; if (data_sg_offset) sg_offset = *data_sg_offset; mr->data_length = sg_dma_len(data_sg) - sg_offset; @@ -2459,7 +2452,7 @@ mlx5_ib_sg_to_klms(struct mlx5_ib_mr *mr, if (sg_offset_p) *sg_offset_p = sg_offset; - mr->ndescs = i; + mr->mmkey.ndescs = i; mr->data_length = mr->ibmr.length; if (meta_sg_nents) { @@ -2492,11 +2485,11 @@ static int mlx5_set_page(struct ib_mr *ibmr, u64 addr) struct mlx5_ib_mr *mr = to_mmr(ibmr); __be64 *descs; - if (unlikely(mr->ndescs == mr->max_descs)) + if (unlikely(mr->mmkey.ndescs == mr->max_descs)) return -ENOMEM; descs = mr->descs; - descs[mr->ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); + descs[mr->mmkey.ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); return 0; } @@ -2506,11 +2499,11 @@ static int mlx5_set_page_pi(struct ib_mr *ibmr, u64 addr) struct mlx5_ib_mr *mr = to_mmr(ibmr); __be64 *descs; - if (unlikely(mr->ndescs + mr->meta_ndescs == mr->max_descs)) + if (unlikely(mr->mmkey.ndescs + mr->meta_ndescs == mr->max_descs)) return -ENOMEM; descs = mr->descs; - descs[mr->ndescs + mr->meta_ndescs++] = + descs[mr->mmkey.ndescs + mr->meta_ndescs++] = cpu_to_be64(addr | MLX5_EN_RD | MLX5_EN_WR); return 0; @@ -2526,7 +2519,7 @@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, struct mlx5_ib_mr *pi_mr = mr->mtt_mr; int n; - pi_mr->ndescs = 0; + pi_mr->mmkey.ndescs = 0; pi_mr->meta_ndescs = 0; pi_mr->meta_length = 0; @@ -2560,7 +2553,7 @@ mlx5_ib_map_mtt_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, * metadata offset at the first metadata page */ pi_mr->pi_iova = (iova & page_mask) + - pi_mr->ndescs * ibmr->page_size + + pi_mr->mmkey.ndescs * ibmr->page_size + (pi_mr->ibmr.iova & ~page_mask); /* * In order to use one MTT MR for data and metadata, we register @@ -2591,7 +2584,7 @@ mlx5_ib_map_klm_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, struct mlx5_ib_mr *pi_mr = mr->klm_mr; int n; - pi_mr->ndescs = 0; + pi_mr->mmkey.ndescs = 0; pi_mr->meta_ndescs = 0; pi_mr->meta_length = 0; @@ -2626,7 +2619,7 @@ int mlx5_ib_map_mr_sg_pi(struct ib_mr *ibmr, struct scatterlist *data_sg, WARN_ON(ibmr->type != IB_MR_TYPE_INTEGRITY); - mr->ndescs = 0; + mr->mmkey.ndescs = 0; 
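/*
 * Aside (hypothetical names, not from this patch): mlx5_set_page() above
 * follows the generic ib_map_mr_sg() contract - the driver callback
 * appends one page address per call and returns -ENOMEM once its
 * descriptor array is full.  A hedged sketch of that shape:
 */
#include <rdma/ib_verbs.h>

struct demo_mr {
	struct ib_mr ibmr;
	u64 *descs;
	unsigned int ndescs;
	unsigned int max_descs;
};

static int demo_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct demo_mr *mr = container_of(ibmr, struct demo_mr, ibmr);

	if (mr->ndescs == mr->max_descs)
		return -ENOMEM;		/* ib_sg_to_pages() stops here */

	mr->descs[mr->ndescs++] = addr;
	return 0;
}

static int demo_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			  int sg_nents, unsigned int *sg_offset)
{
	struct demo_mr *mr = container_of(ibmr, struct demo_mr, ibmr);

	mr->ndescs = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, demo_set_page);
}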
mr->data_length = 0; mr->data_iova = 0; mr->meta_ndescs = 0; @@ -2682,7 +2675,7 @@ int mlx5_ib_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, struct mlx5_ib_mr *mr = to_mmr(ibmr); int n; - mr->ndescs = 0; + mr->mmkey.ndescs = 0; ib_dma_sync_single_for_cpu(ibmr->device, mr->desc_map, mr->desc_size * mr->max_descs, diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c index d0d98e584ebc..86842cd580ba 100644 --- a/drivers/infiniband/hw/mlx5/odp.c +++ b/drivers/infiniband/hw/mlx5/odp.c @@ -430,7 +430,7 @@ static struct mlx5_ib_mr *implicit_get_child_mr(struct mlx5_ib_mr *imr, mr->umem = &odp->umem; mr->ibmr.lkey = mr->mmkey.key; mr->ibmr.rkey = mr->mmkey.key; - mr->mmkey.iova = idx * MLX5_IMR_MTT_SIZE; + mr->ibmr.iova = idx * MLX5_IMR_MTT_SIZE; mr->parent = imr; odp->private = mr; @@ -500,7 +500,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd, } imr->ibmr.pd = &pd->ibpd; - imr->mmkey.iova = 0; + imr->ibmr.iova = 0; imr->umem = &umem_odp->umem; imr->ibmr.lkey = imr->mmkey.key; imr->ibmr.rkey = imr->mmkey.key; @@ -738,7 +738,7 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt, { struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem); - if (unlikely(io_virt < mr->mmkey.iova)) + if (unlikely(io_virt < mr->ibmr.iova)) return -EFAULT; if (mr->umem->is_dmabuf) @@ -747,7 +747,7 @@ static int pagefault_mr(struct mlx5_ib_mr *mr, u64 io_virt, size_t bcnt, if (!odp->is_implicit_odp) { u64 user_va; - if (check_add_overflow(io_virt - mr->mmkey.iova, + if (check_add_overflow(io_virt - mr->ibmr.iova, (u64)odp->umem.address, &user_va)) return -EFAULT; if (unlikely(user_va >= ib_umem_end(odp) || @@ -788,7 +788,7 @@ struct pf_frame { int depth; }; -static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key) +static bool mkey_is_eq(struct mlx5_ib_mkey *mmkey, u32 key) { if (!mmkey) return false; @@ -797,21 +797,6 @@ static bool mkey_is_eq(struct mlx5_core_mkey *mmkey, u32 key) return mmkey->key == key; } -static int get_indirect_num_descs(struct mlx5_core_mkey *mmkey) -{ - struct mlx5_ib_mw *mw; - struct mlx5_ib_devx_mr *devx_mr; - - if (mmkey->type == MLX5_MKEY_MW) { - mw = container_of(mmkey, struct mlx5_ib_mw, mmkey); - return mw->ndescs; - } - - devx_mr = container_of(mmkey, struct mlx5_ib_devx_mr, - mmkey); - return devx_mr->ndescs; -} - /* * Handle a single data segment in a page-fault WQE or RDMA region. 
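/*
 * Aside, a minimal sketch rather than patch content: the
 * check_add_overflow() test in pagefault_mr() above is the usual way to
 * reject a faulting address whose offset arithmetic would wrap.  The
 * function and parameter names here are hypothetical.
 */
#include <linux/errno.h>
#include <linux/overflow.h>
#include <linux/types.h>

static int demo_fault_addr_ok(u64 io_virt, u64 mr_iova, u64 umem_start,
			      u64 umem_end)
{
	u64 user_va;

	if (io_virt < mr_iova)
		return -EFAULT;
	if (check_add_overflow(io_virt - mr_iova, umem_start, &user_va))
		return -EFAULT;		/* offset + base wrapped around */
	if (user_va >= umem_end)
		return -EFAULT;
	return 0;
}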
* @@ -831,12 +816,11 @@ static int pagefault_single_data_segment(struct mlx5_ib_dev *dev, { int npages = 0, ret, i, outlen, cur_outlen = 0, depth = 0; struct pf_frame *head = NULL, *frame; - struct mlx5_core_mkey *mmkey; + struct mlx5_ib_mkey *mmkey; struct mlx5_ib_mr *mr; struct mlx5_klm *pklm; u32 *out = NULL; size_t offset; - int ndescs; io_virt += *bytes_committed; bcnt -= *bytes_committed; @@ -885,8 +869,6 @@ next_mr: case MLX5_MKEY_MW: case MLX5_MKEY_INDIRECT_DEVX: - ndescs = get_indirect_num_descs(mmkey); - if (depth >= MLX5_CAP_GEN(dev->mdev, max_indirection)) { mlx5_ib_dbg(dev, "indirection level exceeded\n"); ret = -EFAULT; @@ -894,7 +876,7 @@ next_mr: } outlen = MLX5_ST_SZ_BYTES(query_mkey_out) + - sizeof(*pklm) * (ndescs - 2); + sizeof(*pklm) * (mmkey->ndescs - 2); if (outlen > cur_outlen) { kfree(out); @@ -909,14 +891,14 @@ next_mr: pklm = (struct mlx5_klm *)MLX5_ADDR_OF(query_mkey_out, out, bsf0_klm0_pas_mtt0_1); - ret = mlx5_core_query_mkey(dev->mdev, mmkey, out, outlen); + ret = mlx5_core_query_mkey(dev->mdev, mmkey->key, out, outlen); if (ret) goto end; offset = io_virt - MLX5_GET64(query_mkey_out, out, memory_key_mkey_entry.start_addr); - for (i = 0; bcnt && i < ndescs; i++, pklm++) { + for (i = 0; bcnt && i < mmkey->ndescs; i++, pklm++) { if (offset >= be32_to_cpu(pklm->bcount)) { offset -= be32_to_cpu(pklm->bcount); continue; @@ -1562,12 +1544,7 @@ int mlx5r_odp_create_eq(struct mlx5_ib_dev *dev, struct mlx5_ib_pf_eq *eq) .nent = MLX5_IB_NUM_PF_EQE, }; param.mask[0] = 1ull << MLX5_EVENT_TYPE_PAGE_FAULT; - if (!zalloc_cpumask_var(&param.affinity, GFP_KERNEL)) { - err = -ENOMEM; - goto err_wq; - } eq->core = mlx5_eq_create_generic(dev->mdev, &param); - free_cpumask_var(param.affinity); if (IS_ERR(eq->core)) { err = PTR_ERR(eq->core); goto err_wq; @@ -1703,25 +1680,31 @@ get_prefetchable_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, u32 lkey) { struct mlx5_ib_dev *dev = to_mdev(pd->device); - struct mlx5_core_mkey *mmkey; struct mlx5_ib_mr *mr = NULL; + struct mlx5_ib_mkey *mmkey; xa_lock(&dev->odp_mkeys); mmkey = xa_load(&dev->odp_mkeys, mlx5_base_mkey(lkey)); - if (!mmkey || mmkey->key != lkey || mmkey->type != MLX5_MKEY_MR) + if (!mmkey || mmkey->key != lkey) { + mr = ERR_PTR(-ENOENT); + goto end; + } + if (mmkey->type != MLX5_MKEY_MR) { + mr = ERR_PTR(-EINVAL); goto end; + } mr = container_of(mmkey, struct mlx5_ib_mr, mmkey); if (mr->ibmr.pd != pd) { - mr = NULL; + mr = ERR_PTR(-EPERM); goto end; } /* prefetch with write-access must be supported by the MR */ if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE && !mr->umem->writable) { - mr = NULL; + mr = ERR_PTR(-EPERM); goto end; } @@ -1753,7 +1736,7 @@ static void mlx5_ib_prefetch_mr_work(struct work_struct *w) destroy_prefetch_work(work); } -static bool init_prefetch_work(struct ib_pd *pd, +static int init_prefetch_work(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice, u32 pf_flags, struct prefetch_mr_work *work, struct ib_sge *sg_list, u32 num_sge) @@ -1764,17 +1747,19 @@ static bool init_prefetch_work(struct ib_pd *pd, work->pf_flags = pf_flags; for (i = 0; i < num_sge; ++i) { - work->frags[i].io_virt = sg_list[i].addr; - work->frags[i].length = sg_list[i].length; - work->frags[i].mr = - get_prefetchable_mr(pd, advice, sg_list[i].lkey); - if (!work->frags[i].mr) { + struct mlx5_ib_mr *mr; + + mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey); + if (IS_ERR(mr)) { work->num_sge = i; - return false; + return PTR_ERR(mr); } + work->frags[i].io_virt = sg_list[i].addr; + work->frags[i].length =
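/*
 * Illustrative sketch, not part of the patch: returning ERR_PTR() values,
 * as get_prefetchable_mr() now does, lets a lookup distinguish "not
 * found", "wrong type" and "not permitted" without an extra out
 * parameter.  All names below are hypothetical.
 */
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/types.h>

struct demo_obj {
	bool writable;
};

/* a NULL candidate stands in for "no entry under this key" */
static struct demo_obj *demo_lookup(struct demo_obj *candidate,
				    bool need_write)
{
	if (!candidate)
		return ERR_PTR(-ENOENT);
	if (need_write && !candidate->writable)
		return ERR_PTR(-EPERM);
	return candidate;
}

static int demo_use(struct demo_obj *candidate, bool need_write)
{
	struct demo_obj *obj = demo_lookup(candidate, need_write);

	if (IS_ERR(obj))
		return PTR_ERR(obj);	/* propagate the precise errno */
	return 0;
}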
sg_list[i].length; + work->frags[i].mr = mr; } work->num_sge = num_sge; - return true; + return 0; } static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, @@ -1790,8 +1775,8 @@ static int mlx5_ib_prefetch_sg_list(struct ib_pd *pd, struct mlx5_ib_mr *mr; mr = get_prefetchable_mr(pd, advice, sg_list[i].lkey); - if (!mr) - return -ENOENT; + if (IS_ERR(mr)) + return PTR_ERR(mr); ret = pagefault_mr(mr, sg_list[i].addr, sg_list[i].length, &bytes_mapped, pf_flags); if (ret < 0) { @@ -1811,6 +1796,7 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, { u32 pf_flags = 0; struct prefetch_mr_work *work; + int rc; if (advice == IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH) pf_flags |= MLX5_PF_FLAGS_DOWNGRADE; @@ -1826,9 +1812,10 @@ int mlx5_ib_advise_mr_prefetch(struct ib_pd *pd, if (!work) return -ENOMEM; - if (!init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge)) { + rc = init_prefetch_work(pd, advice, pf_flags, work, sg_list, num_sge); + if (rc) { destroy_prefetch_work(work); - return -EINVAL; + return rc; } queue_work(system_unbound_wq, &work->work); return 0; diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c index e5abbcfc1d57..29475cf8c7c3 100644 --- a/drivers/infiniband/hw/mlx5/qp.c +++ b/drivers/infiniband/hw/mlx5/qp.c @@ -30,6 +30,7 @@ * SOFTWARE. */ +#include <linux/etherdevice.h> #include <linux/module.h> #include <rdma/ib_umem.h> #include <rdma/ib_cache.h> diff --git a/drivers/infiniband/hw/mlx5/wr.c b/drivers/infiniband/hw/mlx5/wr.c index 8841620af82f..51e48ca9016e 100644 --- a/drivers/infiniband/hw/mlx5/wr.c +++ b/drivers/infiniband/hw/mlx5/wr.c @@ -217,7 +217,7 @@ static __be64 sig_mkey_mask(void) static void set_reg_umr_seg(struct mlx5_wqe_umr_ctrl_seg *umr, struct mlx5_ib_mr *mr, u8 flags, bool atomic) { - int size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; + int size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size; memset(umr, 0, sizeof(*umr)); @@ -374,7 +374,7 @@ static void set_reg_mkey_seg(struct mlx5_mkey_seg *seg, struct mlx5_ib_mr *mr, u32 key, int access) { - int ndescs = ALIGN(mr->ndescs + mr->meta_ndescs, 8) >> 1; + int ndescs = ALIGN(mr->mmkey.ndescs + mr->meta_ndescs, 8) >> 1; memset(seg, 0, sizeof(*seg)); @@ -439,7 +439,7 @@ static void set_reg_data_seg(struct mlx5_wqe_data_seg *dseg, struct mlx5_ib_mr *mr, struct mlx5_ib_pd *pd) { - int bcount = mr->desc_size * (mr->ndescs + mr->meta_ndescs); + int bcount = mr->desc_size * (mr->mmkey.ndescs + mr->meta_ndescs); dseg->addr = cpu_to_be64(mr->desc_map); dseg->byte_count = cpu_to_be32(ALIGN(bcount, 64)); @@ -861,7 +861,7 @@ static int set_reg_wr(struct mlx5_ib_qp *qp, struct mlx5_ib_mr *mr = to_mmr(wr->mr); struct mlx5_ib_pd *pd = to_mpd(qp->ibqp.pd); struct mlx5_ib_dev *dev = to_mdev(pd->ibpd.device); - int mr_list_size = (mr->ndescs + mr->meta_ndescs) * mr->desc_size; + int mr_list_size = (mr->mmkey.ndescs + mr->meta_ndescs) * mr->desc_size; bool umr_inline = mr_list_size <= MLX5_IB_SQ_UMR_INLINE_THRESHOLD; bool atomic = wr->access & IB_ACCESS_REMOTE_ATOMIC; u8 flags = 0; @@ -1111,7 +1111,7 @@ static int handle_reg_mr_integrity(struct mlx5_ib_dev *dev, memset(&pa_pi_mr, 0, sizeof(struct mlx5_ib_mr)); /* No UMR, use local_dma_lkey */ pa_pi_mr.ibmr.lkey = mr->ibmr.pd->local_dma_lkey; - pa_pi_mr.ndescs = mr->ndescs; + pa_pi_mr.mmkey.ndescs = mr->mmkey.ndescs; pa_pi_mr.data_length = mr->data_length; pa_pi_mr.data_iova = mr->data_iova; if (mr->meta_ndescs) { diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c index 
aef1d274a14e..9f0f79d02d3c 100644 --- a/drivers/infiniband/hw/mthca/mthca_allocator.c +++ b/drivers/infiniband/hw/mthca/mthca_allocator.c @@ -51,7 +51,7 @@ u32 mthca_alloc(struct mthca_alloc *alloc) } if (obj < alloc->max) { - set_bit(obj, alloc->table); + __set_bit(obj, alloc->table); obj |= alloc->top; } else obj = -1; @@ -69,7 +69,7 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj) spin_lock_irqsave(&alloc->lock, flags); - clear_bit(obj, alloc->table); + __clear_bit(obj, alloc->table); alloc->last = min(alloc->last, obj); alloc->top = (alloc->top + alloc->max) & alloc->mask; @@ -79,8 +79,6 @@ void mthca_free(struct mthca_alloc *alloc, u32 obj) int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, u32 reserved) { - int i; - /* num must be a power of 2 */ if (num != 1 << (ffs(num) - 1)) return -EINVAL; @@ -90,21 +88,18 @@ int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, alloc->max = num; alloc->mask = mask; spin_lock_init(&alloc->lock); - alloc->table = kmalloc_array(BITS_TO_LONGS(num), sizeof(long), - GFP_KERNEL); + alloc->table = bitmap_zalloc(num, GFP_KERNEL); if (!alloc->table) return -ENOMEM; - bitmap_zero(alloc->table, num); - for (i = 0; i < reserved; ++i) - set_bit(i, alloc->table); + bitmap_set(alloc->table, 0, reserved); return 0; } void mthca_alloc_cleanup(struct mthca_alloc *alloc) { - kfree(alloc->table); + bitmap_free(alloc->table); } /* diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c index ce0e0867e488..192f83fd7c8a 100644 --- a/drivers/infiniband/hw/mthca/mthca_mr.c +++ b/drivers/infiniband/hw/mthca/mthca_mr.c @@ -101,13 +101,13 @@ static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order) return -1; found: - clear_bit(seg, buddy->bits[o]); + __clear_bit(seg, buddy->bits[o]); --buddy->num_free[o]; while (o > order) { --o; seg <<= 1; - set_bit(seg ^ 1, buddy->bits[o]); + __set_bit(seg ^ 1, buddy->bits[o]); ++buddy->num_free[o]; } @@ -125,13 +125,13 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) spin_lock(&buddy->lock); while (test_bit(seg ^ 1, buddy->bits[order])) { - clear_bit(seg ^ 1, buddy->bits[order]); + __clear_bit(seg ^ 1, buddy->bits[order]); --buddy->num_free[order]; seg >>= 1; ++order; } - set_bit(seg, buddy->bits[order]); + __set_bit(seg, buddy->bits[order]); ++buddy->num_free[order]; spin_unlock(&buddy->lock); @@ -139,7 +139,7 @@ static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) { - int i, s; + int i; buddy->max_order = max_order; spin_lock_init(&buddy->lock); @@ -152,22 +152,20 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) goto err_out; for (i = 0; i <= buddy->max_order; ++i) { - s = BITS_TO_LONGS(1 << (buddy->max_order - i)); - buddy->bits[i] = kmalloc_array(s, sizeof(long), GFP_KERNEL); + buddy->bits[i] = bitmap_zalloc(1 << (buddy->max_order - i), + GFP_KERNEL); if (!buddy->bits[i]) goto err_out_free; - bitmap_zero(buddy->bits[i], - 1 << (buddy->max_order - i)); } - set_bit(0, buddy->bits[buddy->max_order]); + __set_bit(0, buddy->bits[buddy->max_order]); buddy->num_free[buddy->max_order] = 1; return 0; err_out_free: for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + bitmap_free(buddy->bits[i]); err_out: kfree(buddy->bits); @@ -181,7 +179,7 @@ static void mthca_buddy_cleanup(struct mthca_buddy *buddy) int i; for (i = 0; i <= buddy->max_order; ++i) - kfree(buddy->bits[i]); + bitmap_free(buddy->bits[i]); 
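/*
 * Aside, a hedged sketch with hypothetical names rather than patch
 * content: the mthca conversion above is the stock bitmap API pattern -
 * bitmap_zalloc()/bitmap_free() instead of kmalloc_array() plus
 * bitmap_zero(), bitmap_set() for the reserved range, and the non-atomic
 * __set_bit()/__clear_bit() variants because every access is already
 * serialized by the allocator lock.
 */
#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>

struct demo_alloc {
	spinlock_t lock;
	unsigned long *table;
	u32 max;
};

static int demo_alloc_init(struct demo_alloc *a, u32 num, u32 reserved)
{
	spin_lock_init(&a->lock);
	a->max = num;
	a->table = bitmap_zalloc(num, GFP_KERNEL);
	if (!a->table)
		return -ENOMEM;
	bitmap_set(a->table, 0, reserved);	/* reserved entries start used */
	return 0;
}

static int demo_alloc_get(struct demo_alloc *a)
{
	unsigned long obj;

	spin_lock(&a->lock);
	obj = find_first_zero_bit(a->table, a->max);
	if (obj < a->max)
		__set_bit(obj, a->table);	/* non-atomic is fine under the lock */
	spin_unlock(&a->lock);

	return obj < a->max ? (int)obj : -1;
}

static void demo_alloc_cleanup(struct demo_alloc *a)
{
	bitmap_free(a->table);
}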
kfree(buddy->bits); kfree(buddy->num_free); @@ -469,8 +467,7 @@ int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, mpt_entry->start = cpu_to_be64(iova); mpt_entry->length = cpu_to_be64(total_size); - memset(&mpt_entry->lkey, 0, - sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey)); + memset_startat(mpt_entry, 0, lkey); if (mr->mtt) mpt_entry->mtt_seg = diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c index ceee23ebc0f2..c46df53f26cf 100644 --- a/drivers/infiniband/hw/mthca/mthca_provider.c +++ b/drivers/infiniband/hw/mthca/mthca_provider.c @@ -50,14 +50,6 @@ #include <rdma/mthca-abi.h> #include "mthca_memfree.h" -static void init_query_mad(struct ib_smp *mad) -{ - mad->base_version = 1; - mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; - mad->class_version = 1; - mad->method = IB_MGMT_METHOD_GET; -} - static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *props, struct ib_udata *uhw) { @@ -78,7 +70,7 @@ static int mthca_query_device(struct ib_device *ibdev, struct ib_device_attr *pr props->fw_ver = mdev->fw_ver; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; err = mthca_MAD_IFC(mdev, 1, 1, @@ -140,7 +132,7 @@ static int mthca_query_port(struct ib_device *ibdev, /* props being zeroed by the caller, avoid zeroing it here */ - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); @@ -234,7 +226,7 @@ static int mthca_query_pkey(struct ib_device *ibdev, if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; in_mad->attr_mod = cpu_to_be32(index / 32); @@ -263,7 +255,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u32 port, if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; in_mad->attr_mod = cpu_to_be32(port); @@ -274,7 +266,7 @@ static int mthca_query_gid(struct ib_device *ibdev, u32 port, memcpy(gid->raw, out_mad->data + 8, 8); - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; in_mad->attr_mod = cpu_to_be32(index / 8); @@ -1006,7 +998,7 @@ static int mthca_init_node_data(struct mthca_dev *dev) if (!in_mad || !out_mad) goto out; - init_query_mad(in_mad); + ib_init_query_mad(in_mad); in_mad->attr_id = IB_SMP_ATTR_NODE_DESC; err = mthca_MAD_IFC(dev, 1, 1, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c index c51c3f40700e..265a581133dc 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c @@ -1506,7 +1506,6 @@ int ocrdma_mbx_dealloc_pd(struct ocrdma_dev *dev, struct ocrdma_pd *pd) static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) { int status = -ENOMEM; - size_t pd_bitmap_size; struct ocrdma_alloc_pd_range *cmd; struct ocrdma_alloc_pd_range_rsp *rsp; @@ -1528,10 +1527,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev) dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; dev->pd_mgr->max_dpp_pd = rsp->pd_count; - pd_bitmap_size = - BITS_TO_LONGS(rsp->pd_count) * sizeof(long); - dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size, - GFP_KERNEL); + dev->pd_mgr->pd_dpp_bitmap = bitmap_zalloc(rsp->pd_count, + GFP_KERNEL); } kfree(cmd); } @@ -1547,9 +1544,8 @@ static int ocrdma_mbx_alloc_pd_range(struct 
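/*
 * Aside (hypothetical struct, not from this patch): memset_startat(),
 * used in the mthca MPT change above, zeroes a structure from the named
 * member through the end of the structure, replacing the error-prone
 * offsetof()-based arithmetic.  A tiny sketch:
 */
#include <linux/string.h>
#include <linux/types.h>

struct demo_mpt_entry {
	__be64 start;
	__be64 length;
	__be32 lkey;		/* zeroing starts at this member ... */
	__be32 flags;
	__be64 mtt_seg;		/* ... and runs to the end of the struct */
};

static void demo_reset_from_lkey(struct demo_mpt_entry *e)
{
	memset_startat(e, 0, lkey);
}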
ocrdma_dev *dev) dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK; dev->pd_mgr->max_normal_pd = rsp->pd_count; - pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long); - dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size, - GFP_KERNEL); + dev->pd_mgr->pd_norm_bitmap = bitmap_zalloc(rsp->pd_count, + GFP_KERNEL); } kfree(cmd); @@ -1611,8 +1607,8 @@ void ocrdma_alloc_pd_pool(struct ocrdma_dev *dev) static void ocrdma_free_pd_pool(struct ocrdma_dev *dev) { ocrdma_mbx_dealloc_pd_range(dev); - kfree(dev->pd_mgr->pd_norm_bitmap); - kfree(dev->pd_mgr->pd_dpp_bitmap); + bitmap_free(dev->pd_mgr->pd_norm_bitmap); + bitmap_free(dev->pd_mgr->pd_dpp_bitmap); kfree(dev->pd_mgr); } diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index 7abf6cf1e937..5d4b3bc16493 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -62,20 +62,6 @@ MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION); MODULE_AUTHOR("Emulex Corporation"); MODULE_LICENSE("Dual BSD/GPL"); -void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid) -{ - u8 mac_addr[6]; - - memcpy(&mac_addr[0], &dev->nic_info.mac_addr[0], ETH_ALEN); - guid[0] = mac_addr[0] ^ 2; - guid[1] = mac_addr[1]; - guid[2] = mac_addr[2]; - guid[3] = 0xff; - guid[4] = 0xfe; - guid[5] = mac_addr[3]; - guid[6] = mac_addr[4]; - guid[7] = mac_addr[5]; -} static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, u32 port_num) { @@ -203,7 +189,8 @@ static int ocrdma_register_device(struct ocrdma_dev *dev) { int ret; - ocrdma_get_guid(dev, (u8 *)&dev->ibdev.node_guid); + addrconf_addr_eui48((u8 *)&dev->ibdev.node_guid, + dev->nic_info.mac_addr); BUILD_BUG_ON(sizeof(OCRDMA_NODE_DESC) > IB_DEVICE_NODE_DESC_MAX); memcpy(dev->ibdev.node_desc, OCRDMA_NODE_DESC, sizeof(OCRDMA_NODE_DESC)); diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c index 735123d0e9ec..acf9970ec245 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c @@ -41,6 +41,7 @@ */ #include <linux/dma-mapping.h> +#include <net/addrconf.h> #include <rdma/ib_verbs.h> #include <rdma/ib_user_verbs.h> #include <rdma/iw_cm.h> @@ -74,7 +75,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr, memset(attr, 0, sizeof *attr); memcpy(&attr->fw_ver, &dev->attr.fw_ver[0], min(sizeof(dev->attr.fw_ver), sizeof(attr->fw_ver))); - ocrdma_get_guid(dev, (u8 *)&attr->sys_image_guid); + addrconf_addr_eui48((u8 *)&attr->sys_image_guid, + dev->nic_info.mac_addr); attr->max_mr_size = dev->attr.max_mr_size; attr->page_size_cap = 0xffff000; attr->vendor_id = dev->nic_info.pdev->vendor; @@ -245,13 +247,13 @@ static bool ocrdma_search_mmap(struct ocrdma_ucontext *uctx, u64 phy_addr, static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool) { u16 pd_bitmap_idx = 0; - const unsigned long *pd_bitmap; + unsigned long *pd_bitmap; if (dpp_pool) { pd_bitmap = dev->pd_mgr->pd_dpp_bitmap; pd_bitmap_idx = find_first_zero_bit(pd_bitmap, dev->pd_mgr->max_dpp_pd); - __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_dpp_bitmap); + __set_bit(pd_bitmap_idx, pd_bitmap); dev->pd_mgr->pd_dpp_count++; if (dev->pd_mgr->pd_dpp_count > dev->pd_mgr->pd_dpp_thrsh) dev->pd_mgr->pd_dpp_thrsh = dev->pd_mgr->pd_dpp_count; @@ -259,7 +261,7 @@ static u16 _ocrdma_pd_mgr_get_bitmap(struct ocrdma_dev *dev, bool dpp_pool) pd_bitmap = dev->pd_mgr->pd_norm_bitmap; 
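/*
 * Illustrative sketch, not part of the patch: addrconf_addr_eui48(),
 * which replaces the removed ocrdma_get_guid() above, derives the
 * standard EUI-64 node GUID from a MAC address (flip the U/L bit of the
 * first octet and insert 0xff, 0xfe in the middle).  The wrapper name
 * below is hypothetical.
 */
#include <linux/if_ether.h>
#include <net/addrconf.h>

static void demo_set_node_guid(__be64 *node_guid, const u8 mac[ETH_ALEN])
{
	addrconf_addr_eui48((u8 *)node_guid, mac);
}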
pd_bitmap_idx = find_first_zero_bit(pd_bitmap, dev->pd_mgr->max_normal_pd); - __set_bit(pd_bitmap_idx, dev->pd_mgr->pd_norm_bitmap); + __set_bit(pd_bitmap_idx, pd_bitmap); dev->pd_mgr->pd_norm_count++; if (dev->pd_mgr->pd_norm_count > dev->pd_mgr->pd_norm_thrsh) dev->pd_mgr->pd_norm_thrsh = dev->pd_mgr->pd_norm_count; @@ -1844,12 +1846,10 @@ int ocrdma_modify_srq(struct ib_srq *ibsrq, int ocrdma_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr) { - int status; struct ocrdma_srq *srq; srq = get_ocrdma_srq(ibsrq); - status = ocrdma_mbx_query_srq(srq, srq_attr); - return status; + return ocrdma_mbx_query_srq(srq, srq_attr); } int ocrdma_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) @@ -1960,7 +1960,6 @@ static int ocrdma_build_inline_sges(struct ocrdma_qp *qp, static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, const struct ib_send_wr *wr) { - int status; struct ocrdma_sge *sge; u32 wqe_size = sizeof(*hdr); @@ -1972,8 +1971,7 @@ static int ocrdma_build_send(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, sge = (struct ocrdma_sge *)(hdr + 1); } - status = ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); - return status; + return ocrdma_build_inline_sges(qp, hdr, sge, wr, wqe_size); } static int ocrdma_build_write(struct ocrdma_qp *qp, struct ocrdma_hdr_wqe *hdr, diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h index b73d742a520c..f860b7fcef33 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h +++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h @@ -59,7 +59,6 @@ int ocrdma_query_port(struct ib_device *ibdev, u32 port, enum rdma_protocol_type ocrdma_query_protocol(struct ib_device *device, u32 port_num); -void ocrdma_get_guid(struct ocrdma_dev *, u8 *guid); int ocrdma_query_pkey(struct ib_device *ibdev, u32 port, u16 index, u16 *pkey); int ocrdma_alloc_ucontext(struct ib_ucontext *uctx, struct ib_udata *udata); diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c index 755930be01b8..65ce6d0f1885 100644 --- a/drivers/infiniband/hw/qedr/main.c +++ b/drivers/infiniband/hw/qedr/main.c @@ -228,7 +228,6 @@ static const struct ib_device_ops qedr_dev_ops = { .query_srq = qedr_query_srq, .reg_user_mr = qedr_reg_user_mr, .req_notify_cq = qedr_arm_cq, - .resize_cq = qedr_resize_cq, INIT_RDMA_OBJ_SIZE(ib_ah, qedr_ah, ibah), INIT_RDMA_OBJ_SIZE(ib_cq, qedr_cq, ibcq), @@ -272,7 +271,7 @@ static int qedr_register_device(struct qedr_dev *dev) static int qedr_alloc_mem_sb(struct qedr_dev *dev, struct qed_sb_info *sb_info, u16 sb_id) { - struct status_block_e4 *sb_virt; + struct status_block *sb_virt; dma_addr_t sb_phys; int rc; diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c index dcb3653db72d..a53476653b0d 100644 --- a/drivers/infiniband/hw/qedr/verbs.c +++ b/drivers/infiniband/hw/qedr/verbs.c @@ -1052,16 +1052,6 @@ err0: return -EINVAL; } -int qedr_resize_cq(struct ib_cq *ibcq, int new_cnt, struct ib_udata *udata) -{ - struct qedr_dev *dev = get_qedr_dev(ibcq->device); - struct qedr_cq *cq = get_qedr_cq(ibcq); - - DP_ERR(dev, "cq %p RESIZE NOT SUPPORTED\n", cq); - - return 0; -} - #define QEDR_DESTROY_CQ_MAX_ITERATIONS (10) #define QEDR_DESTROY_CQ_ITER_DURATION (10) @@ -1941,6 +1931,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev, /* db offset was calculated in copy_qp_uresp, now set in the user q */ if (qedr_qp_has_sq(qp)) { qp->usq.db_addr = ctx->dpi_addr + uresp.sq_db_offset; + qp->sq.max_wr = attrs->cap.max_send_wr; rc 
= qedr_db_recovery_add(dev, qp->usq.db_addr, &qp->usq.db_rec_data->db_data, DB_REC_WIDTH_32B, @@ -1951,6 +1942,7 @@ static int qedr_create_user_qp(struct qedr_dev *dev, if (qedr_qp_has_rq(qp)) { qp->urq.db_addr = ctx->dpi_addr + uresp.rq_db_offset; + qp->rq.max_wr = attrs->cap.max_recv_wr; rc = qedr_db_recovery_add(dev, qp->urq.db_addr, &qp->urq.db_rec_data->db_data, DB_REC_WIDTH_32B, @@ -2744,15 +2736,18 @@ int qedr_query_qp(struct ib_qp *ibqp, int rc = 0; memset(&params, 0, sizeof(params)); - - rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params); - if (rc) - goto err; - memset(qp_attr, 0, sizeof(*qp_attr)); memset(qp_init_attr, 0, sizeof(*qp_init_attr)); - qp_attr->qp_state = qedr_get_ibqp_state(params.state); + if (qp->qp_type != IB_QPT_GSI) { + rc = dev->ops->rdma_query_qp(dev->rdma_ctx, qp->qed_qp, &params); + if (rc) + goto err; + qp_attr->qp_state = qedr_get_ibqp_state(params.state); + } else { + qp_attr->qp_state = qedr_get_ibqp_state(QED_ROCE_QP_STATE_RTS); + } + qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state); qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu); qp_attr->path_mig_state = IB_MIG_MIGRATED; diff --git a/drivers/infiniband/hw/qedr/verbs.h b/drivers/infiniband/hw/qedr/verbs.h index 031687dafc61..081753df79ef 100644 --- a/drivers/infiniband/hw/qedr/verbs.h +++ b/drivers/infiniband/hw/qedr/verbs.h @@ -53,7 +53,6 @@ int qedr_alloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata); int qedr_dealloc_xrcd(struct ib_xrcd *ibxrcd, struct ib_udata *udata); int qedr_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, struct ib_udata *udata); -int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *); int qedr_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata); int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags); int qedr_create_qp(struct ib_qp *qp, struct ib_qp_init_attr *attrs, diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h index 9363bccfc6e7..a8e1c30c370f 100644 --- a/drivers/infiniband/hw/qib/qib.h +++ b/drivers/infiniband/hw/qib/qib.h @@ -196,7 +196,7 @@ struct qib_ctxtdata { pid_t pid; pid_t subpid[QLOGIC_IB_MAX_SUBCTXT]; /* same size as task_struct .comm[], command that opened context */ - char comm[16]; + char comm[TASK_COMM_LEN]; /* pkeys set by this use of this ctxt */ u16 pkeys[4]; /* so file ops can get at unit */ diff --git a/drivers/infiniband/hw/qib/qib_driver.c b/drivers/infiniband/hw/qib/qib_driver.c index 84fc4dcc5399..bf3fa12fe935 100644 --- a/drivers/infiniband/hw/qib/qib_driver.c +++ b/drivers/infiniband/hw/qib/qib_driver.c @@ -1,4 +1,5 @@ /* + * Copyright (c) 2021 Cornelis Networks. All rights reserved. * Copyright (c) 2013 Intel Corporation. All rights reserved. * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
@@ -62,8 +63,8 @@ MODULE_PARM_DESC(compat_ddr_negotiate, "Attempt pre-IBTA 1.2 DDR speed negotiation"); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Intel <ibsupport@intel.com>"); -MODULE_DESCRIPTION("Intel IB driver"); +MODULE_AUTHOR("Cornelis <support@cornelisnetworks.com>"); +MODULE_DESCRIPTION("Cornelis IB driver"); /* * QIB_PIO_MAXIBHDR is the max IB header size allowed for in our diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 63854f4b6524..aa290928cf96 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -1321,7 +1321,7 @@ static int setup_ctxt(struct qib_pportdata *ppd, int ctxt, rcd->tid_pg_list = ptmp; rcd->pid = current->pid; init_waitqueue_head(&dd->rcd[ctxt]->wait); - strlcpy(rcd->comm, current->comm, sizeof(rcd->comm)); + get_task_comm(rcd->comm, current); ctxt_fp(fp) = rcd; qib_stats.sps_ctxts++; dd->freectxts--; diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c index a9b83bc13f4a..aea571943768 100644 --- a/drivers/infiniband/hw/qib/qib_iba6120.c +++ b/drivers/infiniband/hw/qib/qib_iba6120.c @@ -3030,7 +3030,7 @@ static int qib_6120_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) /* Does read/modify/write to appropriate registers to * set output and direction bits selected by mask. - * these are in their canonical postions (e.g. lsb of + * these are in their canonical positions (e.g. lsb of * dir will end up in D48 of extctrl on existing chips). * returns contents of GP Inputs. */ diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c index d1c0bc31869f..80a8dd6c7814 100644 --- a/drivers/infiniband/hw/qib/qib_iba7220.c +++ b/drivers/infiniband/hw/qib/qib_iba7220.c @@ -3742,7 +3742,7 @@ static int qib_7220_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) /* * Does read/modify/write to appropriate registers to * set output and direction bits selected by mask. - * these are in their canonical postions (e.g. lsb of + * these are in their canonical positions (e.g. lsb of * dir will end up in D48 of extctrl on existing chips). * returns contents of GP Inputs. */ diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c index ab98b6a3ae1e..ceed302cf6a0 100644 --- a/drivers/infiniband/hw/qib/qib_iba7322.c +++ b/drivers/infiniband/hw/qib/qib_iba7322.c @@ -5665,7 +5665,7 @@ static int qib_7322_ib_updown(struct qib_pportdata *ppd, int ibup, u64 ibcs) /* * Does read/modify/write to appropriate registers to * set output and direction bits selected by mask. - * these are in their canonical postions (e.g. lsb of + * these are in their canonical positions (e.g. lsb of * dir will end up in D48 of extctrl on existing chips). * returns contents of GP Inputs. 
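/*
 * Aside, a small sketch with a hypothetical context struct (not from the
 * patch): sizing the buffer with TASK_COMM_LEN and filling it with
 * get_task_comm(), as the qib change above does, keeps the copy in sync
 * with task_struct::comm and takes a consistent snapshot instead of
 * open-coding strlcpy(current->comm).
 */
#include <linux/sched.h>

struct demo_ctxt {
	pid_t pid;
	char comm[TASK_COMM_LEN];
};

static void demo_record_opener(struct demo_ctxt *ctxt)
{
	ctxt->pid = current->pid;
	get_task_comm(ctxt->comm, current);
}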
*/ diff --git a/drivers/infiniband/hw/qib/qib_user_sdma.c b/drivers/infiniband/hw/qib/qib_user_sdma.c index ac11943a5ddb..bf2f30d67949 100644 --- a/drivers/infiniband/hw/qib/qib_user_sdma.c +++ b/drivers/infiniband/hw/qib/qib_user_sdma.c @@ -941,7 +941,7 @@ static int qib_user_sdma_queue_pkts(const struct qib_devdata *dd, &addrlimit) || addrlimit > type_max(typeof(pkt->addrlimit))) { ret = -EINVAL; - goto free_pbc; + goto free_pkt; } pkt->addrlimit = addrlimit; diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c index ef91bff5c23c..0080f0be72fe 100644 --- a/drivers/infiniband/hw/qib/qib_verbs.c +++ b/drivers/infiniband/hw/qib/qib_verbs.c @@ -425,7 +425,7 @@ static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off) } #endif -static void copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss, +static void qib_copy_io(u32 __iomem *piobuf, struct rvt_sge_state *ss, u32 length, unsigned flush_wc) { u32 extra = 0; @@ -975,7 +975,7 @@ static int qib_verbs_send_pio(struct rvt_qp *qp, struct ib_header *ibhdr, qib_pio_copy(piobuf, addr, dwords); goto done; } - copy_io(piobuf, ss, len, flush_wc); + qib_copy_io(piobuf, ss, len, flush_wc); done: if (dd->flags & QIB_USE_SPCL_TRIG) { u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.c b/drivers/infiniband/hw/usnic/usnic_fwd.c index 398c4c00b932..18a70850b738 100644 --- a/drivers/infiniband/hw/usnic/usnic_fwd.c +++ b/drivers/infiniband/hw/usnic/usnic_fwd.c @@ -103,7 +103,7 @@ void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev) kfree(ufdev); } -void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]) +void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, const char mac[ETH_ALEN]) { spin_lock(&ufdev->lock); memcpy(&ufdev->mac, mac, sizeof(ufdev->mac)); diff --git a/drivers/infiniband/hw/usnic/usnic_fwd.h b/drivers/infiniband/hw/usnic/usnic_fwd.h index f0b71d593da5..a91200886922 100644 --- a/drivers/infiniband/hw/usnic/usnic_fwd.h +++ b/drivers/infiniband/hw/usnic/usnic_fwd.h @@ -74,7 +74,7 @@ struct usnic_filter_action { struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev); void usnic_fwd_dev_free(struct usnic_fwd_dev *ufdev); -void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, char mac[ETH_ALEN]); +void usnic_fwd_set_mac(struct usnic_fwd_dev *ufdev, const char mac[ETH_ALEN]); void usnic_fwd_add_ipaddr(struct usnic_fwd_dev *ufdev, __be32 inaddr); void usnic_fwd_del_ipaddr(struct usnic_fwd_dev *ufdev); void usnic_fwd_carrier_up(struct usnic_fwd_dev *ufdev); diff --git a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c index 586b0e52ba7f..7d868f033bbf 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_sysfs.c @@ -243,10 +243,11 @@ static struct attribute *usnic_ib_qpn_default_attrs[] = { &qpn_attr_summary.attr, NULL }; +ATTRIBUTE_GROUPS(usnic_ib_qpn_default); static struct kobj_type usnic_ib_qpn_type = { .sysfs_ops = &usnic_ib_qpn_sysfs_ops, - .default_attrs = usnic_ib_qpn_default_attrs + .default_groups = usnic_ib_qpn_default_groups, }; int usnic_ib_sysfs_register_usdev(struct usnic_ib_dev *us_ibdev) diff --git a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c index 756a83bcff58..5a0e26cd648e 100644 --- a/drivers/infiniband/hw/usnic/usnic_ib_verbs.c +++ b/drivers/infiniband/hw/usnic/usnic_ib_verbs.c @@ -442,12 +442,10 @@ int usnic_ib_query_gid(struct ib_device *ibdev, u32 port, int index, int 
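/*
 * Illustrative sketch, not part of the patch: the usnic sysfs change
 * above is the standard conversion from kobj_type::default_attrs to
 * default_groups - ATTRIBUTE_GROUPS(foo) turns a NULL-terminated
 * attribute array named foo_attrs into foo_groups.  All names below are
 * hypothetical, and kobj_sysfs_ops is used only to keep the example
 * self-contained (the driver keeps its own sysfs_ops).
 */
#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t summary_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	return sysfs_emit(buf, "demo\n");
}
static struct kobj_attribute demo_summary_attr = __ATTR_RO(summary);

static struct attribute *demo_default_attrs[] = {
	&demo_summary_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(demo_default);

static struct kobj_type demo_ktype = {
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_groups	= demo_default_groups,
};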
usnic_ib_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) { struct usnic_ib_pd *pd = to_upd(ibpd); - void *umem_pd; - umem_pd = pd->umem_pd = usnic_uiom_alloc_pd(); - if (IS_ERR_OR_NULL(umem_pd)) { - return umem_pd ? PTR_ERR(umem_pd) : -ENOMEM; - } + pd->umem_pd = usnic_uiom_alloc_pd(); + if (IS_ERR(pd->umem_pd)) + return PTR_ERR(pd->umem_pd); return 0; } diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c index bf51357ea3aa..9a4de962e947 100644 --- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c +++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_doorbell.c @@ -63,12 +63,12 @@ int pvrdma_uar_table_init(struct pvrdma_dev *dev) tbl->max = num; tbl->mask = mask; spin_lock_init(&tbl->lock); - tbl->table = kcalloc(BITS_TO_LONGS(num), sizeof(long), GFP_KERNEL); + tbl->table = bitmap_zalloc(num, GFP_KERNEL); if (!tbl->table) return -ENOMEM; /* 0th UAR is taken by the device. */ - set_bit(0, tbl->table); + __set_bit(0, tbl->table); return 0; } @@ -77,7 +77,7 @@ void pvrdma_uar_table_cleanup(struct pvrdma_dev *dev) { struct pvrdma_id_table *tbl = &dev->uar_table.tbl; - kfree(tbl->table); + bitmap_free(tbl->table); } int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) @@ -100,7 +100,7 @@ int pvrdma_uar_alloc(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) return -ENOMEM; } - set_bit(obj, tbl->table); + __set_bit(obj, tbl->table); obj |= tbl->top; spin_unlock_irqrestore(&tbl->lock, flags); @@ -120,7 +120,7 @@ void pvrdma_uar_free(struct pvrdma_dev *dev, struct pvrdma_uar_map *uar) obj = uar->index & (tbl->max - 1); spin_lock_irqsave(&tbl->lock, flags); - clear_bit(obj, tbl->table); + __clear_bit(obj, tbl->table); tbl->last = min(tbl->last, obj); tbl->top = (tbl->top + tbl->max) & tbl->mask; spin_unlock_irqrestore(&tbl->lock, flags); diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 3305f2744bfa..ae50b56e8913 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -3073,6 +3073,8 @@ do_write: case IB_WR_ATOMIC_FETCH_AND_ADD: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) goto inv_err; + if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1))) + goto inv_err; if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), wqe->atomic_wr.remote_addr, wqe->atomic_wr.rkey, diff --git a/drivers/infiniband/sw/rxe/Makefile b/drivers/infiniband/sw/rxe/Makefile index 1e24673e9318..5395a581f4bb 100644 --- a/drivers/infiniband/sw/rxe/Makefile +++ b/drivers/infiniband/sw/rxe/Makefile @@ -22,5 +22,4 @@ rdma_rxe-y := \ rxe_mcast.o \ rxe_task.o \ rxe_net.o \ - rxe_sysfs.o \ rxe_hw_counters.o diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c index 8e0f9c489cab..fab291245366 100644 --- a/drivers/infiniband/sw/rxe/rxe.c +++ b/drivers/infiniband/sw/rxe/rxe.c @@ -13,8 +13,6 @@ MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib"); MODULE_DESCRIPTION("Soft RDMA transport"); MODULE_LICENSE("Dual BSD/GPL"); -bool rxe_initialized; - /* free resources for a rxe device all objects created for this device must * have been destroyed */ @@ -290,7 +288,6 @@ static int __init rxe_module_init(void) return err; rdma_link_register(&rxe_link_ops); - rxe_initialized = true; pr_info("loaded\n"); return 0; } @@ -301,7 +298,6 @@ static void __exit rxe_module_exit(void) ib_unregister_driver(RDMA_DRIVER_RXE); rxe_net_exit(); - rxe_initialized = false; pr_info("unloaded\n"); } diff --git a/drivers/infiniband/sw/rxe/rxe.h 
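/*
 * Aside (hypothetical helper, not from this patch): the new rdmavt check
 * above rejects ATOMIC operations whose remote address is not 8-byte
 * aligned, as required for 64-bit atomics.  The test is the usual
 * power-of-two alignment idiom.
 */
#include <linux/types.h>

static bool demo_atomic_addr_aligned(u64 remote_addr)
{
	/* 64-bit atomics must be naturally aligned */
	return (remote_addr & (sizeof(u64) - 1)) == 0;
}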
b/drivers/infiniband/sw/rxe/rxe.h index 1bb3fb618bf5..fb9066e6f5f0 100644 --- a/drivers/infiniband/sw/rxe/rxe.h +++ b/drivers/infiniband/sw/rxe/rxe.h @@ -39,8 +39,6 @@ #define RXE_ROCE_V2_SPORT (0xc000) -extern bool rxe_initialized; - void rxe_set_mtu(struct rxe_dev *rxe, unsigned int dev_mtu); int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name); diff --git a/drivers/infiniband/sw/rxe/rxe_av.c b/drivers/infiniband/sw/rxe/rxe_av.c index da2e867a1ed9..38c7b6fb39d7 100644 --- a/drivers/infiniband/sw/rxe/rxe_av.c +++ b/drivers/infiniband/sw/rxe/rxe_av.c @@ -101,11 +101,29 @@ void rxe_av_fill_ip_info(struct rxe_av *av, struct rdma_ah_attr *attr) struct rxe_av *rxe_get_av(struct rxe_pkt_info *pkt) { + struct rxe_ah *ah; + u32 ah_num; + if (!pkt || !pkt->qp) return NULL; if (qp_type(pkt->qp) == IB_QPT_RC || qp_type(pkt->qp) == IB_QPT_UC) return &pkt->qp->pri_av; - return (pkt->wqe) ? &pkt->wqe->av : NULL; + if (!pkt->wqe) + return NULL; + + ah_num = pkt->wqe->wr.wr.ud.ah_num; + if (ah_num) { + /* only new user provider or kernel client */ + ah = rxe_pool_get_index(&pkt->rxe->ah_pool, ah_num); + if (!ah || ah->ah_num != ah_num || rxe_ah_pd(ah) != pkt->qp->pd) { + pr_warn("Unable to find AH matching ah_num\n"); + return NULL; + } + return &ah->av; + } + + /* only old user provider for UD sends*/ + return &pkt->wqe->wr.wr.ud.av; } diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c index d2d802c776fd..f363fe3fa414 100644 --- a/drivers/infiniband/sw/rxe/rxe_comp.c +++ b/drivers/infiniband/sw/rxe/rxe_comp.c @@ -142,10 +142,7 @@ static inline enum comp_state get_wqe(struct rxe_qp *qp, /* we come here whether or not we found a response packet to see if * there are any posted WQEs */ - if (qp->is_user) - wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_USER); - else - wqe = queue_head(qp->sq.queue, QUEUE_TYPE_KERNEL); + wqe = queue_head(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT); *wqe_p = wqe; /* no WQE or requester has not started it yet */ @@ -383,30 +380,35 @@ static inline enum comp_state do_atomic(struct rxe_qp *qp, static void make_send_cqe(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_cqe *cqe) { + struct ib_wc *wc = &cqe->ibwc; + struct ib_uverbs_wc *uwc = &cqe->uibwc; + memset(cqe, 0, sizeof(*cqe)); if (!qp->is_user) { - struct ib_wc *wc = &cqe->ibwc; - - wc->wr_id = wqe->wr.wr_id; - wc->status = wqe->status; - wc->opcode = wr_to_wc_opcode(wqe->wr.opcode); - if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM || - wqe->wr.opcode == IB_WR_SEND_WITH_IMM) - wc->wc_flags = IB_WC_WITH_IMM; - wc->byte_len = wqe->dma.length; - wc->qp = &qp->ibqp; + wc->wr_id = wqe->wr.wr_id; + wc->status = wqe->status; + wc->qp = &qp->ibqp; } else { - struct ib_uverbs_wc *uwc = &cqe->uibwc; - - uwc->wr_id = wqe->wr.wr_id; - uwc->status = wqe->status; - uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode); - if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM || - wqe->wr.opcode == IB_WR_SEND_WITH_IMM) - uwc->wc_flags = IB_WC_WITH_IMM; - uwc->byte_len = wqe->dma.length; - uwc->qp_num = qp->ibqp.qp_num; + uwc->wr_id = wqe->wr.wr_id; + uwc->status = wqe->status; + uwc->qp_num = qp->ibqp.qp_num; + } + + if (wqe->status == IB_WC_SUCCESS) { + if (!qp->is_user) { + wc->opcode = wr_to_wc_opcode(wqe->wr.opcode); + if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM || + wqe->wr.opcode == IB_WR_SEND_WITH_IMM) + wc->wc_flags = IB_WC_WITH_IMM; + wc->byte_len = wqe->dma.length; + } else { + uwc->opcode = wr_to_wc_opcode(wqe->wr.opcode); + if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM || 
+ wqe->wr.opcode == IB_WR_SEND_WITH_IMM) + uwc->wc_flags = IB_WC_WITH_IMM; + uwc->byte_len = wqe->dma.length; + } } } @@ -432,10 +434,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe) if (post) make_send_cqe(qp, wqe, &cqe); - if (qp->is_user) - advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_USER); - else - advance_consumer(qp->sq.queue, QUEUE_TYPE_KERNEL); + queue_advance_consumer(qp->sq.queue, QUEUE_TYPE_FROM_CLIENT); if (post) rxe_cq_post(qp->scq, &cqe, 0); @@ -459,8 +458,6 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, struct rxe_pkt_info *pkt, struct rxe_send_wqe *wqe) { - unsigned long flags; - if (wqe->has_rd_atomic) { wqe->has_rd_atomic = 0; atomic_inc(&qp->req.rd_atomic); @@ -473,11 +470,11 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, if (unlikely(qp->req.state == QP_STATE_DRAIN)) { /* state_lock used by requester & completer */ - spin_lock_irqsave(&qp->state_lock, flags); + spin_lock_bh(&qp->state_lock); if ((qp->req.state == QP_STATE_DRAIN) && (qp->comp.psn == qp->req.psn)) { qp->req.state = QP_STATE_DRAINED; - spin_unlock_irqrestore(&qp->state_lock, flags); + spin_unlock_bh(&qp->state_lock); if (qp->ibqp.event_handler) { struct ib_event ev; @@ -489,7 +486,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp, qp->ibqp.qp_context); } } else { - spin_unlock_irqrestore(&qp->state_lock, flags); + spin_unlock_bh(&qp->state_lock); } } @@ -539,7 +536,7 @@ static void rxe_drain_resp_pkts(struct rxe_qp *qp, bool notify) wqe->status = IB_WC_WR_FLUSH_ERR; do_complete(qp, wqe); } else { - advance_consumer(q, q->type); + queue_advance_consumer(q, q->type); } } } diff --git a/drivers/infiniband/sw/rxe/rxe_cq.c b/drivers/infiniband/sw/rxe/rxe_cq.c index aef288f164fd..6baaaa34458e 100644 --- a/drivers/infiniband/sw/rxe/rxe_cq.c +++ b/drivers/infiniband/sw/rxe/rxe_cq.c @@ -25,11 +25,7 @@ int rxe_cq_chk_attr(struct rxe_dev *rxe, struct rxe_cq *cq, } if (cq) { - if (cq->is_user) - count = queue_count(cq->queue, QUEUE_TYPE_TO_USER); - else - count = queue_count(cq->queue, QUEUE_TYPE_KERNEL); - + count = queue_count(cq->queue, QUEUE_TYPE_TO_CLIENT); if (cqe < count) { pr_warn("cqe(%d) < current # elements in queue (%d)", cqe, count); @@ -46,14 +42,13 @@ err1: static void rxe_send_complete(struct tasklet_struct *t) { struct rxe_cq *cq = from_tasklet(cq, t, comp_task); - unsigned long flags; - spin_lock_irqsave(&cq->cq_lock, flags); + spin_lock_bh(&cq->cq_lock); if (cq->is_dying) { - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); return; } - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); } @@ -65,7 +60,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, int err; enum queue_type type; - type = uresp ? 
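/*
 * Illustrative sketch, not part of the patch: the rxe locking changes
 * above switch spin_lock_irqsave() to spin_lock_bh() for locks that are
 * only taken in process context and in tasklet (softirq) context, never
 * from a hardirq handler, so disabling bottom halves is sufficient.
 * Names below are hypothetical.
 */
#include <linux/spinlock.h>

struct demo_cq {
	spinlock_t lock;	/* taken in process and tasklet context only */
	bool is_dying;
};

static void demo_cq_disable(struct demo_cq *cq)
{
	spin_lock_bh(&cq->lock);
	cq->is_dying = true;
	spin_unlock_bh(&cq->lock);
}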
QUEUE_TYPE_TO_USER : QUEUE_TYPE_KERNEL; + type = QUEUE_TYPE_TO_CLIENT; cq->queue = rxe_queue_init(rxe, &cqe, sizeof(struct rxe_cqe), type); if (!cq->queue) { @@ -81,8 +76,7 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe, return err; } - if (uresp) - cq->is_user = 1; + cq->is_user = uresp; cq->is_dying = false; @@ -111,19 +105,14 @@ int rxe_cq_resize_queue(struct rxe_cq *cq, int cqe, int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) { struct ib_event ev; - unsigned long flags; int full; void *addr; - spin_lock_irqsave(&cq->cq_lock, flags); - - if (cq->is_user) - full = queue_full(cq->queue, QUEUE_TYPE_TO_USER); - else - full = queue_full(cq->queue, QUEUE_TYPE_KERNEL); + spin_lock_bh(&cq->cq_lock); + full = queue_full(cq->queue, QUEUE_TYPE_TO_CLIENT); if (unlikely(full)) { - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); if (cq->ibcq.event_handler) { ev.device = cq->ibcq.device; ev.element.cq = &cq->ibcq; @@ -134,19 +123,12 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) return -EBUSY; } - if (cq->is_user) - addr = producer_addr(cq->queue, QUEUE_TYPE_TO_USER); - else - addr = producer_addr(cq->queue, QUEUE_TYPE_KERNEL); - + addr = queue_producer_addr(cq->queue, QUEUE_TYPE_TO_CLIENT); memcpy(addr, cqe, sizeof(*cqe)); - if (cq->is_user) - advance_producer(cq->queue, QUEUE_TYPE_TO_USER); - else - advance_producer(cq->queue, QUEUE_TYPE_KERNEL); + queue_advance_producer(cq->queue, QUEUE_TYPE_TO_CLIENT); - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); if ((cq->notify == IB_CQ_NEXT_COMP) || (cq->notify == IB_CQ_SOLICITED && solicited)) { @@ -159,16 +141,14 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited) void rxe_cq_disable(struct rxe_cq *cq) { - unsigned long flags; - - spin_lock_irqsave(&cq->cq_lock, flags); + spin_lock_bh(&cq->cq_lock); cq->is_dying = true; - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); } -void rxe_cq_cleanup(struct rxe_pool_entry *arg) +void rxe_cq_cleanup(struct rxe_pool_elem *elem) { - struct rxe_cq *cq = container_of(arg, typeof(*cq), pelem); + struct rxe_cq *cq = container_of(elem, typeof(*cq), elem); if (cq->queue) rxe_queue_cleanup(cq->queue); diff --git a/drivers/infiniband/sw/rxe/rxe_hw_counters.c b/drivers/infiniband/sw/rxe/rxe_hw_counters.c index d5ceb706d964..a012522b577a 100644 --- a/drivers/infiniband/sw/rxe/rxe_hw_counters.c +++ b/drivers/infiniband/sw/rxe/rxe_hw_counters.c @@ -6,22 +6,22 @@ #include "rxe.h" #include "rxe_hw_counters.h" -static const char * const rxe_counter_name[] = { - [RXE_CNT_SENT_PKTS] = "sent_pkts", - [RXE_CNT_RCVD_PKTS] = "rcvd_pkts", - [RXE_CNT_DUP_REQ] = "duplicate_request", - [RXE_CNT_OUT_OF_SEQ_REQ] = "out_of_seq_request", - [RXE_CNT_RCV_RNR] = "rcvd_rnr_err", - [RXE_CNT_SND_RNR] = "send_rnr_err", - [RXE_CNT_RCV_SEQ_ERR] = "rcvd_seq_err", - [RXE_CNT_COMPLETER_SCHED] = "ack_deferred", - [RXE_CNT_RETRY_EXCEEDED] = "retry_exceeded_err", - [RXE_CNT_RNR_RETRY_EXCEEDED] = "retry_rnr_exceeded_err", - [RXE_CNT_COMP_RETRY] = "completer_retry_err", - [RXE_CNT_SEND_ERR] = "send_err", - [RXE_CNT_LINK_DOWNED] = "link_downed", - [RXE_CNT_RDMA_SEND] = "rdma_sends", - [RXE_CNT_RDMA_RECV] = "rdma_recvs", +static const struct rdma_stat_desc rxe_counter_descs[] = { + [RXE_CNT_SENT_PKTS].name = "sent_pkts", + [RXE_CNT_RCVD_PKTS].name = "rcvd_pkts", + [RXE_CNT_DUP_REQ].name = "duplicate_request", + [RXE_CNT_OUT_OF_SEQ_REQ].name = "out_of_seq_request", + 
[RXE_CNT_RCV_RNR].name = "rcvd_rnr_err", + [RXE_CNT_SND_RNR].name = "send_rnr_err", + [RXE_CNT_RCV_SEQ_ERR].name = "rcvd_seq_err", + [RXE_CNT_COMPLETER_SCHED].name = "ack_deferred", + [RXE_CNT_RETRY_EXCEEDED].name = "retry_exceeded_err", + [RXE_CNT_RNR_RETRY_EXCEEDED].name = "retry_rnr_exceeded_err", + [RXE_CNT_COMP_RETRY].name = "completer_retry_err", + [RXE_CNT_SEND_ERR].name = "send_err", + [RXE_CNT_LINK_DOWNED].name = "link_downed", + [RXE_CNT_RDMA_SEND].name = "rdma_sends", + [RXE_CNT_RDMA_RECV].name = "rdma_recvs", }; int rxe_ib_get_hw_stats(struct ib_device *ibdev, @@ -34,18 +34,18 @@ int rxe_ib_get_hw_stats(struct ib_device *ibdev, if (!port || !stats) return -EINVAL; - for (cnt = 0; cnt < ARRAY_SIZE(rxe_counter_name); cnt++) + for (cnt = 0; cnt < ARRAY_SIZE(rxe_counter_descs); cnt++) stats->value[cnt] = atomic64_read(&dev->stats_counters[cnt]); - return ARRAY_SIZE(rxe_counter_name); + return ARRAY_SIZE(rxe_counter_descs); } struct rdma_hw_stats *rxe_ib_alloc_hw_port_stats(struct ib_device *ibdev, u32 port_num) { - BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_name) != RXE_NUM_OF_COUNTERS); + BUILD_BUG_ON(ARRAY_SIZE(rxe_counter_descs) != RXE_NUM_OF_COUNTERS); - return rdma_alloc_hw_stats_struct(rxe_counter_name, - ARRAY_SIZE(rxe_counter_name), + return rdma_alloc_hw_stats_struct(rxe_counter_descs, + ARRAY_SIZE(rxe_counter_descs), RDMA_HW_STATS_DEFAULT_LIFESPAN); } diff --git a/drivers/infiniband/sw/rxe/rxe_loc.h b/drivers/infiniband/sw/rxe/rxe_loc.h index f0c954575bde..b1e174afb1d4 100644 --- a/drivers/infiniband/sw/rxe/rxe_loc.h +++ b/drivers/infiniband/sw/rxe/rxe_loc.h @@ -37,7 +37,7 @@ int rxe_cq_post(struct rxe_cq *cq, struct rxe_cqe *cqe, int solicited); void rxe_cq_disable(struct rxe_cq *cq); -void rxe_cq_cleanup(struct rxe_pool_entry *arg); +void rxe_cq_cleanup(struct rxe_pool_elem *arg); /* rxe_mcast.c */ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, @@ -51,7 +51,7 @@ int rxe_mcast_drop_grp_elem(struct rxe_dev *rxe, struct rxe_qp *qp, void rxe_drop_all_mcast_groups(struct rxe_qp *qp); -void rxe_mc_cleanup(struct rxe_pool_entry *arg); +void rxe_mc_cleanup(struct rxe_pool_elem *arg); /* rxe_mmap.c */ struct rxe_mmap_info { @@ -86,8 +86,10 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key, int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length); int advance_dma_data(struct rxe_dma_info *dma, unsigned int length); int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey); +int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe); +int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr); int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata); -void rxe_mr_cleanup(struct rxe_pool_entry *arg); +void rxe_mr_cleanup(struct rxe_pool_elem *arg); /* rxe_mw.c */ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata); @@ -95,7 +97,7 @@ int rxe_dealloc_mw(struct ib_mw *ibmw); int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe); int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey); struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey); -void rxe_mw_cleanup(struct rxe_pool_entry *arg); +void rxe_mw_cleanup(struct rxe_pool_elem *arg); /* rxe_net.c */ struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av, @@ -129,7 +131,7 @@ void rxe_qp_error(struct rxe_qp *qp); void rxe_qp_destroy(struct rxe_qp *qp); -void rxe_qp_cleanup(struct rxe_pool_entry *arg); +void rxe_qp_cleanup(struct rxe_pool_elem *elem); static inline int qp_num(struct rxe_qp *qp) { diff --git a/drivers/infiniband/sw/rxe/rxe_mcast.c 
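/*
 * Aside, a reduced sketch with hypothetical counters (not patch content):
 * the rxe counter table above now builds an array of struct
 * rdma_stat_desc with designated initializers on the .name member
 * instead of a bare array of strings, leaving room for per-counter
 * metadata.
 */
#include <linux/build_bug.h>
#include <rdma/ib_verbs.h>

enum { DEMO_CNT_SENT, DEMO_CNT_RCVD, DEMO_NUM_COUNTERS };

static const struct rdma_stat_desc demo_counter_descs[] = {
	[DEMO_CNT_SENT].name = "sent_pkts",
	[DEMO_CNT_RCVD].name = "rcvd_pkts",
};

static struct rdma_hw_stats *demo_alloc_port_stats(struct ib_device *ibdev,
						   u32 port_num)
{
	BUILD_BUG_ON(ARRAY_SIZE(demo_counter_descs) != DEMO_NUM_COUNTERS);

	return rdma_alloc_hw_stats_struct(demo_counter_descs,
					  ARRAY_SIZE(demo_counter_descs),
					  RDMA_HW_STATS_DEFAULT_LIFESPAN);
}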
b/drivers/infiniband/sw/rxe/rxe_mcast.c index 1c1d1b53312d..bd1ac88b8700 100644 --- a/drivers/infiniband/sw/rxe/rxe_mcast.c +++ b/drivers/infiniband/sw/rxe/rxe_mcast.c @@ -40,12 +40,11 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, int err; struct rxe_mc_grp *grp; struct rxe_pool *pool = &rxe->mc_grp_pool; - unsigned long flags; if (rxe->attr.max_mcast_qp_attach == 0) return -EINVAL; - write_lock_irqsave(&pool->pool_lock, flags); + write_lock_bh(&pool->pool_lock); grp = rxe_pool_get_key_locked(pool, mgid); if (grp) @@ -53,13 +52,13 @@ int rxe_mcast_get_grp(struct rxe_dev *rxe, union ib_gid *mgid, grp = create_grp(rxe, pool, mgid); if (IS_ERR(grp)) { - write_unlock_irqrestore(&pool->pool_lock, flags); + write_unlock_bh(&pool->pool_lock); err = PTR_ERR(grp); return err; } done: - write_unlock_irqrestore(&pool->pool_lock, flags); + write_unlock_bh(&pool->pool_lock); *grp_p = grp; return 0; } @@ -169,9 +168,9 @@ void rxe_drop_all_mcast_groups(struct rxe_qp *qp) } } -void rxe_mc_cleanup(struct rxe_pool_entry *arg) +void rxe_mc_cleanup(struct rxe_pool_elem *elem) { - struct rxe_mc_grp *grp = container_of(arg, typeof(*grp), pelem); + struct rxe_mc_grp *grp = container_of(elem, typeof(*grp), elem); struct rxe_dev *rxe = grp->rxe; rxe_drop_key(grp); diff --git a/drivers/infiniband/sw/rxe/rxe_mr.c b/drivers/infiniband/sw/rxe/rxe_mr.c index 5890a8246216..453ef3c9d535 100644 --- a/drivers/infiniband/sw/rxe/rxe_mr.c +++ b/drivers/infiniband/sw/rxe/rxe_mr.c @@ -24,17 +24,22 @@ u8 rxe_get_next_key(u32 last_key) int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length) { + struct rxe_map_set *set = mr->cur_map_set; + switch (mr->type) { - case RXE_MR_TYPE_DMA: + case IB_MR_TYPE_DMA: return 0; - case RXE_MR_TYPE_MR: - if (iova < mr->iova || length > mr->length || - iova > mr->iova + mr->length - length) + case IB_MR_TYPE_USER: + case IB_MR_TYPE_MEM_REG: + if (iova < set->iova || length > set->length || + iova > set->iova + set->length - length) return -EFAULT; return 0; default: + pr_warn("%s: mr type (%d) not supported\n", + __func__, mr->type); return -EFAULT; } } @@ -45,51 +50,104 @@ int mr_check_range(struct rxe_mr *mr, u64 iova, size_t length) static void rxe_mr_init(int access, struct rxe_mr *mr) { - u32 lkey = mr->pelem.index << 8 | rxe_get_next_key(-1); + u32 lkey = mr->elem.index << 8 | rxe_get_next_key(-1); u32 rkey = (access & IB_ACCESS_REMOTE) ? lkey : 0; - mr->ibmr.lkey = lkey; - mr->ibmr.rkey = rkey; + /* set ibmr->l/rkey and also copy into private l/rkey + * for user MRs these will always be the same + * for cases where caller 'owns' the key portion + * they may be different until REG_MR WQE is executed. 
+ */ + mr->lkey = mr->ibmr.lkey = lkey; + mr->rkey = mr->ibmr.rkey = rkey; + mr->state = RXE_MR_STATE_INVALID; - mr->type = RXE_MR_TYPE_NONE; mr->map_shift = ilog2(RXE_BUF_PER_MAP); } -static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf) +static void rxe_mr_free_map_set(int num_map, struct rxe_map_set *set) { int i; - int num_map; - struct rxe_map **map = mr->map; - num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP; + for (i = 0; i < num_map; i++) + kfree(set->map[i]); - mr->map = kmalloc_array(num_map, sizeof(*map), GFP_KERNEL); - if (!mr->map) - goto err1; + kfree(set->map); + kfree(set); +} + +static int rxe_mr_alloc_map_set(int num_map, struct rxe_map_set **setp) +{ + int i; + struct rxe_map_set *set; + + set = kmalloc(sizeof(*set), GFP_KERNEL); + if (!set) + goto err_out; + + set->map = kmalloc_array(num_map, sizeof(struct rxe_map *), GFP_KERNEL); + if (!set->map) + goto err_free_set; for (i = 0; i < num_map; i++) { - mr->map[i] = kmalloc(sizeof(**map), GFP_KERNEL); - if (!mr->map[i]) - goto err2; + set->map[i] = kmalloc(sizeof(struct rxe_map), GFP_KERNEL); + if (!set->map[i]) + goto err_free_map; } + *setp = set; + + return 0; + +err_free_map: + for (i--; i >= 0; i--) + kfree(set->map[i]); + + kfree(set->map); +err_free_set: + kfree(set); +err_out: + return -ENOMEM; +} + +/** + * rxe_mr_alloc() - Allocate memory map array(s) for MR + * @mr: Memory region + * @num_buf: Number of buffer descriptors to support + * @both: If non zero allocate both mr->map and mr->next_map + * else just allocate mr->map. Used for fast MRs + * + * Return: 0 on success else an error + */ +static int rxe_mr_alloc(struct rxe_mr *mr, int num_buf, int both) +{ + int ret; + int num_map; + BUILD_BUG_ON(!is_power_of_2(RXE_BUF_PER_MAP)); + num_map = (num_buf + RXE_BUF_PER_MAP - 1) / RXE_BUF_PER_MAP; mr->map_shift = ilog2(RXE_BUF_PER_MAP); mr->map_mask = RXE_BUF_PER_MAP - 1; - mr->num_buf = num_buf; - mr->num_map = num_map; mr->max_buf = num_map * RXE_BUF_PER_MAP; + mr->num_map = num_map; - return 0; + ret = rxe_mr_alloc_map_set(num_map, &mr->cur_map_set); + if (ret) + return -ENOMEM; -err2: - for (i--; i >= 0; i--) - kfree(mr->map[i]); + if (both) { + ret = rxe_mr_alloc_map_set(num_map, &mr->next_map_set); + if (ret) + goto err_free; + } - kfree(mr->map); -err1: + return 0; + +err_free: + rxe_mr_free_map_set(mr->num_map, mr->cur_map_set); + mr->cur_map_set = NULL; return -ENOMEM; } @@ -100,12 +158,13 @@ void rxe_mr_init_dma(struct rxe_pd *pd, int access, struct rxe_mr *mr) mr->ibmr.pd = &pd->ibpd; mr->access = access; mr->state = RXE_MR_STATE_VALID; - mr->type = RXE_MR_TYPE_DMA; + mr->type = IB_MR_TYPE_DMA; } int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, int access, struct rxe_mr *mr) { + struct rxe_map_set *set; struct rxe_map **map; struct rxe_phys_buf *buf = NULL; struct ib_umem *umem; @@ -113,7 +172,6 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, int num_buf; void *vaddr; int err; - int i; umem = ib_umem_get(pd->ibpd.device, start, length, access); if (IS_ERR(umem)) { @@ -127,18 +185,20 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, rxe_mr_init(access, mr); - err = rxe_mr_alloc(mr, num_buf); + err = rxe_mr_alloc(mr, num_buf, 0); if (err) { pr_warn("%s: Unable to allocate memory for map\n", __func__); goto err_release_umem; } - mr->page_shift = PAGE_SHIFT; - mr->page_mask = PAGE_SIZE - 1; + set = mr->cur_map_set; + set->page_shift = PAGE_SHIFT; + set->page_mask = PAGE_SIZE - 1; + + num_buf = 0; + map = set->map; - 
num_buf = 0; - map = mr->map; if (length > 0) { buf = map[0]->buf; @@ -154,33 +214,29 @@ int rxe_mr_init_user(struct rxe_pd *pd, u64 start, u64 length, u64 iova, pr_warn("%s: Unable to get virtual address\n", __func__); err = -ENOMEM; - goto err_cleanup_map; + goto err_release_umem; } buf->addr = (uintptr_t)vaddr; buf->size = PAGE_SIZE; num_buf++; buf++; - } } mr->ibmr.pd = &pd->ibpd; mr->umem = umem; mr->access = access; - mr->length = length; - mr->iova = iova; - mr->va = start; - mr->offset = ib_umem_offset(umem); mr->state = RXE_MR_STATE_VALID; - mr->type = RXE_MR_TYPE_MR; + mr->type = IB_MR_TYPE_USER; + + set->length = length; + set->iova = iova; + set->va = start; + set->offset = ib_umem_offset(umem); return 0; -err_cleanup_map: - for (i = 0; i < mr->num_map; i++) - kfree(mr->map[i]); - kfree(mr->map); err_release_umem: ib_umem_release(umem); err_out: @@ -191,19 +247,17 @@ int rxe_mr_init_fast(struct rxe_pd *pd, int max_pages, struct rxe_mr *mr) { int err; - rxe_mr_init(0, mr); - - /* In fastreg, we also set the rkey */ - mr->ibmr.rkey = mr->ibmr.lkey; + /* always allow remote access for FMRs */ + rxe_mr_init(IB_ACCESS_REMOTE, mr); - err = rxe_mr_alloc(mr, max_pages); + err = rxe_mr_alloc(mr, max_pages, 1); if (err) goto err1; mr->ibmr.pd = &pd->ibpd; mr->max_buf = max_pages; mr->state = RXE_MR_STATE_FREE; - mr->type = RXE_MR_TYPE_MR; + mr->type = IB_MR_TYPE_MEM_REG; return 0; @@ -214,21 +268,24 @@ err1: static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out, size_t *offset_out) { - size_t offset = iova - mr->iova + mr->offset; + struct rxe_map_set *set = mr->cur_map_set; + size_t offset = iova - set->iova + set->offset; int map_index; int buf_index; u64 length; + struct rxe_map *map; - if (likely(mr->page_shift)) { - *offset_out = offset & mr->page_mask; - offset >>= mr->page_shift; + if (likely(set->page_shift)) { + *offset_out = offset & set->page_mask; + offset >>= set->page_shift; *n_out = offset & mr->map_mask; *m_out = offset >> mr->map_shift; } else { map_index = 0; buf_index = 0; - length = mr->map[map_index]->buf[buf_index].size; + map = set->map[map_index]; + length = map->buf[buf_index].size; while (offset >= length) { offset -= length; @@ -238,7 +295,8 @@ static void lookup_iova(struct rxe_mr *mr, u64 iova, int *m_out, int *n_out, map_index++; buf_index = 0; } - length = mr->map[map_index]->buf[buf_index].size; + map = set->map[map_index]; + length = map->buf[buf_index].size; } *m_out = map_index; @@ -259,7 +317,7 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length) goto out; } - if (!mr->map) { + if (!mr->cur_map_set) { addr = (void *)(uintptr_t)iova; goto out; } @@ -272,13 +330,13 @@ void *iova_to_vaddr(struct rxe_mr *mr, u64 iova, int length) lookup_iova(mr, iova, &m, &n, &offset); - if (offset + length > mr->map[m]->buf[n].size) { + if (offset + length > mr->cur_map_set->map[m]->buf[n].size) { pr_warn("crosses page boundary\n"); addr = NULL; goto out; } - addr = (void *)(uintptr_t)mr->map[m]->buf[n].addr + offset; + addr = (void *)(uintptr_t)mr->cur_map_set->map[m]->buf[n].addr + offset; out: return addr; @@ -302,7 +360,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, if (length == 0) return 0; - if (mr->type == RXE_MR_TYPE_DMA) { + if (mr->type == IB_MR_TYPE_DMA) { u8 *src, *dest; src = (dir == RXE_TO_MR_OBJ) ? 
addr : ((void *)(uintptr_t)iova); @@ -314,7 +372,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, return 0; } - WARN_ON_ONCE(!mr->map); + WARN_ON_ONCE(!mr->cur_map_set); err = mr_check_range(mr, iova, length); if (err) { @@ -324,7 +382,7 @@ int rxe_mr_copy(struct rxe_mr *mr, u64 iova, void *addr, int length, lookup_iova(mr, iova, &m, &i, &offset); - map = mr->map + m; + map = mr->cur_map_set->map + m; buf = map[0]->buf + i; while (length > 0) { @@ -507,8 +565,8 @@ struct rxe_mr *lookup_mr(struct rxe_pd *pd, int access, u32 key, if (!mr) return NULL; - if (unlikely((type == RXE_LOOKUP_LOCAL && mr_lkey(mr) != key) || - (type == RXE_LOOKUP_REMOTE && mr_rkey(mr) != key) || + if (unlikely((type == RXE_LOOKUP_LOCAL && mr->lkey != key) || + (type == RXE_LOOKUP_REMOTE && mr->rkey != key) || mr_pd(mr) != pd || (access && !(access & mr->access)) || mr->state != RXE_MR_STATE_VALID)) { rxe_drop_ref(mr); @@ -531,9 +589,9 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey) goto err; } - if (rkey != mr->ibmr.rkey) { - pr_err("%s: rkey (%#x) doesn't match mr->ibmr.rkey (%#x)\n", - __func__, rkey, mr->ibmr.rkey); + if (rkey != mr->rkey) { + pr_err("%s: rkey (%#x) doesn't match mr->rkey (%#x)\n", + __func__, rkey, mr->rkey); ret = -EINVAL; goto err_drop_ref; } @@ -545,6 +603,12 @@ int rxe_invalidate_mr(struct rxe_qp *qp, u32 rkey) goto err_drop_ref; } + if (unlikely(mr->type != IB_MR_TYPE_MEM_REG)) { + pr_warn("%s: mr->type (%d) is wrong type\n", __func__, mr->type); + ret = -EINVAL; + goto err_drop_ref; + } + mr->state = RXE_MR_STATE_FREE; ret = 0; @@ -554,6 +618,67 @@ err: return ret; } +/* user can (re)register fast MR by executing a REG_MR WQE. + * user is expected to hold a reference on the ib mr until the + * WQE completes. + * Once a fast MR is created this is the only way to change the + * private keys. It is the responsibility of the user to maintain + * the ib mr keys in sync with rxe mr keys. + */ +int rxe_reg_fast_mr(struct rxe_qp *qp, struct rxe_send_wqe *wqe) +{ + struct rxe_mr *mr = to_rmr(wqe->wr.wr.reg.mr); + u32 key = wqe->wr.wr.reg.key & 0xff; + u32 access = wqe->wr.wr.reg.access; + struct rxe_map_set *set; + + /* user can only register MR in free state */ + if (unlikely(mr->state != RXE_MR_STATE_FREE)) { + pr_warn("%s: mr->lkey = 0x%x not free\n", + __func__, mr->lkey); + return -EINVAL; + } + + /* user can only register mr with qp in same protection domain */ + if (unlikely(qp->ibqp.pd != mr->ibmr.pd)) { + pr_warn("%s: qp->pd and mr->pd don't match\n", + __func__); + return -EINVAL; + } + + mr->access = access; + mr->lkey = (mr->lkey & ~0xff) | key; + mr->rkey = (access & IB_ACCESS_REMOTE) ? 
mr->lkey : 0; + mr->state = RXE_MR_STATE_VALID; + + set = mr->cur_map_set; + mr->cur_map_set = mr->next_map_set; + mr->cur_map_set->iova = wqe->wr.wr.reg.mr->iova; + mr->next_map_set = set; + + return 0; +} + +int rxe_mr_set_page(struct ib_mr *ibmr, u64 addr) +{ + struct rxe_mr *mr = to_rmr(ibmr); + struct rxe_map_set *set = mr->next_map_set; + struct rxe_map *map; + struct rxe_phys_buf *buf; + + if (unlikely(set->nbuf == mr->num_buf)) + return -ENOMEM; + + map = set->map[set->nbuf / RXE_BUF_PER_MAP]; + buf = &map->buf[set->nbuf % RXE_BUF_PER_MAP]; + + buf->addr = addr; + buf->size = ibmr->page_size; + set->nbuf++; + + return 0; +} + int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) { struct rxe_mr *mr = to_rmr(ibmr); @@ -564,7 +689,7 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) return -EINVAL; } - mr->state = RXE_MR_STATE_ZOMBIE; + mr->state = RXE_MR_STATE_INVALID; rxe_drop_ref(mr_pd(mr)); rxe_drop_index(mr); rxe_drop_ref(mr); @@ -572,17 +697,15 @@ int rxe_dereg_mr(struct ib_mr *ibmr, struct ib_udata *udata) return 0; } -void rxe_mr_cleanup(struct rxe_pool_entry *arg) +void rxe_mr_cleanup(struct rxe_pool_elem *elem) { - struct rxe_mr *mr = container_of(arg, typeof(*mr), pelem); - int i; + struct rxe_mr *mr = container_of(elem, typeof(*mr), elem); ib_umem_release(mr->umem); - if (mr->map) { - for (i = 0; i < mr->num_map; i++) - kfree(mr->map[i]); + if (mr->cur_map_set) + rxe_mr_free_map_set(mr->num_map, mr->cur_map_set); - kfree(mr->map); - } + if (mr->next_map_set) + rxe_mr_free_map_set(mr->num_map, mr->next_map_set); } diff --git a/drivers/infiniband/sw/rxe/rxe_mw.c b/drivers/infiniband/sw/rxe/rxe_mw.c index 5ba77df7598e..32dd8c0b8b9e 100644 --- a/drivers/infiniband/sw/rxe/rxe_mw.c +++ b/drivers/infiniband/sw/rxe/rxe_mw.c @@ -21,7 +21,7 @@ int rxe_alloc_mw(struct ib_mw *ibmw, struct ib_udata *udata) } rxe_add_index(mw); - ibmw->rkey = (mw->pelem.index << 8) | rxe_get_next_key(-1); + mw->rkey = ibmw->rkey = (mw->elem.index << 8) | rxe_get_next_key(-1); mw->state = (mw->ibmw.type == IB_MW_TYPE_2) ? 
RXE_MW_STATE_FREE : RXE_MW_STATE_VALID; spin_lock_init(&mw->lock); @@ -56,11 +56,10 @@ int rxe_dealloc_mw(struct ib_mw *ibmw) { struct rxe_mw *mw = to_rmw(ibmw); struct rxe_pd *pd = to_rpd(ibmw->pd); - unsigned long flags; - spin_lock_irqsave(&mw->lock, flags); + spin_lock_bh(&mw->lock); rxe_do_dealloc_mw(mw); - spin_unlock_irqrestore(&mw->lock, flags); + spin_unlock_bh(&mw->lock); rxe_drop_ref(mw); rxe_drop_ref(pd); @@ -71,6 +70,8 @@ int rxe_dealloc_mw(struct ib_mw *ibmw) static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_mw *mw, struct rxe_mr *mr) { + u32 key = wqe->wr.wr.mw.rkey & 0xff; + if (mw->ibmw.type == IB_MW_TYPE_1) { if (unlikely(mw->state != RXE_MW_STATE_VALID)) { pr_err_once( @@ -108,7 +109,7 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, } } - if (unlikely((wqe->wr.wr.mw.rkey & 0xff) == (mw->ibmw.rkey & 0xff))) { + if (unlikely(key == (mw->rkey & 0xff))) { pr_err_once("attempt to bind MW with same key\n"); return -EINVAL; } @@ -140,15 +141,15 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, /* C10-75 */ if (mw->access & IB_ZERO_BASED) { - if (unlikely(wqe->wr.wr.mw.length > mr->length)) { + if (unlikely(wqe->wr.wr.mw.length > mr->cur_map_set->length)) { pr_err_once( "attempt to bind a ZB MW outside of the MR\n"); return -EINVAL; } } else { - if (unlikely((wqe->wr.wr.mw.addr < mr->iova) || + if (unlikely((wqe->wr.wr.mw.addr < mr->cur_map_set->iova) || ((wqe->wr.wr.mw.addr + wqe->wr.wr.mw.length) > - (mr->iova + mr->length)))) { + (mr->cur_map_set->iova + mr->cur_map_set->length)))) { pr_err_once( "attempt to bind a VA MW outside of the MR\n"); return -EINVAL; @@ -161,13 +162,9 @@ static int rxe_check_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, static void rxe_do_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe, struct rxe_mw *mw, struct rxe_mr *mr) { - u32 rkey; - u32 new_rkey; - - rkey = mw->ibmw.rkey; - new_rkey = (rkey & 0xffffff00) | (wqe->wr.wr.mw.rkey & 0x000000ff); + u32 key = wqe->wr.wr.mw.rkey & 0xff; - mw->ibmw.rkey = new_rkey; + mw->rkey = (mw->rkey & ~0xff) | key; mw->access = wqe->wr.wr.mw.access; mw->state = RXE_MW_STATE_VALID; mw->addr = wqe->wr.wr.mw.addr; @@ -197,29 +194,28 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) struct rxe_mw *mw; struct rxe_mr *mr; struct rxe_dev *rxe = to_rdev(qp->ibqp.device); - unsigned long flags; + u32 mw_rkey = wqe->wr.wr.mw.mw_rkey; + u32 mr_lkey = wqe->wr.wr.mw.mr_lkey; - mw = rxe_pool_get_index(&rxe->mw_pool, - wqe->wr.wr.mw.mw_rkey >> 8); + mw = rxe_pool_get_index(&rxe->mw_pool, mw_rkey >> 8); if (unlikely(!mw)) { ret = -EINVAL; goto err; } - if (unlikely(mw->ibmw.rkey != wqe->wr.wr.mw.mw_rkey)) { + if (unlikely(mw->rkey != mw_rkey)) { ret = -EINVAL; goto err_drop_mw; } if (likely(wqe->wr.wr.mw.length)) { - mr = rxe_pool_get_index(&rxe->mr_pool, - wqe->wr.wr.mw.mr_lkey >> 8); + mr = rxe_pool_get_index(&rxe->mr_pool, mr_lkey >> 8); if (unlikely(!mr)) { ret = -EINVAL; goto err_drop_mw; } - if (unlikely(mr->ibmr.lkey != wqe->wr.wr.mw.mr_lkey)) { + if (unlikely(mr->lkey != mr_lkey)) { ret = -EINVAL; goto err_drop_mr; } @@ -227,7 +223,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) mr = NULL; } - spin_lock_irqsave(&mw->lock, flags); + spin_lock_bh(&mw->lock); ret = rxe_check_bind_mw(qp, wqe, mw, mr); if (ret) @@ -235,7 +231,7 @@ int rxe_bind_mw(struct rxe_qp *qp, struct rxe_send_wqe *wqe) rxe_do_bind_mw(qp, wqe, mw, mr); err_unlock: - spin_unlock_irqrestore(&mw->lock, flags); + 
spin_unlock_bh(&mw->lock); err_drop_mr: if (mr) rxe_drop_ref(mr); @@ -282,7 +278,6 @@ static void rxe_do_invalidate_mw(struct rxe_mw *mw) int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) { struct rxe_dev *rxe = to_rdev(qp->ibqp.device); - unsigned long flags; struct rxe_mw *mw; int ret; @@ -292,12 +287,12 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) goto err; } - if (rkey != mw->ibmw.rkey) { + if (rkey != mw->rkey) { ret = -EINVAL; goto err_drop_ref; } - spin_lock_irqsave(&mw->lock, flags); + spin_lock_bh(&mw->lock); ret = rxe_check_invalidate_mw(qp, mw); if (ret) @@ -305,7 +300,7 @@ int rxe_invalidate_mw(struct rxe_qp *qp, u32 rkey) rxe_do_invalidate_mw(mw); err_unlock: - spin_unlock_irqrestore(&mw->lock, flags); + spin_unlock_bh(&mw->lock); err_drop_ref: rxe_drop_ref(mw); err: @@ -323,7 +318,7 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey) if (!mw) return NULL; - if (unlikely((rxe_mw_rkey(mw) != rkey) || rxe_mw_pd(mw) != pd || + if (unlikely((mw->rkey != rkey) || rxe_mw_pd(mw) != pd || (mw->ibmw.type == IB_MW_TYPE_2 && mw->qp != qp) || (mw->length == 0) || (access && !(access & mw->access)) || @@ -335,9 +330,9 @@ struct rxe_mw *rxe_lookup_mw(struct rxe_qp *qp, int access, u32 rkey) return mw; } -void rxe_mw_cleanup(struct rxe_pool_entry *elem) +void rxe_mw_cleanup(struct rxe_pool_elem *elem) { - struct rxe_mw *mw = container_of(elem, typeof(*mw), pelem); + struct rxe_mw *mw = container_of(elem, typeof(*mw), elem); rxe_drop_index(mw); } diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c index 2cb810cb890a..be72bdbfb4ba 100644 --- a/drivers/infiniband/sw/rxe/rxe_net.c +++ b/drivers/infiniband/sw/rxe/rxe_net.c @@ -22,24 +22,20 @@ static struct rxe_recv_sockets recv_sockets; int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid) { - int err; unsigned char ll_addr[ETH_ALEN]; ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr); - err = dev_mc_add(rxe->ndev, ll_addr); - return err; + return dev_mc_add(rxe->ndev, ll_addr); } int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid) { - int err; unsigned char ll_addr[ETH_ALEN]; ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr); - err = dev_mc_del(rxe->ndev, ll_addr); - return err; + return dev_mc_del(rxe->ndev, ll_addr); } static struct dst_entry *rxe_find_route4(struct net_device *ndev, @@ -444,7 +440,6 @@ int rxe_xmit_packet(struct rxe_qp *qp, struct rxe_pkt_info *pkt, else err = rxe_send(skb, pkt); if (err) { - rxe->xmit_errors++; rxe_counter_inc(rxe, RXE_CNT_SEND_ERR); return err; } diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.c b/drivers/infiniband/sw/rxe/rxe_opcode.c index 3ef5a10a6efd..df596ba7527d 100644 --- a/drivers/infiniband/sw/rxe/rxe_opcode.c +++ b/drivers/infiniband/sw/rxe/rxe_opcode.c @@ -108,8 +108,8 @@ struct rxe_wr_opcode_info rxe_wr_opcode_info[] = { struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { [IB_OPCODE_RC_SEND_FIRST] = { .name = "IB_OPCODE_RC_SEND_FIRST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK - | RXE_SEND_MASK | RXE_START_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK | + RXE_SEND_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -117,9 +117,9 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { } }, [IB_OPCODE_RC_SEND_MIDDLE] = { - .name = "IB_OPCODE_RC_SEND_MIDDLE]", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK - | RXE_MIDDLE_MASK, + .name = "IB_OPCODE_RC_SEND_MIDDLE", + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK | + 
RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -128,8 +128,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_SEND_LAST] = { .name = "IB_OPCODE_RC_SEND_LAST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK - | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | + RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -138,21 +138,21 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RC_SEND_LAST_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RC_SEND_ONLY] = { .name = "IB_OPCODE_RC_SEND_ONLY", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK - | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | + RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -161,33 +161,33 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RC_SEND_ONLY_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RC_RDMA_WRITE_FIRST] = { .name = "IB_OPCODE_RC_RDMA_WRITE_FIRST", - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK, + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RC_RDMA_WRITE_MIDDLE] = { .name = "IB_OPCODE_RC_RDMA_WRITE_MIDDLE", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -196,8 +196,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_RDMA_WRITE_LAST] = { .name = "IB_OPCODE_RC_RDMA_WRITE_LAST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -206,69 +206,69 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RC_RDMA_WRITE_LAST_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | 
RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RC_RDMA_WRITE_ONLY] = { .name = "IB_OPCODE_RC_RDMA_WRITE_ONLY", - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK - | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RC_RDMA_WRITE_ONLY_WITH_IMMEDIATE", - .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RC_RDMA_READ_REQUEST] = { .name = "IB_OPCODE_RC_RDMA_READ_REQUEST", - .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_REQ_MASK | RXE_READ_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST] = { .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_FIRST", - .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_START_MASK, + .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_AETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_RDMA_READ_RESPONSE_MIDDLE] = { @@ -282,109 +282,110 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST] = { .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_LAST", - .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_END_MASK, + .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_AETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY] = { .name = "IB_OPCODE_RC_RDMA_READ_RESPONSE_ONLY", - .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_AETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_AETH_BYTES, + [RXE_PAYLOAD] = 
RXE_BTH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_ACKNOWLEDGE] = { .name = "IB_OPCODE_RC_ACKNOWLEDGE", - .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK - | RXE_END_MASK, + .mask = RXE_AETH_MASK | RXE_ACK_MASK | RXE_START_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_AETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE] = { .name = "IB_OPCODE_RC_ATOMIC_ACKNOWLEDGE", - .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_AETH_MASK | RXE_ATMACK_MASK | RXE_ACK_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_AETH] = RXE_BTH_BYTES, - [RXE_ATMACK] = RXE_BTH_BYTES - + RXE_AETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_ATMACK_BYTES + RXE_AETH_BYTES, + [RXE_ATMACK] = RXE_BTH_BYTES + + RXE_AETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_ATMACK_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RC_COMPARE_SWAP] = { .name = "IB_OPCODE_RC_COMPARE_SWAP", - .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_ATMETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_ATMETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_ATMETH_BYTES, } }, [IB_OPCODE_RC_FETCH_ADD] = { .name = "IB_OPCODE_RC_FETCH_ADD", - .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_ATMETH_MASK | RXE_REQ_MASK | RXE_ATOMIC_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_ATMETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_ATMETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_ATMETH_BYTES, } }, [IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE] = { .name = "IB_OPCODE_RC_SEND_LAST_WITH_INVALIDATE", - .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IETH_BYTES, } }, [IB_OPCODE_RC_SEND_ONLY_WITH_INVALIDATE] = { .name = "IB_OPCODE_RC_SEND_ONLY_INV", - .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_END_MASK | RXE_START_MASK, + .mask = RXE_IETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_END_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_IETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IETH_BYTES, } }, /* UC */ [IB_OPCODE_UC_SEND_FIRST] = { .name = "IB_OPCODE_UC_SEND_FIRST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK - | RXE_SEND_MASK | RXE_START_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_RWR_MASK | + RXE_SEND_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -393,8 +394,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_SEND_MIDDLE] = { .name = 
"IB_OPCODE_UC_SEND_MIDDLE", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_SEND_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -403,8 +404,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_SEND_LAST] = { .name = "IB_OPCODE_UC_SEND_LAST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK - | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | + RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -413,21 +414,21 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_UC_SEND_LAST_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_SEND_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_UC_SEND_ONLY] = { .name = "IB_OPCODE_UC_SEND_ONLY", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK - | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_COMP_MASK | + RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -436,33 +437,33 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_UC_SEND_ONLY_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_UC_RDMA_WRITE_FIRST] = { .name = "IB_OPCODE_UC_RDMA_WRITE_FIRST", - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK, + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_UC_RDMA_WRITE_MIDDLE] = { .name = "IB_OPCODE_UC_RDMA_WRITE_MIDDLE", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -471,8 +472,8 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_RDMA_WRITE_LAST] = { .name = "IB_OPCODE_UC_RDMA_WRITE_LAST", - .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_END_MASK, + .mask = RXE_PAYLOAD_MASK | RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES, .offset = { [RXE_BTH] = 0, @@ -481,460 +482,460 @@ struct rxe_opcode_info rxe_opcode[RXE_NUM_OPCODE] = { }, [IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE] = { .name = 
"IB_OPCODE_UC_RDMA_WRITE_LAST_WITH_IMMEDIATE", - .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_END_MASK, + .mask = RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES, .offset = { [RXE_BTH] = 0, [RXE_IMMDT] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_IMMDT_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_UC_RDMA_WRITE_ONLY] = { .name = "IB_OPCODE_UC_RDMA_WRITE_ONLY", - .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK - | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_UC_RDMA_WRITE_ONLY_WITH_IMMEDIATE", - .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RETH] = RXE_BTH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES + + RXE_IMMDT_BYTES, } }, /* RD */ [IB_OPCODE_RD_SEND_FIRST] = { .name = "IB_OPCODE_RD_SEND_FIRST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_SEND_MIDDLE] = { .name = "IB_OPCODE_RD_SEND_MIDDLE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_SEND_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_SEND_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_SEND_LAST] = { .name = "IB_OPCODE_RD_SEND_LAST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK - | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_COMP_MASK | RXE_SEND_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { 
[RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RD_SEND_LAST_WITH_IMMEDIATE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_SEND_MASK - | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_SEND_MASK | + RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RD_SEND_ONLY] = { .name = "IB_OPCODE_RD_SEND_ONLY", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RD_SEND_ONLY_WITH_IMMEDIATE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_FIRST] = { .name = "IB_OPCODE_RD_RDMA_WRITE_FIRST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK, - .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | 
+ RXE_WRITE_MASK | RXE_START_MASK, + .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_RETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_RETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_RETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_MIDDLE] = { .name = "IB_OPCODE_RD_RDMA_WRITE_MIDDLE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_LAST] = { .name = "IB_OPCODE_RD_RDMA_WRITE_LAST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RD_RDMA_WRITE_LAST_WITH_IMMEDIATE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_IMMDT_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_ONLY] = { .name = "IB_OPCODE_RD_RDMA_WRITE_ONLY", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK - | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_WRITE_MASK | RXE_START_MASK - | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | + RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_WRITE_MASK | RXE_START_MASK | + RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = 
{ [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_RETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_RETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_RETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_RETH_BYTES, } }, [IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_RD_RDMA_WRITE_ONLY_WITH_IMMEDIATE", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK - | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_WRITE_MASK - | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES - + RXE_DETH_BYTES + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | + RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_WRITE_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_RETH_BYTES + + RXE_DETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_RETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_RETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES - + RXE_RETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_RETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_RETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES + + RXE_RETH_BYTES + + RXE_IMMDT_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_REQUEST] = { .name = "IB_OPCODE_RD_RDMA_READ_REQUEST", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK - | RXE_REQ_MASK | RXE_READ_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_RETH_MASK | + RXE_REQ_MASK | RXE_READ_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_RETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_RETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RETH_BYTES - + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_RETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RETH_BYTES + + RXE_DETH_BYTES + + RXE_RDETH_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST] = { .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_FIRST", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK - | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_START_MASK, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | + RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_START_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_AETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE] = { .name = 
"IB_OPCODE_RD_RDMA_READ_RESPONSE_MIDDLE", - .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK - | RXE_MIDDLE_MASK, + .mask = RXE_RDETH_MASK | RXE_PAYLOAD_MASK | RXE_ACK_MASK | + RXE_MIDDLE_MASK, .length = RXE_BTH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST] = { .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_LAST", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK - | RXE_ACK_MASK | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK | + RXE_ACK_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_AETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY] = { .name = "IB_OPCODE_RD_RDMA_READ_RESPONSE_ONLY", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK - | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_PAYLOAD_MASK | + RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_AETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RD_ACKNOWLEDGE] = { .name = "IB_OPCODE_RD_ACKNOWLEDGE", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ACK_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_AETH_BYTES + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, } }, [IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE] = { .name = "IB_OPCODE_RD_ATOMIC_ACKNOWLEDGE", - .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK - | RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_AETH_MASK | RXE_ATMACK_MASK | + RXE_ACK_MASK | RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_ATMACK_BYTES + RXE_AETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_AETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_ATMACK] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_AETH_BYTES, + [RXE_AETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_ATMACK] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_AETH_BYTES, } }, [IB_OPCODE_RD_COMPARE_SWAP] = { .name = "RD_COMPARE_SWAP", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK - | RXE_REQ_MASK | RXE_ATOMIC_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK | + RXE_REQ_MASK | RXE_ATOMIC_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + 
RXE_RDETH_BYTES, - [RXE_ATMETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_ATMETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, [RXE_PAYLOAD] = RXE_BTH_BYTES + - + RXE_ATMETH_BYTES - + RXE_DETH_BYTES + - + RXE_RDETH_BYTES, + RXE_ATMETH_BYTES + + RXE_DETH_BYTES + + RXE_RDETH_BYTES, } }, [IB_OPCODE_RD_FETCH_ADD] = { .name = "IB_OPCODE_RD_FETCH_ADD", - .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK - | RXE_REQ_MASK | RXE_ATOMIC_MASK - | RXE_START_MASK | RXE_END_MASK, - .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES - + RXE_RDETH_BYTES, + .mask = RXE_RDETH_MASK | RXE_DETH_MASK | RXE_ATMETH_MASK | + RXE_REQ_MASK | RXE_ATOMIC_MASK | + RXE_START_MASK | RXE_END_MASK, + .length = RXE_BTH_BYTES + RXE_ATMETH_BYTES + RXE_DETH_BYTES + + RXE_RDETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_RDETH] = RXE_BTH_BYTES, - [RXE_DETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES, - [RXE_ATMETH] = RXE_BTH_BYTES - + RXE_RDETH_BYTES - + RXE_DETH_BYTES, + [RXE_DETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES, + [RXE_ATMETH] = RXE_BTH_BYTES + + RXE_RDETH_BYTES + + RXE_DETH_BYTES, [RXE_PAYLOAD] = RXE_BTH_BYTES + - + RXE_ATMETH_BYTES - + RXE_DETH_BYTES + - + RXE_RDETH_BYTES, + RXE_ATMETH_BYTES + + RXE_DETH_BYTES + + RXE_RDETH_BYTES, } }, /* UD */ [IB_OPCODE_UD_SEND_ONLY] = { .name = "IB_OPCODE_UD_SEND_ONLY", - .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK - | RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK - | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_DETH_MASK | RXE_PAYLOAD_MASK | RXE_REQ_MASK | + RXE_COMP_MASK | RXE_RWR_MASK | RXE_SEND_MASK | + RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_DETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_DETH] = RXE_BTH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_DETH_BYTES, } }, [IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE] = { .name = "IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE", - .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK - | RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK - | RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, + .mask = RXE_DETH_MASK | RXE_IMMDT_MASK | RXE_PAYLOAD_MASK | + RXE_REQ_MASK | RXE_COMP_MASK | RXE_RWR_MASK | + RXE_SEND_MASK | RXE_START_MASK | RXE_END_MASK, .length = RXE_BTH_BYTES + RXE_IMMDT_BYTES + RXE_DETH_BYTES, .offset = { [RXE_BTH] = 0, [RXE_DETH] = RXE_BTH_BYTES, - [RXE_IMMDT] = RXE_BTH_BYTES - + RXE_DETH_BYTES, - [RXE_PAYLOAD] = RXE_BTH_BYTES - + RXE_DETH_BYTES - + RXE_IMMDT_BYTES, + [RXE_IMMDT] = RXE_BTH_BYTES + + RXE_DETH_BYTES, + [RXE_PAYLOAD] = RXE_BTH_BYTES + + RXE_DETH_BYTES + + RXE_IMMDT_BYTES, } }, diff --git a/drivers/infiniband/sw/rxe/rxe_opcode.h b/drivers/infiniband/sw/rxe/rxe_opcode.h index e02f039b8c44..8f9aaaf260f2 100644 --- a/drivers/infiniband/sw/rxe/rxe_opcode.h +++ b/drivers/infiniband/sw/rxe/rxe_opcode.h @@ -22,7 +22,6 @@ enum rxe_wr_mask { WR_LOCAL_OP_MASK = BIT(5), WR_READ_OR_WRITE_MASK = WR_READ_MASK | WR_WRITE_MASK, - WR_READ_WRITE_OR_SEND_MASK = WR_READ_OR_WRITE_MASK | WR_SEND_MASK, WR_WRITE_OR_SEND_MASK = WR_WRITE_MASK | WR_SEND_MASK, WR_ATOMIC_OR_READ_MASK = WR_ATOMIC_MASK | WR_READ_MASK, }; @@ -82,8 +81,9 @@ enum rxe_hdr_mask { RXE_LOOPBACK_MASK = BIT(NUM_HDR_TYPES + 12), - RXE_READ_OR_ATOMIC = (RXE_READ_MASK | RXE_ATOMIC_MASK), - RXE_WRITE_OR_SEND = (RXE_WRITE_MASK | RXE_SEND_MASK), + RXE_READ_OR_ATOMIC_MASK = (RXE_READ_MASK | RXE_ATOMIC_MASK), + RXE_WRITE_OR_SEND_MASK = (RXE_WRITE_MASK | RXE_SEND_MASK), + RXE_READ_OR_WRITE_MASK = (RXE_READ_MASK | RXE_WRITE_MASK), 
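The renamed combined masks above (RXE_READ_OR_ATOMIC_MASK, RXE_WRITE_OR_SEND_MASK, RXE_READ_OR_WRITE_MASK) follow the usual bit-mask union idiom; the toy below shows that idiom with local stand-in names rather than the driver's enums:

        #include <stdbool.h>
        #include <stdio.h>

        enum {
                OP_READ_BIT      = 1u << 0,
                OP_WRITE_BIT     = 1u << 1,
                OP_SEND_BIT      = 1u << 2,
                OP_READ_OR_WRITE = OP_READ_BIT | OP_WRITE_BIT,  /* combined convenience mask */
        };

        /* true if the per-opcode mask carries either the read or the write bit */
        static bool is_read_or_write(unsigned int opcode_mask)
        {
                return opcode_mask & OP_READ_OR_WRITE;
        }

        int main(void)
        {
                printf("%d\n", is_read_or_write(OP_WRITE_BIT | OP_SEND_BIT));   /* 1 */
                printf("%d\n", is_read_or_write(OP_SEND_BIT));                  /* 0 */
                return 0;
        }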
}; #define OPCODE_NONE (-1) diff --git a/drivers/infiniband/sw/rxe/rxe_param.h b/drivers/infiniband/sw/rxe/rxe_param.h index 742e6ec93686..918270e34a35 100644 --- a/drivers/infiniband/sw/rxe/rxe_param.h +++ b/drivers/infiniband/sw/rxe/rxe_param.h @@ -9,6 +9,8 @@ #include <uapi/rdma/rdma_user_rxe.h> +#define DEFAULT_MAX_VALUE (1 << 20) + static inline enum ib_mtu rxe_mtu_int_to_enum(int mtu) { if (mtu < 256) @@ -37,7 +39,7 @@ static inline enum ib_mtu eth_mtu_int_to_enum(int mtu) enum rxe_device_param { RXE_MAX_MR_SIZE = -1ull, RXE_PAGE_SIZE_CAP = 0xfffff000, - RXE_MAX_QP_WR = 0x4000, + RXE_MAX_QP_WR = DEFAULT_MAX_VALUE, RXE_DEVICE_CAP_FLAGS = IB_DEVICE_BAD_PKEY_CNTR | IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_AUTO_PATH_MIG @@ -58,42 +60,44 @@ enum rxe_device_param { RXE_MAX_INLINE_DATA = RXE_MAX_WQE_SIZE - sizeof(struct rxe_send_wqe), RXE_MAX_SGE_RD = 32, - RXE_MAX_CQ = 16384, + RXE_MAX_CQ = DEFAULT_MAX_VALUE, RXE_MAX_LOG_CQE = 15, - RXE_MAX_PD = 0x7ffc, + RXE_MAX_PD = DEFAULT_MAX_VALUE, RXE_MAX_QP_RD_ATOM = 128, RXE_MAX_RES_RD_ATOM = 0x3f000, RXE_MAX_QP_INIT_RD_ATOM = 128, RXE_MAX_MCAST_GRP = 8192, RXE_MAX_MCAST_QP_ATTACH = 56, RXE_MAX_TOT_MCAST_QP_ATTACH = 0x70000, - RXE_MAX_AH = 100, - RXE_MAX_SRQ_WR = 0x4000, + RXE_MAX_AH = (1<<15) - 1, /* 32Ki - 1 */ + RXE_MIN_AH_INDEX = 1, + RXE_MAX_AH_INDEX = RXE_MAX_AH, + RXE_MAX_SRQ_WR = DEFAULT_MAX_VALUE, RXE_MIN_SRQ_WR = 1, RXE_MAX_SRQ_SGE = 27, RXE_MIN_SRQ_SGE = 1, RXE_MAX_FMR_PAGE_LIST_LEN = 512, - RXE_MAX_PKEYS = 1, + RXE_MAX_PKEYS = 64, RXE_LOCAL_CA_ACK_DELAY = 15, - RXE_MAX_UCONTEXT = 512, + RXE_MAX_UCONTEXT = DEFAULT_MAX_VALUE, RXE_NUM_PORT = 1, - RXE_MAX_QP = 0x10000, RXE_MIN_QP_INDEX = 16, - RXE_MAX_QP_INDEX = 0x00020000, + RXE_MAX_QP_INDEX = DEFAULT_MAX_VALUE, + RXE_MAX_QP = DEFAULT_MAX_VALUE - RXE_MIN_QP_INDEX, - RXE_MAX_SRQ = 0x00001000, RXE_MIN_SRQ_INDEX = 0x00020001, - RXE_MAX_SRQ_INDEX = 0x00040000, + RXE_MAX_SRQ_INDEX = DEFAULT_MAX_VALUE, + RXE_MAX_SRQ = DEFAULT_MAX_VALUE - RXE_MIN_SRQ_INDEX, - RXE_MAX_MR = 0x00001000, - RXE_MAX_MW = 0x00001000, RXE_MIN_MR_INDEX = 0x00000001, - RXE_MAX_MR_INDEX = 0x00010000, + RXE_MAX_MR_INDEX = DEFAULT_MAX_VALUE, + RXE_MAX_MR = DEFAULT_MAX_VALUE - RXE_MIN_MR_INDEX, RXE_MIN_MW_INDEX = 0x00010001, RXE_MAX_MW_INDEX = 0x00020000, + RXE_MAX_MW = 0x00001000, RXE_MAX_PKT_PER_ACK = 64, @@ -113,7 +117,7 @@ enum rxe_device_param { /* default/initial rxe port parameters */ enum rxe_port_param { RXE_PORT_GID_TBL_LEN = 1024, - RXE_PORT_PORT_CAP_FLAGS = RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP, + RXE_PORT_PORT_CAP_FLAGS = IB_PORT_CM_SUP, RXE_PORT_MAX_MSG_SZ = 0x800000, RXE_PORT_BAD_PKEY_CNTR = 0, RXE_PORT_QKEY_VIOL_CNTR = 0, diff --git a/drivers/infiniband/sw/rxe/rxe_pool.c b/drivers/infiniband/sw/rxe/rxe_pool.c index ffa8420b4765..4cb003885e00 100644 --- a/drivers/infiniband/sw/rxe/rxe_pool.c +++ b/drivers/infiniband/sw/rxe/rxe_pool.c @@ -5,33 +5,44 @@ */ #include "rxe.h" -#include "rxe_loc.h" -/* info about object pools - */ -struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = { +#define RXE_POOL_ALIGN (16) + +static const struct rxe_type_info { + const char *name; + size_t size; + size_t elem_offset; + void (*cleanup)(struct rxe_pool_elem *obj); + enum rxe_pool_flags flags; + u32 min_index; + u32 max_index; + size_t key_offset; + size_t key_size; +} rxe_type_info[RXE_NUM_TYPES] = { [RXE_TYPE_UC] = { .name = "rxe-uc", .size = sizeof(struct rxe_ucontext), - .elem_offset = offsetof(struct rxe_ucontext, pelem), + .elem_offset = offsetof(struct rxe_ucontext, elem), .flags = RXE_POOL_NO_ALLOC, }, [RXE_TYPE_PD] = { .name = 
"rxe-pd", .size = sizeof(struct rxe_pd), - .elem_offset = offsetof(struct rxe_pd, pelem), + .elem_offset = offsetof(struct rxe_pd, elem), .flags = RXE_POOL_NO_ALLOC, }, [RXE_TYPE_AH] = { .name = "rxe-ah", .size = sizeof(struct rxe_ah), - .elem_offset = offsetof(struct rxe_ah, pelem), - .flags = RXE_POOL_NO_ALLOC, + .elem_offset = offsetof(struct rxe_ah, elem), + .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, + .min_index = RXE_MIN_AH_INDEX, + .max_index = RXE_MAX_AH_INDEX, }, [RXE_TYPE_SRQ] = { .name = "rxe-srq", .size = sizeof(struct rxe_srq), - .elem_offset = offsetof(struct rxe_srq, pelem), + .elem_offset = offsetof(struct rxe_srq, elem), .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, .min_index = RXE_MIN_SRQ_INDEX, .max_index = RXE_MAX_SRQ_INDEX, @@ -39,7 +50,7 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = { [RXE_TYPE_QP] = { .name = "rxe-qp", .size = sizeof(struct rxe_qp), - .elem_offset = offsetof(struct rxe_qp, pelem), + .elem_offset = offsetof(struct rxe_qp, elem), .cleanup = rxe_qp_cleanup, .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, .min_index = RXE_MIN_QP_INDEX, @@ -48,32 +59,32 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = { [RXE_TYPE_CQ] = { .name = "rxe-cq", .size = sizeof(struct rxe_cq), - .elem_offset = offsetof(struct rxe_cq, pelem), + .elem_offset = offsetof(struct rxe_cq, elem), .flags = RXE_POOL_NO_ALLOC, .cleanup = rxe_cq_cleanup, }, [RXE_TYPE_MR] = { .name = "rxe-mr", .size = sizeof(struct rxe_mr), - .elem_offset = offsetof(struct rxe_mr, pelem), + .elem_offset = offsetof(struct rxe_mr, elem), .cleanup = rxe_mr_cleanup, .flags = RXE_POOL_INDEX, - .max_index = RXE_MAX_MR_INDEX, .min_index = RXE_MIN_MR_INDEX, + .max_index = RXE_MAX_MR_INDEX, }, [RXE_TYPE_MW] = { .name = "rxe-mw", .size = sizeof(struct rxe_mw), - .elem_offset = offsetof(struct rxe_mw, pelem), + .elem_offset = offsetof(struct rxe_mw, elem), .cleanup = rxe_mw_cleanup, .flags = RXE_POOL_INDEX | RXE_POOL_NO_ALLOC, - .max_index = RXE_MAX_MW_INDEX, .min_index = RXE_MIN_MW_INDEX, + .max_index = RXE_MAX_MW_INDEX, }, [RXE_TYPE_MC_GRP] = { .name = "rxe-mc_grp", .size = sizeof(struct rxe_mc_grp), - .elem_offset = offsetof(struct rxe_mc_grp, pelem), + .elem_offset = offsetof(struct rxe_mc_grp, elem), .cleanup = rxe_mc_cleanup, .flags = RXE_POOL_KEY, .key_offset = offsetof(struct rxe_mc_grp, mgid), @@ -82,19 +93,13 @@ struct rxe_type_info rxe_type_info[RXE_NUM_TYPES] = { [RXE_TYPE_MC_ELEM] = { .name = "rxe-mc_elem", .size = sizeof(struct rxe_mc_elem), - .elem_offset = offsetof(struct rxe_mc_elem, pelem), + .elem_offset = offsetof(struct rxe_mc_elem, elem), }, }; -static inline const char *pool_name(struct rxe_pool *pool) -{ - return rxe_type_info[pool->type].name; -} - static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min) { int err = 0; - size_t size; if ((max - min + 1) < pool->max_elem) { pr_warn("not enough indices for max_elem\n"); @@ -105,16 +110,12 @@ static int rxe_pool_init_index(struct rxe_pool *pool, u32 max, u32 min) pool->index.max_index = max; pool->index.min_index = min; - size = BITS_TO_LONGS(max - min + 1) * sizeof(long); - pool->index.table = kmalloc(size, GFP_KERNEL); + pool->index.table = bitmap_zalloc(max - min + 1, GFP_KERNEL); if (!pool->index.table) { err = -ENOMEM; goto out; } - pool->index.table_size = size; - bitmap_zero(pool->index.table, max - min + 1); - out: return err; } @@ -125,35 +126,36 @@ int rxe_pool_init( enum rxe_elem_type type, unsigned int max_elem) { + const struct rxe_type_info *info = &rxe_type_info[type]; int err = 0; - size_t size = 
rxe_type_info[type].size; memset(pool, 0, sizeof(*pool)); pool->rxe = rxe; + pool->name = info->name; pool->type = type; pool->max_elem = max_elem; - pool->elem_size = ALIGN(size, RXE_POOL_ALIGN); - pool->flags = rxe_type_info[type].flags; - pool->index.tree = RB_ROOT; - pool->key.tree = RB_ROOT; - pool->cleanup = rxe_type_info[type].cleanup; + pool->elem_size = ALIGN(info->size, RXE_POOL_ALIGN); + pool->elem_offset = info->elem_offset; + pool->flags = info->flags; + pool->cleanup = info->cleanup; atomic_set(&pool->num_elem, 0); rwlock_init(&pool->pool_lock); - if (rxe_type_info[type].flags & RXE_POOL_INDEX) { - err = rxe_pool_init_index(pool, - rxe_type_info[type].max_index, - rxe_type_info[type].min_index); + if (pool->flags & RXE_POOL_INDEX) { + pool->index.tree = RB_ROOT; + err = rxe_pool_init_index(pool, info->max_index, + info->min_index); if (err) goto out; } - if (rxe_type_info[type].flags & RXE_POOL_KEY) { - pool->key.key_offset = rxe_type_info[type].key_offset; - pool->key.key_size = rxe_type_info[type].key_size; + if (pool->flags & RXE_POOL_KEY) { + pool->key.tree = RB_ROOT; + pool->key.key_offset = info->key_offset; + pool->key.key_size = info->key_size; } out: @@ -164,9 +166,10 @@ void rxe_pool_cleanup(struct rxe_pool *pool) { if (atomic_read(&pool->num_elem) > 0) pr_warn("%s pool destroyed with unfree'd elem\n", - pool_name(pool)); + pool->name); - kfree(pool->index.table); + if (pool->flags & RXE_POOL_INDEX) + bitmap_free(pool->index.table); } static u32 alloc_index(struct rxe_pool *pool) @@ -184,15 +187,15 @@ static u32 alloc_index(struct rxe_pool *pool) return index + pool->index.min_index; } -static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new) +static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_elem *new) { struct rb_node **link = &pool->index.tree.rb_node; struct rb_node *parent = NULL; - struct rxe_pool_entry *elem; + struct rxe_pool_elem *elem; while (*link) { parent = *link; - elem = rb_entry(parent, struct rxe_pool_entry, index_node); + elem = rb_entry(parent, struct rxe_pool_elem, index_node); if (elem->index == new->index) { pr_warn("element already exists!\n"); @@ -211,19 +214,20 @@ static int rxe_insert_index(struct rxe_pool *pool, struct rxe_pool_entry *new) return 0; } -static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new) +static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_elem *new) { struct rb_node **link = &pool->key.tree.rb_node; struct rb_node *parent = NULL; - struct rxe_pool_entry *elem; + struct rxe_pool_elem *elem; int cmp; while (*link) { parent = *link; - elem = rb_entry(parent, struct rxe_pool_entry, key_node); + elem = rb_entry(parent, struct rxe_pool_elem, key_node); cmp = memcmp((u8 *)elem + pool->key.key_offset, - (u8 *)new + pool->key.key_offset, pool->key.key_size); + (u8 *)new + pool->key.key_offset, + pool->key.key_size); if (cmp == 0) { pr_warn("key already exists!\n"); @@ -242,7 +246,7 @@ static int rxe_insert_key(struct rxe_pool *pool, struct rxe_pool_entry *new) return 0; } -int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key) +int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key) { struct rxe_pool *pool = elem->pool; int err; @@ -253,37 +257,35 @@ int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key) return err; } -int __rxe_add_key(struct rxe_pool_entry *elem, void *key) +int __rxe_add_key(struct rxe_pool_elem *elem, void *key) { struct rxe_pool *pool = elem->pool; - unsigned long flags; int err; - 
write_lock_irqsave(&pool->pool_lock, flags); + write_lock_bh(&pool->pool_lock); err = __rxe_add_key_locked(elem, key); - write_unlock_irqrestore(&pool->pool_lock, flags); + write_unlock_bh(&pool->pool_lock); return err; } -void __rxe_drop_key_locked(struct rxe_pool_entry *elem) +void __rxe_drop_key_locked(struct rxe_pool_elem *elem) { struct rxe_pool *pool = elem->pool; rb_erase(&elem->key_node, &pool->key.tree); } -void __rxe_drop_key(struct rxe_pool_entry *elem) +void __rxe_drop_key(struct rxe_pool_elem *elem) { struct rxe_pool *pool = elem->pool; - unsigned long flags; - write_lock_irqsave(&pool->pool_lock, flags); + write_lock_bh(&pool->pool_lock); __rxe_drop_key_locked(elem); - write_unlock_irqrestore(&pool->pool_lock, flags); + write_unlock_bh(&pool->pool_lock); } -int __rxe_add_index_locked(struct rxe_pool_entry *elem) +int __rxe_add_index_locked(struct rxe_pool_elem *elem) { struct rxe_pool *pool = elem->pool; int err; @@ -294,20 +296,19 @@ int __rxe_add_index_locked(struct rxe_pool_entry *elem) return err; } -int __rxe_add_index(struct rxe_pool_entry *elem) +int __rxe_add_index(struct rxe_pool_elem *elem) { struct rxe_pool *pool = elem->pool; - unsigned long flags; int err; - write_lock_irqsave(&pool->pool_lock, flags); + write_lock_bh(&pool->pool_lock); err = __rxe_add_index_locked(elem); - write_unlock_irqrestore(&pool->pool_lock, flags); + write_unlock_bh(&pool->pool_lock); return err; } -void __rxe_drop_index_locked(struct rxe_pool_entry *elem) +void __rxe_drop_index_locked(struct rxe_pool_elem *elem) { struct rxe_pool *pool = elem->pool; @@ -315,32 +316,31 @@ void __rxe_drop_index_locked(struct rxe_pool_entry *elem) rb_erase(&elem->index_node, &pool->index.tree); } -void __rxe_drop_index(struct rxe_pool_entry *elem) +void __rxe_drop_index(struct rxe_pool_elem *elem) { struct rxe_pool *pool = elem->pool; - unsigned long flags; - write_lock_irqsave(&pool->pool_lock, flags); + write_lock_bh(&pool->pool_lock); __rxe_drop_index_locked(elem); - write_unlock_irqrestore(&pool->pool_lock, flags); + write_unlock_bh(&pool->pool_lock); } void *rxe_alloc_locked(struct rxe_pool *pool) { - struct rxe_type_info *info = &rxe_type_info[pool->type]; - struct rxe_pool_entry *elem; - u8 *obj; + struct rxe_pool_elem *elem; + void *obj; if (atomic_inc_return(&pool->num_elem) > pool->max_elem) goto out_cnt; - obj = kzalloc(info->size, GFP_ATOMIC); + obj = kzalloc(pool->elem_size, GFP_ATOMIC); if (!obj) goto out_cnt; - elem = (struct rxe_pool_entry *)(obj + info->elem_offset); + elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset); elem->pool = pool; + elem->obj = obj; kref_init(&elem->ref_cnt); return obj; @@ -352,20 +352,20 @@ out_cnt: void *rxe_alloc(struct rxe_pool *pool) { - struct rxe_type_info *info = &rxe_type_info[pool->type]; - struct rxe_pool_entry *elem; - u8 *obj; + struct rxe_pool_elem *elem; + void *obj; if (atomic_inc_return(&pool->num_elem) > pool->max_elem) goto out_cnt; - obj = kzalloc(info->size, GFP_KERNEL); + obj = kzalloc(pool->elem_size, GFP_KERNEL); if (!obj) goto out_cnt; - elem = (struct rxe_pool_entry *)(obj + info->elem_offset); + elem = (struct rxe_pool_elem *)((u8 *)obj + pool->elem_offset); elem->pool = pool; + elem->obj = obj; kref_init(&elem->ref_cnt); return obj; @@ -375,12 +375,13 @@ out_cnt: return NULL; } -int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem) +int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem) { if (atomic_inc_return(&pool->num_elem) > pool->max_elem) goto out_cnt; elem->pool = pool; + elem->obj 
= (u8 *)elem - pool->elem_offset; kref_init(&elem->ref_cnt); return 0; @@ -392,17 +393,16 @@ out_cnt: void rxe_elem_release(struct kref *kref) { - struct rxe_pool_entry *elem = - container_of(kref, struct rxe_pool_entry, ref_cnt); + struct rxe_pool_elem *elem = + container_of(kref, struct rxe_pool_elem, ref_cnt); struct rxe_pool *pool = elem->pool; - struct rxe_type_info *info = &rxe_type_info[pool->type]; - u8 *obj; + void *obj; if (pool->cleanup) pool->cleanup(elem); if (!(pool->flags & RXE_POOL_NO_ALLOC)) { - obj = (u8 *)elem - info->elem_offset; + obj = elem->obj; kfree(obj); } @@ -411,15 +411,14 @@ void rxe_elem_release(struct kref *kref) void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index) { - struct rxe_type_info *info = &rxe_type_info[pool->type]; struct rb_node *node; - struct rxe_pool_entry *elem; - u8 *obj; + struct rxe_pool_elem *elem; + void *obj; node = pool->index.tree.rb_node; while (node) { - elem = rb_entry(node, struct rxe_pool_entry, index_node); + elem = rb_entry(node, struct rxe_pool_elem, index_node); if (elem->index > index) node = node->rb_left; @@ -431,7 +430,7 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index) if (node) { kref_get(&elem->ref_cnt); - obj = (u8 *)elem - info->elem_offset; + obj = elem->obj; } else { obj = NULL; } @@ -441,28 +440,26 @@ void *rxe_pool_get_index_locked(struct rxe_pool *pool, u32 index) void *rxe_pool_get_index(struct rxe_pool *pool, u32 index) { - u8 *obj; - unsigned long flags; + void *obj; - read_lock_irqsave(&pool->pool_lock, flags); + read_lock_bh(&pool->pool_lock); obj = rxe_pool_get_index_locked(pool, index); - read_unlock_irqrestore(&pool->pool_lock, flags); + read_unlock_bh(&pool->pool_lock); return obj; } void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key) { - struct rxe_type_info *info = &rxe_type_info[pool->type]; struct rb_node *node; - struct rxe_pool_entry *elem; - u8 *obj; + struct rxe_pool_elem *elem; + void *obj; int cmp; node = pool->key.tree.rb_node; while (node) { - elem = rb_entry(node, struct rxe_pool_entry, key_node); + elem = rb_entry(node, struct rxe_pool_elem, key_node); cmp = memcmp((u8 *)elem + pool->key.key_offset, key, pool->key.key_size); @@ -477,7 +474,7 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key) if (node) { kref_get(&elem->ref_cnt); - obj = (u8 *)elem - info->elem_offset; + obj = elem->obj; } else { obj = NULL; } @@ -487,12 +484,11 @@ void *rxe_pool_get_key_locked(struct rxe_pool *pool, void *key) void *rxe_pool_get_key(struct rxe_pool *pool, void *key) { - u8 *obj; - unsigned long flags; + void *obj; - read_lock_irqsave(&pool->pool_lock, flags); + read_lock_bh(&pool->pool_lock); obj = rxe_pool_get_key_locked(pool, key); - read_unlock_irqrestore(&pool->pool_lock, flags); + read_unlock_bh(&pool->pool_lock); return obj; } diff --git a/drivers/infiniband/sw/rxe/rxe_pool.h b/drivers/infiniband/sw/rxe/rxe_pool.h index 1feca1bffced..214279310f4d 100644 --- a/drivers/infiniband/sw/rxe/rxe_pool.h +++ b/drivers/infiniband/sw/rxe/rxe_pool.h @@ -7,9 +7,6 @@ #ifndef RXE_POOL_H #define RXE_POOL_H -#define RXE_POOL_ALIGN (16) -#define RXE_POOL_CACHE_FLAGS (0) - enum rxe_pool_flags { RXE_POOL_INDEX = BIT(1), RXE_POOL_KEY = BIT(2), @@ -30,24 +27,9 @@ enum rxe_elem_type { RXE_NUM_TYPES, /* keep me last */ }; -struct rxe_pool_entry; - -struct rxe_type_info { - const char *name; - size_t size; - size_t elem_offset; - void (*cleanup)(struct rxe_pool_entry *obj); - enum rxe_pool_flags flags; - u32 max_index; - u32 min_index; - size_t key_offset; - size_t 
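
[Editor's sketch, illustrative names only] The allocation and release paths above rely on each rxe object embedding a pool element at a known offset, with the new elem->obj back-pointer replacing the reverse pointer arithmetic that rxe_elem_release() used to do. A self-contained model of that pattern:

#include <stddef.h>

struct pool_elem {
	void *obj;			/* back-pointer to the containing object */
};

struct demo_qp {
	int ibqp_placeholder;		/* stands in for the embedded ib_* struct */
	struct pool_elem elem;		/* embedded element, like rxe_qp.elem */
};

static struct pool_elem *elem_of(void *obj, size_t elem_offset)
{
	struct pool_elem *elem = (struct pool_elem *)((char *)obj + elem_offset);

	elem->obj = obj;		/* what rxe_alloc()/__rxe_add_to_pool() now record */
	return elem;
}

/* usage: elem_of(qp, offsetof(struct demo_qp, elem))->obj == qp */
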
key_size; -}; - -extern struct rxe_type_info rxe_type_info[]; - -struct rxe_pool_entry { +struct rxe_pool_elem { struct rxe_pool *pool; + void *obj; struct kref ref_cnt; struct list_head list; @@ -61,20 +43,21 @@ struct rxe_pool_entry { struct rxe_pool { struct rxe_dev *rxe; + const char *name; rwlock_t pool_lock; /* protects pool add/del/search */ - size_t elem_size; - void (*cleanup)(struct rxe_pool_entry *obj); + void (*cleanup)(struct rxe_pool_elem *obj); enum rxe_pool_flags flags; enum rxe_elem_type type; unsigned int max_elem; atomic_t num_elem; + size_t elem_size; + size_t elem_offset; /* only used if indexed */ struct { struct rb_root tree; unsigned long *table; - size_t table_size; u32 last; u32 max_index; u32 min_index; @@ -104,51 +87,51 @@ void *rxe_alloc_locked(struct rxe_pool *pool); void *rxe_alloc(struct rxe_pool *pool); /* connect already allocated object to pool */ -int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_entry *elem); +int __rxe_add_to_pool(struct rxe_pool *pool, struct rxe_pool_elem *elem); -#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->pelem) +#define rxe_add_to_pool(pool, obj) __rxe_add_to_pool(pool, &(obj)->elem) /* assign an index to an indexed object and insert object into * pool's rb tree holding and not holding the pool_lock */ -int __rxe_add_index_locked(struct rxe_pool_entry *elem); +int __rxe_add_index_locked(struct rxe_pool_elem *elem); -#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->pelem) +#define rxe_add_index_locked(obj) __rxe_add_index_locked(&(obj)->elem) -int __rxe_add_index(struct rxe_pool_entry *elem); +int __rxe_add_index(struct rxe_pool_elem *elem); -#define rxe_add_index(obj) __rxe_add_index(&(obj)->pelem) +#define rxe_add_index(obj) __rxe_add_index(&(obj)->elem) /* drop an index and remove object from rb tree * holding and not holding the pool_lock */ -void __rxe_drop_index_locked(struct rxe_pool_entry *elem); +void __rxe_drop_index_locked(struct rxe_pool_elem *elem); -#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->pelem) +#define rxe_drop_index_locked(obj) __rxe_drop_index_locked(&(obj)->elem) -void __rxe_drop_index(struct rxe_pool_entry *elem); +void __rxe_drop_index(struct rxe_pool_elem *elem); -#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->pelem) +#define rxe_drop_index(obj) __rxe_drop_index(&(obj)->elem) /* assign a key to a keyed object and insert object into * pool's rb tree holding and not holding pool_lock */ -int __rxe_add_key_locked(struct rxe_pool_entry *elem, void *key); +int __rxe_add_key_locked(struct rxe_pool_elem *elem, void *key); -#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->pelem, key) +#define rxe_add_key_locked(obj, key) __rxe_add_key_locked(&(obj)->elem, key) -int __rxe_add_key(struct rxe_pool_entry *elem, void *key); +int __rxe_add_key(struct rxe_pool_elem *elem, void *key); -#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->pelem, key) +#define rxe_add_key(obj, key) __rxe_add_key(&(obj)->elem, key) /* remove elem from rb tree holding and not holding the pool_lock */ -void __rxe_drop_key_locked(struct rxe_pool_entry *elem); +void __rxe_drop_key_locked(struct rxe_pool_elem *elem); -#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->pelem) +#define rxe_drop_key_locked(obj) __rxe_drop_key_locked(&(obj)->elem) -void __rxe_drop_key(struct rxe_pool_entry *elem); +void __rxe_drop_key(struct rxe_pool_elem *elem); -#define rxe_drop_key(obj) __rxe_drop_key(&(obj)->pelem) +#define rxe_drop_key(obj) 
__rxe_drop_key(&(obj)->elem) /* lookup an indexed object from index holding and not holding the pool_lock. * takes a reference on object @@ -168,9 +151,9 @@ void *rxe_pool_get_key(struct rxe_pool *pool, void *key); void rxe_elem_release(struct kref *kref); /* take a reference on an object */ -#define rxe_add_ref(elem) kref_get(&(elem)->pelem.ref_cnt) +#define rxe_add_ref(obj) kref_get(&(obj)->elem.ref_cnt) /* drop a reference on an object */ -#define rxe_drop_ref(elem) kref_put(&(elem)->pelem.ref_cnt, rxe_elem_release) +#define rxe_drop_ref(obj) kref_put(&(obj)->elem.ref_cnt, rxe_elem_release) #endif /* RXE_POOL_H */ diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c index 1ab6af7ddb25..5018b9387694 100644 --- a/drivers/infiniband/sw/rxe/rxe_qp.c +++ b/drivers/infiniband/sw/rxe/rxe_qp.c @@ -167,7 +167,7 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, qp->attr.path_mtu = 1; qp->mtu = ib_mtu_enum_to_int(qp->attr.path_mtu); - qpn = qp->pelem.index; + qpn = qp->elem.index; port = &rxe->port; switch (init->qp_type) { @@ -190,8 +190,6 @@ static void rxe_qp_init_misc(struct rxe_dev *rxe, struct rxe_qp *qp, INIT_LIST_HEAD(&qp->grp_list); - skb_queue_head_init(&qp->send_pkts); - spin_lock_init(&qp->grp_lock); spin_lock_init(&qp->state_lock); @@ -219,8 +217,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, * the port number must be in the Dynamic Ports range * (0xc000 - 0xffff). */ - qp->src_port = RXE_ROCE_V2_SPORT + - (hash_32_generic(qp_num(qp), 14) & 0x3fff); + qp->src_port = RXE_ROCE_V2_SPORT + (hash_32(qp_num(qp), 14) & 0x3fff); qp->sq.max_wr = init->cap.max_send_wr; /* These caps are limited by rxe_qp_chk_cap() done by the caller */ @@ -231,7 +228,7 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, qp->sq.max_inline = init->cap.max_inline_data = wqe_size; wqe_size += sizeof(struct rxe_send_wqe); - type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL; + type = QUEUE_TYPE_FROM_CLIENT; qp->sq.queue = rxe_queue_init(rxe, &qp->sq.max_wr, wqe_size, type); if (!qp->sq.queue) @@ -248,12 +245,8 @@ static int rxe_qp_init_req(struct rxe_dev *rxe, struct rxe_qp *qp, return err; } - if (qp->is_user) - qp->req.wqe_index = producer_index(qp->sq.queue, - QUEUE_TYPE_FROM_USER); - else - qp->req.wqe_index = producer_index(qp->sq.queue, - QUEUE_TYPE_KERNEL); + qp->req.wqe_index = queue_get_producer(qp->sq.queue, + QUEUE_TYPE_FROM_CLIENT); qp->req.state = QP_STATE_RESET; qp->req.opcode = -1; @@ -293,7 +286,7 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, pr_debug("qp#%d max_wr = %d, max_sge = %d, wqe_size = %d\n", qp_num(qp), qp->rq.max_wr, qp->rq.max_sge, wqe_size); - type = uresp ? 
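
[Editor's note, not part of the patch] On the src_port line above: assuming RXE_ROCE_V2_SPORT is the base of the dynamic range named in the comment (0xc000), masking the hash to 14 bits keeps the derived UDP source port inside 0xc000-0xffff. A trivial illustrative check:

/* hash14 stands in for hash_32(qp_num(qp), 14); any value works */
static unsigned int roce_v2_src_port(unsigned int hash14)
{
	return 0xc000 + (hash14 & 0x3fff);	/* always within 0xc000..0xffff */
}
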
QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL; + type = QUEUE_TYPE_FROM_CLIENT; qp->rq.queue = rxe_queue_init(rxe, &qp->rq.max_wr, wqe_size, type); if (!qp->rq.queue) @@ -313,8 +306,6 @@ static int rxe_qp_init_resp(struct rxe_dev *rxe, struct rxe_qp *qp, spin_lock_init(&qp->rq.producer_lock); spin_lock_init(&qp->rq.consumer_lock); - qp->rq.is_user = qp->is_user; - skb_queue_head_init(&qp->resp_pkts); rxe_init_task(rxe, &qp->resp.task, qp, @@ -367,6 +358,7 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd, err2: rxe_queue_cleanup(qp->sq.queue); + qp->sq.queue = NULL; err1: qp->pd = NULL; qp->rcq = NULL; @@ -839,9 +831,9 @@ static void rxe_qp_do_cleanup(struct work_struct *work) } /* called when the last reference to the qp is dropped */ -void rxe_qp_cleanup(struct rxe_pool_entry *arg) +void rxe_qp_cleanup(struct rxe_pool_elem *elem) { - struct rxe_qp *qp = container_of(arg, typeof(*qp), pelem); + struct rxe_qp *qp = container_of(elem, typeof(*qp), elem); execute_in_process_context(rxe_qp_do_cleanup, &qp->cleanup_work); } diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c index 72d95398e604..a1b283dd2d4c 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.c +++ b/drivers/infiniband/sw/rxe/rxe_queue.c @@ -111,17 +111,33 @@ err1: static int resize_finish(struct rxe_queue *q, struct rxe_queue *new_q, unsigned int num_elem) { - if (!queue_empty(q, q->type) && (num_elem < queue_count(q, q->type))) + enum queue_type type = q->type; + u32 prod; + u32 cons; + + if (!queue_empty(q, q->type) && (num_elem < queue_count(q, type))) return -EINVAL; - while (!queue_empty(q, q->type)) { - memcpy(producer_addr(new_q, new_q->type), - consumer_addr(q, q->type), - new_q->elem_size); - advance_producer(new_q, new_q->type); - advance_consumer(q, q->type); + prod = queue_get_producer(new_q, type); + cons = queue_get_consumer(q, type); + + while (!queue_empty(q, type)) { + memcpy(queue_addr_from_index(new_q, prod), + queue_addr_from_index(q, cons), new_q->elem_size); + prod = queue_next_index(new_q, prod); + cons = queue_next_index(q, cons); } + new_q->buf->producer_index = prod; + q->buf->consumer_index = cons; + + /* update private index copies */ + if (type == QUEUE_TYPE_TO_CLIENT) + new_q->index = new_q->buf->producer_index; + else + q->index = q->buf->consumer_index; + + /* exchange rxe_queue headers */ swap(*q, *new_q); return 0; @@ -135,7 +151,6 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, struct rxe_queue *new_q; unsigned int num_elem = *num_elem_p; int err; - unsigned long flags = 0, flags1; new_q = rxe_queue_init(q->rxe, &num_elem, elem_size, q->type); if (!new_q) @@ -149,17 +164,17 @@ int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, goto err1; } - spin_lock_irqsave(consumer_lock, flags1); + spin_lock_bh(consumer_lock); if (producer_lock) { - spin_lock_irqsave(producer_lock, flags); + spin_lock_bh(producer_lock); err = resize_finish(q, new_q, num_elem); - spin_unlock_irqrestore(producer_lock, flags); + spin_unlock_bh(producer_lock); } else { err = resize_finish(q, new_q, num_elem); } - spin_unlock_irqrestore(consumer_lock, flags1); + spin_unlock_bh(consumer_lock); rxe_queue_cleanup(new_q); /* new/old dep on err */ if (err) diff --git a/drivers/infiniband/sw/rxe/rxe_queue.h b/drivers/infiniband/sw/rxe/rxe_queue.h index 2702b0e55fc3..6227112ef7a2 100644 --- a/drivers/infiniband/sw/rxe/rxe_queue.h +++ b/drivers/infiniband/sw/rxe/rxe_queue.h @@ -10,34 +10,47 @@ /* for definition of shared struct 
rxe_queue_buf */ #include <uapi/rdma/rdma_user_rxe.h> -/* implements a simple circular buffer that can optionally be - * shared between user space and the kernel and can be resized - * the requested element size is rounded up to a power of 2 - * and the number of elements in the buffer is also rounded - * up to a power of 2. Since the queue is empty when the - * producer and consumer indices match the maximum capacity - * of the queue is one less than the number of element slots +/* Implements a simple circular buffer that is shared between user + * and the driver and can be resized. The requested element size is + * rounded up to a power of 2 and the number of elements in the buffer + * is also rounded up to a power of 2. Since the queue is empty when + * the producer and consumer indices match the maximum capacity of the + * queue is one less than the number of element slots. * * Notes: - * - Kernel space indices are always masked off to q->index_mask - * before storing so do not need to be checked on reads. - * - User space indices may be out of range and must be - * masked before use when read. - * - The kernel indices for shared queues must not be written - * by user space so a local copy is used and a shared copy is - * stored when the local copy changes. + * - The driver indices are always masked off to q->index_mask + * before storing so do not need to be checked on reads. + * - The user whether user space or kernel is generally + * not trusted so its parameters are masked to make sure + * they do not access the queue out of bounds on reads. + * - The driver indices for queues must not be written + * by user so a local copy is used and a shared copy is + * stored when the local copy is changed. * - By passing the type in the parameter list separate from q - * the compiler can eliminate the switch statement when the - * actual queue type is known when the function is called. - * In the performance path this is done. In less critical - * paths just q->type is passed. + * the compiler can eliminate the switch statement when the + * actual queue type is known when the function is called at + * compile time. + * - These queues are lock free. The user and driver must protect + * changes to their end of the queues with locks if more than one + * CPU can be accessing it at the same time. */ -/* type of queue */ +/** + * enum queue_type - type of queue + * @QUEUE_TYPE_TO_CLIENT: Queue is written by rxe driver and + * read by client. Used by rxe driver only. + * @QUEUE_TYPE_FROM_CLIENT: Queue is written by client and + * read by rxe driver. Used by rxe driver only. + * @QUEUE_TYPE_TO_DRIVER: Queue is written by client and + * read by rxe driver. Used by kernel client only. + * @QUEUE_TYPE_FROM_DRIVER: Queue is written by rxe driver and + * read by client. Used by kernel client only. 
+ */ enum queue_type { - QUEUE_TYPE_KERNEL, - QUEUE_TYPE_TO_USER, - QUEUE_TYPE_FROM_USER, + QUEUE_TYPE_TO_CLIENT, + QUEUE_TYPE_FROM_CLIENT, + QUEUE_TYPE_TO_DRIVER, + QUEUE_TYPE_FROM_DRIVER, }; struct rxe_queue { @@ -69,238 +82,171 @@ struct rxe_queue *rxe_queue_init(struct rxe_dev *rxe, int *num_elem, int rxe_queue_resize(struct rxe_queue *q, unsigned int *num_elem_p, unsigned int elem_size, struct ib_udata *udata, struct mminfo __user *outbuf, - /* Protect producers while resizing queue */ - spinlock_t *producer_lock, - /* Protect consumers while resizing queue */ - spinlock_t *consumer_lock); + spinlock_t *producer_lock, spinlock_t *consumer_lock); void rxe_queue_cleanup(struct rxe_queue *queue); -static inline int next_index(struct rxe_queue *q, int index) +static inline u32 queue_next_index(struct rxe_queue *q, int index) { - return (index + 1) & q->buf->index_mask; + return (index + 1) & q->index_mask; } -static inline int queue_empty(struct rxe_queue *q, enum queue_type type) +static inline u32 queue_get_producer(const struct rxe_queue *q, + enum queue_type type) { u32 prod; - u32 cons; switch (type) { - case QUEUE_TYPE_FROM_USER: - /* protect user space index */ + case QUEUE_TYPE_FROM_CLIENT: + /* protect user index */ prod = smp_load_acquire(&q->buf->producer_index); - cons = q->index; break; - case QUEUE_TYPE_TO_USER: + case QUEUE_TYPE_TO_CLIENT: prod = q->index; - /* protect user space index */ - cons = smp_load_acquire(&q->buf->consumer_index); break; - case QUEUE_TYPE_KERNEL: + case QUEUE_TYPE_FROM_DRIVER: + /* protect driver index */ + prod = smp_load_acquire(&q->buf->producer_index); + break; + case QUEUE_TYPE_TO_DRIVER: prod = q->buf->producer_index; - cons = q->buf->consumer_index; break; } - return ((prod - cons) & q->index_mask) == 0; + return prod; } -static inline int queue_full(struct rxe_queue *q, enum queue_type type) +static inline u32 queue_get_consumer(const struct rxe_queue *q, + enum queue_type type) { - u32 prod; u32 cons; switch (type) { - case QUEUE_TYPE_FROM_USER: - /* protect user space index */ - prod = smp_load_acquire(&q->buf->producer_index); + case QUEUE_TYPE_FROM_CLIENT: cons = q->index; break; - case QUEUE_TYPE_TO_USER: - prod = q->index; - /* protect user space index */ + case QUEUE_TYPE_TO_CLIENT: + /* protect user index */ cons = smp_load_acquire(&q->buf->consumer_index); break; - case QUEUE_TYPE_KERNEL: - prod = q->buf->producer_index; + case QUEUE_TYPE_FROM_DRIVER: cons = q->buf->consumer_index; break; + case QUEUE_TYPE_TO_DRIVER: + /* protect driver index */ + cons = smp_load_acquire(&q->buf->consumer_index); + break; } - return ((prod + 1 - cons) & q->index_mask) == 0; + return cons; } -static inline unsigned int queue_count(const struct rxe_queue *q, - enum queue_type type) +static inline int queue_empty(struct rxe_queue *q, enum queue_type type) { - u32 prod; - u32 cons; - - switch (type) { - case QUEUE_TYPE_FROM_USER: - /* protect user space index */ - prod = smp_load_acquire(&q->buf->producer_index); - cons = q->index; - break; - case QUEUE_TYPE_TO_USER: - prod = q->index; - /* protect user space index */ - cons = smp_load_acquire(&q->buf->consumer_index); - break; - case QUEUE_TYPE_KERNEL: - prod = q->buf->producer_index; - cons = q->buf->consumer_index; - break; - } + u32 prod = queue_get_producer(q, type); + u32 cons = queue_get_consumer(q, type); - return (prod - cons) & q->index_mask; + return ((prod - cons) & q->index_mask) == 0; } -static inline void advance_producer(struct rxe_queue *q, enum queue_type type) +static inline int 
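
[Editor's sketch, user-space model, not driver code] The comment block above describes masked producer/consumer indices over a power-of-two number of slots; the empty/full/count tests used by queue_empty(), queue_full() and queue_count() then fall out directly:

struct demo_queue {
	unsigned int prod;		/* producer index, masked before use */
	unsigned int cons;		/* consumer index, masked before use */
	unsigned int index_mask;	/* num_slots - 1, num_slots a power of two */
};

static int demo_empty(const struct demo_queue *q)
{
	return ((q->prod - q->cons) & q->index_mask) == 0;
}

static int demo_full(const struct demo_queue *q)
{
	/* one slot is sacrificed so that full is distinguishable from empty */
	return ((q->prod + 1 - q->cons) & q->index_mask) == 0;
}

static unsigned int demo_count(const struct demo_queue *q)
{
	return (q->prod - q->cons) & q->index_mask;
}
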
queue_full(struct rxe_queue *q, enum queue_type type) { - u32 prod; + u32 prod = queue_get_producer(q, type); + u32 cons = queue_get_consumer(q, type); - switch (type) { - case QUEUE_TYPE_FROM_USER: - pr_warn_once("Normally kernel should not write user space index\n"); - /* protect user space index */ - prod = smp_load_acquire(&q->buf->producer_index); - prod = (prod + 1) & q->index_mask; - /* same */ - smp_store_release(&q->buf->producer_index, prod); - break; - case QUEUE_TYPE_TO_USER: - prod = q->index; - q->index = (prod + 1) & q->index_mask; - q->buf->producer_index = q->index; - break; - case QUEUE_TYPE_KERNEL: - prod = q->buf->producer_index; - q->buf->producer_index = (prod + 1) & q->index_mask; - break; - } + return ((prod + 1 - cons) & q->index_mask) == 0; } -static inline void advance_consumer(struct rxe_queue *q, enum queue_type type) +static inline u32 queue_count(const struct rxe_queue *q, + enum queue_type type) { - u32 cons; + u32 prod = queue_get_producer(q, type); + u32 cons = queue_get_consumer(q, type); - switch (type) { - case QUEUE_TYPE_FROM_USER: - cons = q->index; - q->index = (cons + 1) & q->index_mask; - q->buf->consumer_index = q->index; - break; - case QUEUE_TYPE_TO_USER: - pr_warn_once("Normally kernel should not write user space index\n"); - /* protect user space index */ - cons = smp_load_acquire(&q->buf->consumer_index); - cons = (cons + 1) & q->index_mask; - /* same */ - smp_store_release(&q->buf->consumer_index, cons); - break; - case QUEUE_TYPE_KERNEL: - cons = q->buf->consumer_index; - q->buf->consumer_index = (cons + 1) & q->index_mask; - break; - } + return (prod - cons) & q->index_mask; } -static inline void *producer_addr(struct rxe_queue *q, enum queue_type type) +static inline void queue_advance_producer(struct rxe_queue *q, + enum queue_type type) { u32 prod; switch (type) { - case QUEUE_TYPE_FROM_USER: - /* protect user space index */ - prod = smp_load_acquire(&q->buf->producer_index); - prod &= q->index_mask; + case QUEUE_TYPE_FROM_CLIENT: + pr_warn("%s: attempt to advance client index\n", + __func__); break; - case QUEUE_TYPE_TO_USER: + case QUEUE_TYPE_TO_CLIENT: prod = q->index; + prod = (prod + 1) & q->index_mask; + q->index = prod; + /* protect user index */ + smp_store_release(&q->buf->producer_index, prod); + break; + case QUEUE_TYPE_FROM_DRIVER: + pr_warn("%s: attempt to advance driver index\n", + __func__); break; - case QUEUE_TYPE_KERNEL: + case QUEUE_TYPE_TO_DRIVER: prod = q->buf->producer_index; + prod = (prod + 1) & q->index_mask; + q->buf->producer_index = prod; break; } - - return q->buf->data + (prod << q->log2_elem_size); } -static inline void *consumer_addr(struct rxe_queue *q, enum queue_type type) +static inline void queue_advance_consumer(struct rxe_queue *q, + enum queue_type type) { u32 cons; switch (type) { - case QUEUE_TYPE_FROM_USER: + case QUEUE_TYPE_FROM_CLIENT: cons = q->index; + cons = (cons + 1) & q->index_mask; + q->index = cons; + /* protect user index */ + smp_store_release(&q->buf->consumer_index, cons); break; - case QUEUE_TYPE_TO_USER: - /* protect user space index */ - cons = smp_load_acquire(&q->buf->consumer_index); - cons &= q->index_mask; + case QUEUE_TYPE_TO_CLIENT: + pr_warn("%s: attempt to advance client index\n", + __func__); break; - case QUEUE_TYPE_KERNEL: + case QUEUE_TYPE_FROM_DRIVER: cons = q->buf->consumer_index; + cons = (cons + 1) & q->index_mask; + q->buf->consumer_index = cons; + break; + case QUEUE_TYPE_TO_DRIVER: + pr_warn("%s: attempt to advance driver index\n", + __func__); break; } - - 
return q->buf->data + (cons << q->log2_elem_size); } -static inline unsigned int producer_index(struct rxe_queue *q, - enum queue_type type) +static inline void *queue_producer_addr(struct rxe_queue *q, + enum queue_type type) { - u32 prod; + u32 prod = queue_get_producer(q, type); - switch (type) { - case QUEUE_TYPE_FROM_USER: - /* protect user space index */ - prod = smp_load_acquire(&q->buf->producer_index); - prod &= q->index_mask; - break; - case QUEUE_TYPE_TO_USER: - prod = q->index; - break; - case QUEUE_TYPE_KERNEL: - prod = q->buf->producer_index; - break; - } - - return prod; + return q->buf->data + (prod << q->log2_elem_size); } -static inline unsigned int consumer_index(struct rxe_queue *q, - enum queue_type type) +static inline void *queue_consumer_addr(struct rxe_queue *q, + enum queue_type type) { - u32 cons; - - switch (type) { - case QUEUE_TYPE_FROM_USER: - cons = q->index; - break; - case QUEUE_TYPE_TO_USER: - /* protect user space index */ - cons = smp_load_acquire(&q->buf->consumer_index); - cons &= q->index_mask; - break; - case QUEUE_TYPE_KERNEL: - cons = q->buf->consumer_index; - break; - } + u32 cons = queue_get_consumer(q, type); - return cons; + return q->buf->data + (cons << q->log2_elem_size); } -static inline void *addr_from_index(struct rxe_queue *q, - unsigned int index) +static inline void *queue_addr_from_index(struct rxe_queue *q, u32 index) { return q->buf->data + ((index & q->index_mask) - << q->buf->log2_elem_size); + << q->log2_elem_size); } -static inline unsigned int index_from_addr(const struct rxe_queue *q, +static inline u32 queue_index_from_addr(const struct rxe_queue *q, const void *addr) { return (((u8 *)addr - q->buf->data) >> q->log2_elem_size) @@ -309,7 +255,7 @@ static inline unsigned int index_from_addr(const struct rxe_queue *q, static inline void *queue_head(struct rxe_queue *q, enum queue_type type) { - return queue_empty(q, type) ? NULL : consumer_addr(q, type); + return queue_empty(q, type) ? 
NULL : queue_consumer_addr(q, type); } #endif /* RXE_QUEUE_H */ diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c index 3894197a82f6..5eb89052dd66 100644 --- a/drivers/infiniband/sw/rxe/rxe_req.c +++ b/drivers/infiniband/sw/rxe/rxe_req.c @@ -49,21 +49,16 @@ static void req_retry(struct rxe_qp *qp) unsigned int cons; unsigned int prod; - if (qp->is_user) { - cons = consumer_index(q, QUEUE_TYPE_FROM_USER); - prod = producer_index(q, QUEUE_TYPE_FROM_USER); - } else { - cons = consumer_index(q, QUEUE_TYPE_KERNEL); - prod = producer_index(q, QUEUE_TYPE_KERNEL); - } + cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT); + prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT); qp->req.wqe_index = cons; qp->req.psn = qp->comp.psn; qp->req.opcode = -1; for (wqe_index = cons; wqe_index != prod; - wqe_index = next_index(q, wqe_index)) { - wqe = addr_from_index(qp->sq.queue, wqe_index); + wqe_index = queue_next_index(q, wqe_index)) { + wqe = queue_addr_from_index(qp->sq.queue, wqe_index); mask = wr_opcode_mask(wqe->wr.opcode, qp); if (wqe->state == wqe_state_posted) @@ -115,45 +110,36 @@ void rnr_nak_timer(struct timer_list *t) static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) { struct rxe_send_wqe *wqe; - unsigned long flags; struct rxe_queue *q = qp->sq.queue; unsigned int index = qp->req.wqe_index; unsigned int cons; unsigned int prod; - if (qp->is_user) { - wqe = queue_head(q, QUEUE_TYPE_FROM_USER); - cons = consumer_index(q, QUEUE_TYPE_FROM_USER); - prod = producer_index(q, QUEUE_TYPE_FROM_USER); - } else { - wqe = queue_head(q, QUEUE_TYPE_KERNEL); - cons = consumer_index(q, QUEUE_TYPE_KERNEL); - prod = producer_index(q, QUEUE_TYPE_KERNEL); - } + wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT); + cons = queue_get_consumer(q, QUEUE_TYPE_FROM_CLIENT); + prod = queue_get_producer(q, QUEUE_TYPE_FROM_CLIENT); if (unlikely(qp->req.state == QP_STATE_DRAIN)) { /* check to see if we are drained; * state_lock used by requester and completer */ - spin_lock_irqsave(&qp->state_lock, flags); + spin_lock_bh(&qp->state_lock); do { if (qp->req.state != QP_STATE_DRAIN) { /* comp just finished */ - spin_unlock_irqrestore(&qp->state_lock, - flags); + spin_unlock_bh(&qp->state_lock); break; } if (wqe && ((index != cons) || (wqe->state != wqe_state_posted))) { /* comp not done yet */ - spin_unlock_irqrestore(&qp->state_lock, - flags); + spin_unlock_bh(&qp->state_lock); break; } qp->req.state = QP_STATE_DRAINED; - spin_unlock_irqrestore(&qp->state_lock, flags); + spin_unlock_bh(&qp->state_lock); if (qp->ibqp.event_handler) { struct ib_event ev; @@ -170,7 +156,7 @@ static struct rxe_send_wqe *req_next_wqe(struct rxe_qp *qp) if (index == prod) return NULL; - wqe = addr_from_index(q, index); + wqe = queue_addr_from_index(q, index); if (unlikely((qp->req.state == QP_STATE_DRAIN || qp->req.state == QP_STATE_DRAINED) && @@ -383,16 +369,14 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, int pad = (-payload) & 0x3; int paylen; int solicited; - u16 pkey; u32 qp_num; int ack_req; /* length from start of bth to end of icrc */ paylen = rxe_opcode[opcode].length + payload + pad + RXE_ICRC_SIZE; - /* pkt->hdr, rxe, port_num and mask are initialized in ifc - * layer - */ + /* pkt->hdr, port_num and mask are initialized in ifc layer */ + pkt->rxe = rxe; pkt->opcode = opcode; pkt->qp = qp; pkt->psn = qp->req.psn; @@ -402,6 +386,9 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, /* init skb */ av = rxe_get_av(pkt); + if (!av) + return NULL; + skb = rxe_init_packet(rxe, 
av, paylen, pkt); if (unlikely(!skb)) return NULL; @@ -413,8 +400,6 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, (pkt->mask & (RXE_WRITE_MASK | RXE_IMMDT_MASK)) == (RXE_WRITE_MASK | RXE_IMMDT_MASK)); - pkey = IB_DEFAULT_PKEY_FULL; - qp_num = (pkt->mask & RXE_DETH_MASK) ? ibwr->wr.ud.remote_qpn : qp->attr.dest_qp_num; @@ -423,7 +408,7 @@ static struct sk_buff *init_req_packet(struct rxe_qp *qp, if (ack_req) qp->req.noack_pkts = 0; - bth_init(pkt, pkt->opcode, solicited, 0, pad, pkey, qp_num, + bth_init(pkt, pkt->opcode, solicited, 0, pad, IB_DEFAULT_PKEY_FULL, qp_num, ack_req, pkt->psn); /* init optional headers */ @@ -472,7 +457,7 @@ static int finish_packet(struct rxe_qp *qp, struct rxe_send_wqe *wqe, if (err) return err; - if (pkt->mask & RXE_WRITE_OR_SEND) { + if (pkt->mask & RXE_WRITE_OR_SEND_MASK) { if (wqe->wr.send_flags & IB_SEND_INLINE) { u8 *tmp = &wqe->dma.inline_data[wqe->dma.sge_offset]; @@ -560,7 +545,8 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, qp->req.opcode = pkt->opcode; if (pkt->mask & RXE_END_MASK) - qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index); + qp->req.wqe_index = queue_next_index(qp->sq.queue, + qp->req.wqe_index); qp->need_req_skb = 0; @@ -572,7 +558,6 @@ static void update_state(struct rxe_qp *qp, struct rxe_send_wqe *wqe, static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe) { u8 opcode = wqe->wr.opcode; - struct rxe_mr *mr; u32 rkey; int ret; @@ -590,14 +575,11 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe) } break; case IB_WR_REG_MR: - mr = to_rmr(wqe->wr.wr.reg.mr); - rxe_add_ref(mr); - mr->state = RXE_MR_STATE_VALID; - mr->access = wqe->wr.wr.reg.access; - mr->ibmr.lkey = wqe->wr.wr.reg.key; - mr->ibmr.rkey = wqe->wr.wr.reg.key; - mr->iova = wqe->wr.wr.reg.mr->iova; - rxe_drop_ref(mr); + ret = rxe_reg_fast_mr(qp, wqe); + if (unlikely(ret)) { + wqe->status = IB_WC_LOC_QP_OP_ERR; + return ret; + } break; case IB_WR_BIND_MW: ret = rxe_bind_mw(qp, wqe); @@ -614,7 +596,7 @@ static int rxe_do_local_ops(struct rxe_qp *qp, struct rxe_send_wqe *wqe) wqe->state = wqe_state_done; wqe->status = IB_WC_SUCCESS; - qp->req.wqe_index = next_index(qp->sq.queue, qp->req.wqe_index); + qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index); if ((wqe->wr.send_flags & IB_SEND_SIGNALED) || qp->sq_sig_type == IB_SIGNAL_ALL_WR) @@ -645,7 +627,8 @@ next_wqe: goto exit; if (unlikely(qp->req.state == QP_STATE_RESET)) { - qp->req.wqe_index = consumer_index(q, q->type); + qp->req.wqe_index = queue_get_consumer(q, + QUEUE_TYPE_FROM_CLIENT); qp->req.opcode = -1; qp->req.need_rd_atomic = 0; qp->req.wait_psn = 0; @@ -691,13 +674,13 @@ next_wqe: } mask = rxe_opcode[opcode].mask; - if (unlikely(mask & RXE_READ_OR_ATOMIC)) { + if (unlikely(mask & RXE_READ_OR_ATOMIC_MASK)) { if (check_init_depth(qp, wqe)) goto exit; } mtu = get_mtu(qp); - payload = (mask & RXE_WRITE_OR_SEND) ? wqe->dma.resid : 0; + payload = (mask & RXE_WRITE_OR_SEND_MASK) ? 
wqe->dma.resid : 0; if (payload > mtu) { if (qp_type(qp) == IB_QPT_UD) { /* C10-93.1.1: If the total sum of all the buffer lengths specified for a @@ -711,7 +694,7 @@ next_wqe: wqe->last_psn = qp->req.psn; qp->req.psn = (qp->req.psn + 1) & BTH_PSN_MASK; qp->req.opcode = IB_OPCODE_UD_SEND_ONLY; - qp->req.wqe_index = next_index(qp->sq.queue, + qp->req.wqe_index = queue_next_index(qp->sq.queue, qp->req.wqe_index); wqe->state = wqe_state_done; wqe->status = IB_WC_SUCCESS; diff --git a/drivers/infiniband/sw/rxe/rxe_resp.c b/drivers/infiniband/sw/rxe/rxe_resp.c index 5501227ddc65..e8f435fa6e4d 100644 --- a/drivers/infiniband/sw/rxe/rxe_resp.c +++ b/drivers/infiniband/sw/rxe/rxe_resp.c @@ -303,10 +303,7 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp) spin_lock_bh(&srq->rq.consumer_lock); - if (qp->is_user) - wqe = queue_head(q, QUEUE_TYPE_FROM_USER); - else - wqe = queue_head(q, QUEUE_TYPE_KERNEL); + wqe = queue_head(q, QUEUE_TYPE_FROM_CLIENT); if (!wqe) { spin_unlock_bh(&srq->rq.consumer_lock); return RESPST_ERR_RNR; @@ -322,13 +319,8 @@ static enum resp_states get_srq_wqe(struct rxe_qp *qp) memcpy(&qp->resp.srq_wqe, wqe, size); qp->resp.wqe = &qp->resp.srq_wqe.wqe; - if (qp->is_user) { - advance_consumer(q, QUEUE_TYPE_FROM_USER); - count = queue_count(q, QUEUE_TYPE_FROM_USER); - } else { - advance_consumer(q, QUEUE_TYPE_KERNEL); - count = queue_count(q, QUEUE_TYPE_KERNEL); - } + queue_advance_consumer(q, QUEUE_TYPE_FROM_CLIENT); + count = queue_count(q, QUEUE_TYPE_FROM_CLIENT); if (srq->limit && srq->ibsrq.event_handler && (count < srq->limit)) { srq->limit = 0; @@ -357,12 +349,8 @@ static enum resp_states check_resource(struct rxe_qp *qp, qp->resp.status = IB_WC_WR_FLUSH_ERR; return RESPST_COMPLETE; } else if (!srq) { - if (qp->is_user) - qp->resp.wqe = queue_head(qp->rq.queue, - QUEUE_TYPE_FROM_USER); - else - qp->resp.wqe = queue_head(qp->rq.queue, - QUEUE_TYPE_KERNEL); + qp->resp.wqe = queue_head(qp->rq.queue, + QUEUE_TYPE_FROM_CLIENT); if (qp->resp.wqe) { qp->resp.status = IB_WC_WR_FLUSH_ERR; return RESPST_COMPLETE; @@ -374,7 +362,7 @@ static enum resp_states check_resource(struct rxe_qp *qp, } } - if (pkt->mask & RXE_READ_OR_ATOMIC) { + if (pkt->mask & RXE_READ_OR_ATOMIC_MASK) { /* it is the requesters job to not send * too many read/atomic ops, we just * recycle the responder resource queue @@ -389,12 +377,8 @@ static enum resp_states check_resource(struct rxe_qp *qp, if (srq) return get_srq_wqe(qp); - if (qp->is_user) - qp->resp.wqe = queue_head(qp->rq.queue, - QUEUE_TYPE_FROM_USER); - else - qp->resp.wqe = queue_head(qp->rq.queue, - QUEUE_TYPE_KERNEL); + qp->resp.wqe = queue_head(qp->rq.queue, + QUEUE_TYPE_FROM_CLIENT); return (qp->resp.wqe) ? RESPST_CHK_LENGTH : RESPST_ERR_RNR; } @@ -429,7 +413,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp, enum resp_states state; int access; - if (pkt->mask & (RXE_READ_MASK | RXE_WRITE_MASK)) { + if (pkt->mask & RXE_READ_OR_WRITE_MASK) { if (pkt->mask & RXE_RETH_MASK) { qp->resp.va = reth_va(pkt); qp->resp.offset = 0; @@ -450,7 +434,7 @@ static enum resp_states check_rkey(struct rxe_qp *qp, } /* A zero-byte op is not required to set an addr or rkey. */ - if ((pkt->mask & (RXE_READ_MASK | RXE_WRITE_OR_SEND)) && + if ((pkt->mask & RXE_READ_OR_WRITE_MASK) && (pkt->mask & RXE_RETH_MASK) && reth_len(pkt) == 0) { return RESPST_EXECUTE; @@ -876,7 +860,6 @@ static enum resp_states do_complete(struct rxe_qp *qp, wc->opcode = (pkt->mask & RXE_IMMDT_MASK && pkt->mask & RXE_WRITE_MASK) ? 
IB_WC_RECV_RDMA_WITH_IMM : IB_WC_RECV; - wc->vendor_err = 0; wc->byte_len = (pkt->mask & RXE_IMMDT_MASK && pkt->mask & RXE_WRITE_MASK) ? qp->resp.length : wqe->dma.length - wqe->dma.resid; @@ -897,8 +880,6 @@ static enum resp_states do_complete(struct rxe_qp *qp, uwc->ex.invalidate_rkey = ieth_rkey(pkt); } - uwc->qp_num = qp->ibqp.qp_num; - if (pkt->mask & RXE_DETH_MASK) uwc->src_qp = deth_sqp(pkt); @@ -930,18 +911,13 @@ static enum resp_states do_complete(struct rxe_qp *qp, if (pkt->mask & RXE_DETH_MASK) wc->src_qp = deth_sqp(pkt); - wc->qp = &qp->ibqp; wc->port_num = qp->attr.port_num; } } /* have copy for srq and reference for !srq */ - if (!qp->srq) { - if (qp->is_user) - advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_USER); - else - advance_consumer(qp->rq.queue, QUEUE_TYPE_KERNEL); - } + if (!qp->srq) + queue_advance_consumer(qp->rq.queue, QUEUE_TYPE_FROM_CLIENT); qp->resp.wqe = NULL; @@ -1213,7 +1189,7 @@ static void rxe_drain_req_pkts(struct rxe_qp *qp, bool notify) return; while (!qp->srq && q && queue_head(q, q->type)) - advance_consumer(q, q->type); + queue_advance_consumer(q, q->type); } int rxe_responder(void *arg) diff --git a/drivers/infiniband/sw/rxe/rxe_srq.c b/drivers/infiniband/sw/rxe/rxe_srq.c index 610c98d24b5c..0c0721f04357 100644 --- a/drivers/infiniband/sw/rxe/rxe_srq.c +++ b/drivers/infiniband/sw/rxe/rxe_srq.c @@ -83,17 +83,16 @@ int rxe_srq_from_init(struct rxe_dev *rxe, struct rxe_srq *srq, srq->ibsrq.event_handler = init->event_handler; srq->ibsrq.srq_context = init->srq_context; srq->limit = init->attr.srq_limit; - srq->srq_num = srq->pelem.index; + srq->srq_num = srq->elem.index; srq->rq.max_wr = init->attr.max_wr; srq->rq.max_sge = init->attr.max_sge; - srq->rq.is_user = srq->is_user; srq_wqe_size = rcv_wqe_size(srq->rq.max_sge); spin_lock_init(&srq->rq.producer_lock); spin_lock_init(&srq->rq.consumer_lock); - type = uresp ? QUEUE_TYPE_FROM_USER : QUEUE_TYPE_KERNEL; + type = QUEUE_TYPE_FROM_CLIENT; q = rxe_queue_init(rxe, &srq->rq.max_wr, srq_wqe_size, type); if (!q) { diff --git a/drivers/infiniband/sw/rxe/rxe_sysfs.c b/drivers/infiniband/sw/rxe/rxe_sysfs.c deleted file mode 100644 index 666202ddff48..000000000000 --- a/drivers/infiniband/sw/rxe/rxe_sysfs.c +++ /dev/null @@ -1,119 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB -/* - * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. - * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved. - */ - -#include "rxe.h" -#include "rxe_net.h" - -/* Copy argument and remove trailing CR. Return the new length. */ -static int sanitize_arg(const char *val, char *intf, int intf_len) -{ - int len; - - if (!val) - return 0; - - /* Remove newline. 
*/ - for (len = 0; len < intf_len - 1 && val[len] && val[len] != '\n'; len++) - intf[len] = val[len]; - intf[len] = 0; - - if (len == 0 || (val[len] != 0 && val[len] != '\n')) - return 0; - - return len; -} - -static int rxe_param_set_add(const char *val, const struct kernel_param *kp) -{ - int len; - int err = 0; - char intf[32]; - struct net_device *ndev; - struct rxe_dev *exists; - - if (!rxe_initialized) { - pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n"); - return -EAGAIN; - } - - len = sanitize_arg(val, intf, sizeof(intf)); - if (!len) { - pr_err("add: invalid interface name\n"); - return -EINVAL; - } - - ndev = dev_get_by_name(&init_net, intf); - if (!ndev) { - pr_err("interface %s not found\n", intf); - return -EINVAL; - } - - if (is_vlan_dev(ndev)) { - pr_err("rxe creation allowed on top of a real device only\n"); - err = -EPERM; - goto err; - } - - exists = rxe_get_dev_from_net(ndev); - if (exists) { - ib_device_put(&exists->ib_dev); - pr_err("already configured on %s\n", intf); - err = -EINVAL; - goto err; - } - - err = rxe_net_add("rxe%d", ndev); - if (err) { - pr_err("failed to add %s\n", intf); - goto err; - } - -err: - dev_put(ndev); - return err; -} - -static int rxe_param_set_remove(const char *val, const struct kernel_param *kp) -{ - int len; - char intf[32]; - struct ib_device *ib_dev; - - len = sanitize_arg(val, intf, sizeof(intf)); - if (!len) { - pr_err("add: invalid interface name\n"); - return -EINVAL; - } - - if (strncmp("all", intf, len) == 0) { - pr_info("rxe_sys: remove all"); - ib_unregister_driver(RDMA_DRIVER_RXE); - return 0; - } - - ib_dev = ib_device_get_by_name(intf, RDMA_DRIVER_RXE); - if (!ib_dev) { - pr_err("not configured on %s\n", intf); - return -EINVAL; - } - - ib_unregister_device_and_put(ib_dev); - - return 0; -} - -static const struct kernel_param_ops rxe_add_ops = { - .set = rxe_param_set_add, -}; - -static const struct kernel_param_ops rxe_remove_ops = { - .set = rxe_param_set_remove, -}; - -module_param_cb(add, &rxe_add_ops, NULL, 0200); -MODULE_PARM_DESC(add, "DEPRECATED. Create RXE device over network interface"); -module_param_cb(remove, &rxe_remove_ops, NULL, 0200); -MODULE_PARM_DESC(remove, "DEPRECATED. 
Remove RXE device over network interface"); diff --git a/drivers/infiniband/sw/rxe/rxe_task.c b/drivers/infiniband/sw/rxe/rxe_task.c index 6951fdcb31bf..0c4db5bb17d7 100644 --- a/drivers/infiniband/sw/rxe/rxe_task.c +++ b/drivers/infiniband/sw/rxe/rxe_task.c @@ -32,25 +32,24 @@ void rxe_do_task(struct tasklet_struct *t) { int cont; int ret; - unsigned long flags; struct rxe_task *task = from_tasklet(task, t, tasklet); - spin_lock_irqsave(&task->state_lock, flags); + spin_lock_bh(&task->state_lock); switch (task->state) { case TASK_STATE_START: task->state = TASK_STATE_BUSY; - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); break; case TASK_STATE_BUSY: task->state = TASK_STATE_ARMED; fallthrough; case TASK_STATE_ARMED: - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); return; default: - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); pr_warn("%s failed with bad state %d\n", __func__, task->state); return; } @@ -59,7 +58,7 @@ void rxe_do_task(struct tasklet_struct *t) cont = 0; ret = task->func(task->arg); - spin_lock_irqsave(&task->state_lock, flags); + spin_lock_bh(&task->state_lock); switch (task->state) { case TASK_STATE_BUSY: if (ret) @@ -81,7 +80,7 @@ void rxe_do_task(struct tasklet_struct *t) pr_warn("%s failed with bad state %d\n", __func__, task->state); } - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); } while (cont); task->ret = ret; @@ -106,7 +105,6 @@ int rxe_init_task(void *obj, struct rxe_task *task, void rxe_cleanup_task(struct rxe_task *task) { - unsigned long flags; bool idle; /* @@ -116,9 +114,9 @@ void rxe_cleanup_task(struct rxe_task *task) task->destroyed = true; do { - spin_lock_irqsave(&task->state_lock, flags); + spin_lock_bh(&task->state_lock); idle = (task->state == TASK_STATE_START); - spin_unlock_irqrestore(&task->state_lock, flags); + spin_unlock_bh(&task->state_lock); } while (!idle); tasklet_kill(&task->tasklet); diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.c b/drivers/infiniband/sw/rxe/rxe_verbs.c index 267b5a9c345d..915ad6664321 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.c +++ b/drivers/infiniband/sw/rxe/rxe_verbs.c @@ -29,13 +29,10 @@ static int rxe_query_port(struct ib_device *dev, u32 port_num, struct ib_port_attr *attr) { struct rxe_dev *rxe = to_rdev(dev); - struct rxe_port *port; int rc; - port = &rxe->port; - /* *attr being zeroed by the caller, avoid zeroing it here */ - *attr = port->attr; + *attr = rxe->port.attr; mutex_lock(&rxe->usdev_lock); rc = ib_get_eth_speed(dev, port_num, &attr->active_speed, @@ -161,9 +158,19 @@ static int rxe_create_ah(struct ib_ah *ibah, struct ib_udata *udata) { - int err; struct rxe_dev *rxe = to_rdev(ibah->device); struct rxe_ah *ah = to_rah(ibah); + struct rxe_create_ah_resp __user *uresp = NULL; + int err; + + if (udata) { + /* test if new user provider */ + if (udata->outlen >= sizeof(*uresp)) + uresp = udata->outbuf; + ah->is_user = true; + } else { + ah->is_user = false; + } err = rxe_av_chk_attr(rxe, init_attr->ah_attr); if (err) @@ -173,6 +180,24 @@ static int rxe_create_ah(struct ib_ah *ibah, if (err) return err; + /* create index > 0 */ + rxe_add_index(ah); + ah->ah_num = ah->elem.index; + + if (uresp) { + /* only if new user provider */ + err = copy_to_user(&uresp->ah_num, &ah->ah_num, + sizeof(uresp->ah_num)); + if (err) { + rxe_drop_index(ah); + rxe_drop_ref(ah); + return -EFAULT; + } + } else if (ah->is_user) { + /* only if old user 
provider */ + ah->ah_num = 0; + } + rxe_init_av(init_attr->ah_attr, &ah->av); return 0; } @@ -205,6 +230,7 @@ static int rxe_destroy_ah(struct ib_ah *ibah, u32 flags) { struct rxe_ah *ah = to_rah(ibah); + rxe_drop_index(ah); rxe_drop_ref(ah); return 0; } @@ -218,11 +244,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr) int num_sge = ibwr->num_sge; int full; - if (rq->is_user) - full = queue_full(rq->queue, QUEUE_TYPE_FROM_USER); - else - full = queue_full(rq->queue, QUEUE_TYPE_KERNEL); - + full = queue_full(rq->queue, QUEUE_TYPE_TO_DRIVER); if (unlikely(full)) { err = -ENOMEM; goto err1; @@ -237,11 +259,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr) for (i = 0; i < num_sge; i++) length += ibwr->sg_list[i].length; - if (rq->is_user) - recv_wqe = producer_addr(rq->queue, QUEUE_TYPE_FROM_USER); - else - recv_wqe = producer_addr(rq->queue, QUEUE_TYPE_KERNEL); - + recv_wqe = queue_producer_addr(rq->queue, QUEUE_TYPE_TO_DRIVER); recv_wqe->wr_id = ibwr->wr_id; recv_wqe->num_sge = num_sge; @@ -254,10 +272,7 @@ static int post_one_recv(struct rxe_rq *rq, const struct ib_recv_wr *ibwr) recv_wqe->dma.cur_sge = 0; recv_wqe->dma.sge_offset = 0; - if (rq->is_user) - advance_producer(rq->queue, QUEUE_TYPE_FROM_USER); - else - advance_producer(rq->queue, QUEUE_TYPE_KERNEL); + queue_advance_producer(rq->queue, QUEUE_TYPE_TO_DRIVER); return 0; @@ -281,9 +296,6 @@ static int rxe_create_srq(struct ib_srq *ibsrq, struct ib_srq_init_attr *init, if (udata->outlen < sizeof(*uresp)) return -EINVAL; uresp = udata->outbuf; - srq->is_user = true; - } else { - srq->is_user = false; } err = rxe_srq_chk_attr(rxe, NULL, &init->attr, IB_SRQ_INIT_MASK); @@ -371,10 +383,9 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, const struct ib_recv_wr **bad_wr) { int err = 0; - unsigned long flags; struct rxe_srq *srq = to_rsrq(ibsrq); - spin_lock_irqsave(&srq->rq.producer_lock, flags); + spin_lock_bh(&srq->rq.producer_lock); while (wr) { err = post_one_recv(&srq->rq, wr); @@ -383,7 +394,7 @@ static int rxe_post_srq_recv(struct ib_srq *ibsrq, const struct ib_recv_wr *wr, wr = wr->next; } - spin_unlock_irqrestore(&srq->rq.producer_lock, flags); + spin_unlock_bh(&srq->rq.producer_lock); if (err) *bad_wr = wr; @@ -457,6 +468,11 @@ static int rxe_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, if (err) goto err1; + if ((mask & IB_QP_AV) && (attr->ah_attr.ah_flags & IB_AH_GRH)) + qp->src_port = rdma_get_udp_sport(attr->ah_attr.grh.flow_label, + qp->ibqp.qp_num, + qp->attr.dest_qp_num); + return 0; err1: @@ -522,8 +538,11 @@ static void init_send_wr(struct rxe_qp *qp, struct rxe_send_wr *wr, if (qp_type(qp) == IB_QPT_UD || qp_type(qp) == IB_QPT_SMI || qp_type(qp) == IB_QPT_GSI) { + struct ib_ah *ibah = ud_wr(ibwr)->ah; + wr->wr.ud.remote_qpn = ud_wr(ibwr)->remote_qpn; wr->wr.ud.remote_qkey = ud_wr(ibwr)->remote_qkey; + wr->wr.ud.ah_num = to_rah(ibah)->ah_num; if (qp_type(qp) == IB_QPT_GSI) wr->wr.ud.pkey_index = ud_wr(ibwr)->pkey_index; if (wr->opcode == IB_WR_SEND_WITH_IMM) @@ -595,11 +614,6 @@ static void init_send_wqe(struct rxe_qp *qp, const struct ib_send_wr *ibwr, return; } - if (qp_type(qp) == IB_QPT_UD || - qp_type(qp) == IB_QPT_SMI || - qp_type(qp) == IB_QPT_GSI) - memcpy(&wqe->av, &to_rah(ud_wr(ibwr)->ah)->av, sizeof(wqe->av)); - if (unlikely(ibwr->send_flags & IB_SEND_INLINE)) copy_inline_data_to_wqe(wqe, ibwr); else @@ -624,38 +638,27 @@ static int post_one_send(struct rxe_qp *qp, const struct ib_send_wr *ibwr, int err; 
struct rxe_sq *sq = &qp->sq; struct rxe_send_wqe *send_wqe; - unsigned long flags; int full; err = validate_send_wr(qp, ibwr, mask, length); if (err) return err; - spin_lock_irqsave(&qp->sq.sq_lock, flags); + spin_lock_bh(&qp->sq.sq_lock); - if (qp->is_user) - full = queue_full(sq->queue, QUEUE_TYPE_FROM_USER); - else - full = queue_full(sq->queue, QUEUE_TYPE_KERNEL); + full = queue_full(sq->queue, QUEUE_TYPE_TO_DRIVER); if (unlikely(full)) { - spin_unlock_irqrestore(&qp->sq.sq_lock, flags); + spin_unlock_bh(&qp->sq.sq_lock); return -ENOMEM; } - if (qp->is_user) - send_wqe = producer_addr(sq->queue, QUEUE_TYPE_FROM_USER); - else - send_wqe = producer_addr(sq->queue, QUEUE_TYPE_KERNEL); - + send_wqe = queue_producer_addr(sq->queue, QUEUE_TYPE_TO_DRIVER); init_send_wqe(qp, ibwr, mask, length, send_wqe); - if (qp->is_user) - advance_producer(sq->queue, QUEUE_TYPE_FROM_USER); - else - advance_producer(sq->queue, QUEUE_TYPE_KERNEL); + queue_advance_producer(sq->queue, QUEUE_TYPE_TO_DRIVER); - spin_unlock_irqrestore(&qp->sq.sq_lock, flags); + spin_unlock_bh(&qp->sq.sq_lock); return 0; } @@ -735,7 +738,6 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, int err = 0; struct rxe_qp *qp = to_rqp(ibqp); struct rxe_rq *rq = &qp->rq; - unsigned long flags; if (unlikely((qp_state(qp) < IB_QPS_INIT) || !qp->valid)) { *bad_wr = wr; @@ -749,7 +751,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, goto err1; } - spin_lock_irqsave(&rq->producer_lock, flags); + spin_lock_bh(&rq->producer_lock); while (wr) { err = post_one_recv(rq, wr); @@ -760,7 +762,7 @@ static int rxe_post_recv(struct ib_qp *ibqp, const struct ib_recv_wr *wr, wr = wr->next; } - spin_unlock_irqrestore(&rq->producer_lock, flags); + spin_unlock_bh(&rq->producer_lock); if (qp->resp.state == QP_STATE_ERROR) rxe_run_task(&qp->resp.task, 1); @@ -841,24 +843,17 @@ static int rxe_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc) int i; struct rxe_cq *cq = to_rcq(ibcq); struct rxe_cqe *cqe; - unsigned long flags; - spin_lock_irqsave(&cq->cq_lock, flags); + spin_lock_bh(&cq->cq_lock); for (i = 0; i < num_entries; i++) { - if (cq->is_user) - cqe = queue_head(cq->queue, QUEUE_TYPE_TO_USER); - else - cqe = queue_head(cq->queue, QUEUE_TYPE_KERNEL); + cqe = queue_head(cq->queue, QUEUE_TYPE_FROM_DRIVER); if (!cqe) break; memcpy(wc++, &cqe->ibwc, sizeof(*wc)); - if (cq->is_user) - advance_consumer(cq->queue, QUEUE_TYPE_TO_USER); - else - advance_consumer(cq->queue, QUEUE_TYPE_KERNEL); + queue_advance_consumer(cq->queue, QUEUE_TYPE_FROM_DRIVER); } - spin_unlock_irqrestore(&cq->cq_lock, flags); + spin_unlock_bh(&cq->cq_lock); return i; } @@ -868,10 +863,7 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt) struct rxe_cq *cq = to_rcq(ibcq); int count; - if (cq->is_user) - count = queue_count(cq->queue, QUEUE_TYPE_TO_USER); - else - count = queue_count(cq->queue, QUEUE_TYPE_KERNEL); + count = queue_count(cq->queue, QUEUE_TYPE_FROM_DRIVER); return (count > wc_cnt) ? 
wc_cnt : count; } @@ -879,23 +871,19 @@ static int rxe_peek_cq(struct ib_cq *ibcq, int wc_cnt) static int rxe_req_notify_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags) { struct rxe_cq *cq = to_rcq(ibcq); - unsigned long irq_flags; int ret = 0; int empty; - spin_lock_irqsave(&cq->cq_lock, irq_flags); + spin_lock_bh(&cq->cq_lock); if (cq->notify != IB_CQ_NEXT_COMP) cq->notify = flags & IB_CQ_SOLICITED_MASK; - if (cq->is_user) - empty = queue_empty(cq->queue, QUEUE_TYPE_TO_USER); - else - empty = queue_empty(cq->queue, QUEUE_TYPE_KERNEL); + empty = queue_empty(cq->queue, QUEUE_TYPE_FROM_DRIVER); if ((flags & IB_CQ_REPORT_MISSED_EVENTS) && !empty) ret = 1; - spin_unlock_irqrestore(&cq->cq_lock, irq_flags); + spin_unlock_bh(&cq->cq_lock); return ret; } @@ -987,41 +975,26 @@ err1: return ERR_PTR(err); } -static int rxe_set_page(struct ib_mr *ibmr, u64 addr) -{ - struct rxe_mr *mr = to_rmr(ibmr); - struct rxe_map *map; - struct rxe_phys_buf *buf; - - if (unlikely(mr->nbuf == mr->num_buf)) - return -ENOMEM; - - map = mr->map[mr->nbuf / RXE_BUF_PER_MAP]; - buf = &map->buf[mr->nbuf % RXE_BUF_PER_MAP]; - - buf->addr = addr; - buf->size = ibmr->page_size; - mr->nbuf++; - - return 0; -} - +/* build next_map_set from scatterlist + * The IB_WR_REG_MR WR will swap map_sets + */ static int rxe_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg, int sg_nents, unsigned int *sg_offset) { struct rxe_mr *mr = to_rmr(ibmr); + struct rxe_map_set *set = mr->next_map_set; int n; - mr->nbuf = 0; + set->nbuf = 0; - n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_set_page); + n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, rxe_mr_set_page); - mr->va = ibmr->iova; - mr->iova = ibmr->iova; - mr->length = ibmr->length; - mr->page_shift = ilog2(ibmr->page_size); - mr->page_mask = ibmr->page_size - 1; - mr->offset = mr->iova & mr->page_mask; + set->va = ibmr->iova; + set->iova = ibmr->iova; + set->length = ibmr->length; + set->page_shift = ilog2(ibmr->page_size); + set->page_mask = ibmr->page_size - 1; + set->offset = set->iova & set->page_mask; return n; } diff --git a/drivers/infiniband/sw/rxe/rxe_verbs.h b/drivers/infiniband/sw/rxe/rxe_verbs.h index ac2a2148027f..e48969e8d4c8 100644 --- a/drivers/infiniband/sw/rxe/rxe_verbs.h +++ b/drivers/infiniband/sw/rxe/rxe_verbs.h @@ -35,19 +35,20 @@ static inline int psn_compare(u32 psn_a, u32 psn_b) struct rxe_ucontext { struct ib_ucontext ibuc; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; }; struct rxe_pd { struct ib_pd ibpd; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; }; struct rxe_ah { struct ib_ah ibah; - struct rxe_pool_entry pelem; - struct rxe_pd *pd; + struct rxe_pool_elem elem; struct rxe_av av; + bool is_user; + int ah_num; }; struct rxe_cqe { @@ -59,12 +60,12 @@ struct rxe_cqe { struct rxe_cq { struct ib_cq ibcq; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct rxe_queue *queue; spinlock_t cq_lock; u8 notify; bool is_dying; - int is_user; + bool is_user; struct tasklet_struct comp_task; }; @@ -77,7 +78,6 @@ enum wqe_state { }; struct rxe_sq { - bool is_user; int max_wr; int max_sge; int max_inline; @@ -86,7 +86,6 @@ struct rxe_sq { }; struct rxe_rq { - bool is_user; int max_wr; int max_sge; spinlock_t producer_lock; /* guard queue producer */ @@ -96,11 +95,10 @@ struct rxe_rq { struct rxe_srq { struct ib_srq ibsrq; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct rxe_pd *pd; struct rxe_rq rq; u32 srq_num; - bool is_user; int limit; int error; @@ -211,7 +209,7 @@ struct rxe_resp_info { 
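The reworked rxe_map_mr_sg() above only fills mr->next_map_set; per the added comment, the IB_WR_REG_MR work request later swaps the "next" and "current" sets, so the MR's active translation is only replaced when the registration actually executes. A sketch of that double-buffered registration pattern, with illustrative names rather than the rxe implementation:

#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>

struct example_map_set {
	u64	*pages;		/* page array backing the translation */
	u32	npages;		/* capacity of pages[] */
	u32	nbuf;		/* entries filled so far */
	u64	iova;
	size_t	length;
};

struct example_mr {
	struct ib_mr		ibmr;
	struct example_map_set	*cur_map_set;	/* what RDMA ops use now */
	struct example_map_set	*next_map_set;	/* built by ->map_mr_sg() */
};

/* ib_sg_to_pages() callback: only ever touches the staging set */
static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);
	struct example_map_set *set = mr->next_map_set;

	if (set->nbuf == set->npages)
		return -ENOMEM;

	set->pages[set->nbuf++] = addr;
	return 0;
}

static int example_map_mr_sg(struct ib_mr *ibmr, struct scatterlist *sg,
			     int sg_nents, unsigned int *sg_offset)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);
	struct example_map_set *set = mr->next_map_set;
	int n;

	set->nbuf = 0;
	n = ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);

	set->iova = ibmr->iova;
	set->length = ibmr->length;

	return n;
}

/* called when the IB_WR_REG_MR WQE is processed; readers of cur_map_set
 * never see a half-built translation
 */
static void example_commit_reg_mr(struct example_mr *mr)
{
	swap(mr->cur_map_set, mr->next_map_set);
}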
struct rxe_qp { struct ib_qp ibqp; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct ib_qp_attr attr; unsigned int valid; unsigned int mtu; @@ -240,7 +238,6 @@ struct rxe_qp { struct sk_buff_head req_pkts; struct sk_buff_head resp_pkts; - struct sk_buff_head send_pkts; struct rxe_req_info req; struct rxe_comp_info comp; @@ -267,18 +264,11 @@ struct rxe_qp { }; enum rxe_mr_state { - RXE_MR_STATE_ZOMBIE, RXE_MR_STATE_INVALID, RXE_MR_STATE_FREE, RXE_MR_STATE_VALID, }; -enum rxe_mr_type { - RXE_MR_TYPE_NONE, - RXE_MR_TYPE_DMA, - RXE_MR_TYPE_MR, -}; - enum rxe_mr_copy_dir { RXE_TO_MR_OBJ, RXE_FROM_MR_OBJ, @@ -300,6 +290,17 @@ struct rxe_map { struct rxe_phys_buf buf[RXE_BUF_PER_MAP]; }; +struct rxe_map_set { + struct rxe_map **map; + u64 va; + u64 iova; + size_t length; + u32 offset; + u32 nbuf; + int page_shift; + int page_mask; +}; + static inline int rkey_is_mw(u32 rkey) { u32 index = rkey >> 8; @@ -308,33 +309,29 @@ static inline int rkey_is_mw(u32 rkey) } struct rxe_mr { - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct ib_mr ibmr; struct ib_umem *umem; + u32 lkey; + u32 rkey; enum rxe_mr_state state; - enum rxe_mr_type type; - u64 va; - u64 iova; - size_t length; - u32 offset; + enum ib_mr_type type; int access; - int page_shift; - int page_mask; int map_shift; int map_mask; u32 num_buf; - u32 nbuf; u32 max_buf; u32 num_map; atomic_t num_mw; - struct rxe_map **map; + struct rxe_map_set *cur_map_set; + struct rxe_map_set *next_map_set; }; enum rxe_mw_state { @@ -345,18 +342,19 @@ enum rxe_mw_state { struct rxe_mw { struct ib_mw ibmw; - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; spinlock_t lock; enum rxe_mw_state state; struct rxe_qp *qp; /* Type 2 only */ struct rxe_mr *mr; + u32 rkey; int access; u64 addr; u64 length; }; struct rxe_mc_grp { - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; spinlock_t mcg_lock; /* guard group */ struct rxe_dev *rxe; struct list_head qp_list; @@ -367,7 +365,7 @@ struct rxe_mc_grp { }; struct rxe_mc_elem { - struct rxe_pool_entry pelem; + struct rxe_pool_elem elem; struct list_head qp_list; struct list_head grp_list; struct rxe_qp *qp; @@ -394,8 +392,6 @@ struct rxe_dev { struct net_device *ndev; - int xmit_errors; - struct rxe_pool uc_pool; struct rxe_pool pd_pool; struct rxe_pool ah_pool; @@ -469,19 +465,14 @@ static inline struct rxe_mw *to_rmw(struct ib_mw *mw) return mw ? 
container_of(mw, struct rxe_mw, ibmw) : NULL; } -static inline struct rxe_pd *mr_pd(struct rxe_mr *mr) +static inline struct rxe_pd *rxe_ah_pd(struct rxe_ah *ah) { - return to_rpd(mr->ibmr.pd); + return to_rpd(ah->ibah.pd); } -static inline u32 mr_lkey(struct rxe_mr *mr) -{ - return mr->ibmr.lkey; -} - -static inline u32 mr_rkey(struct rxe_mr *mr) +static inline struct rxe_pd *mr_pd(struct rxe_mr *mr) { - return mr->ibmr.rkey; + return to_rpd(mr->ibmr.pd); } static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw) @@ -489,13 +480,8 @@ static inline struct rxe_pd *rxe_mw_pd(struct rxe_mw *mw) return to_rpd(mw->ibmw.pd); } -static inline u32 rxe_mw_rkey(struct rxe_mw *mw) -{ - return mw->ibmw.rkey; -} - int rxe_register_device(struct rxe_dev *rxe, const char *ibdev_name); -void rxe_mc_cleanup(struct rxe_pool_entry *arg); +void rxe_mc_cleanup(struct rxe_pool_elem *elem); #endif /* RXE_VERBS_H */ diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index 368959ae9a8c..df03d84c6868 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -644,14 +644,9 @@ static inline struct siw_sqe *orq_get_current(struct siw_qp *qp) return &qp->orq[qp->orq_get % qp->attrs.orq_size]; } -static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp) -{ - return &qp->orq[qp->orq_put % qp->attrs.orq_size]; -} - static inline struct siw_sqe *orq_get_free(struct siw_qp *qp) { - struct siw_sqe *orq_e = orq_get_tail(qp); + struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size]; if (READ_ONCE(orq_e->flags) == 0) return orq_e; diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index 7a5ed86ffc9f..7acdd3c3a599 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -1951,8 +1951,6 @@ int siw_cm_init(void) void siw_cm_exit(void) { - if (siw_cm_wq) { - flush_workqueue(siw_cm_wq); + if (siw_cm_wq) destroy_workqueue(siw_cm_wq); - } } diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c index 9093e6a80b26..e5c586913d0b 100644 --- a/drivers/infiniband/sw/siw/siw_main.c +++ b/drivers/infiniband/sw/siw/siw_main.c @@ -98,15 +98,14 @@ static int siw_create_tx_threads(void) continue; siw_tx_thread[cpu] = - kthread_create(siw_run_sq, (unsigned long *)(long)cpu, - "siw_tx/%d", cpu); + kthread_run_on_cpu(siw_run_sq, + (unsigned long *)(long)cpu, + cpu, "siw_tx/%u"); if (IS_ERR(siw_tx_thread[cpu])) { siw_tx_thread[cpu] = NULL; continue; } - kthread_bind(siw_tx_thread[cpu], cpu); - wake_up_process(siw_tx_thread[cpu]); assigned++; } return assigned; diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 60116f20653c..875ea6f1b04a 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -1153,11 +1153,12 @@ static int siw_check_tx_fence(struct siw_qp *qp) spin_lock_irqsave(&qp->orq_lock, flags); - rreq = orq_get_current(qp); - /* free current orq entry */ + rreq = orq_get_current(qp); WRITE_ONCE(rreq->flags, 0); + qp->orq_get++; + if (qp->tx_ctx.orq_fence) { if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) { pr_warn("siw: [QP %u]: fence resume: bad status %d\n", @@ -1165,10 +1166,12 @@ static int siw_check_tx_fence(struct siw_qp *qp) rv = -EPROTO; goto out; } - /* resume SQ processing */ + /* resume SQ processing, if possible */ if (tx_waiting->sqe.opcode == SIW_OP_READ || tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) { - rreq = orq_get_tail(qp); + + /* SQ processing was 
stopped because of a full ORQ */ + rreq = orq_get_free(qp); if (unlikely(!rreq)) { pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp)); rv = -EPROTO; @@ -1181,15 +1184,14 @@ static int siw_check_tx_fence(struct siw_qp *qp) resume_tx = 1; } else if (siw_orq_empty(qp)) { + /* + * SQ processing was stopped by fenced work request. + * Resume since all previous Read's are now completed. + */ qp->tx_ctx.orq_fence = 0; resume_tx = 1; - } else { - pr_warn("siw: [QP %u]: fence resume: orq idx: %d:%d\n", - qp_id(qp), qp->orq_get, qp->orq_put); - rv = -EPROTO; } } - qp->orq_get++; out: spin_unlock_irqrestore(&qp->orq_lock, flags); diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index 1b36350601fa..54ef367b074a 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -8,6 +8,7 @@ #include <linux/uaccess.h> #include <linux/vmalloc.h> #include <linux/xarray.h> +#include <net/addrconf.h> #include <rdma/iw_cm.h> #include <rdma/ib_verbs.h> @@ -155,7 +156,8 @@ int siw_query_device(struct ib_device *base_dev, struct ib_device_attr *attr, attr->vendor_id = SIW_VENDOR_ID; attr->vendor_part_id = sdev->vendor_part_id; - memcpy(&attr->sys_image_guid, sdev->netdev->dev_addr, 6); + addrconf_addr_eui48((u8 *)&attr->sys_image_guid, + sdev->netdev->dev_addr); return 0; } @@ -311,7 +313,8 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) { siw_dbg(base_dev, "too many QP's\n"); - return -ENOMEM; + rv = -ENOMEM; + goto err_atomic; } if (attrs->qp_type != IB_QPT_RC) { siw_dbg(base_dev, "only RC QP's supported\n"); @@ -660,7 +663,7 @@ static int siw_copy_inline_sgl(const struct ib_send_wr *core_wr, kbuf += core_sge->length; core_sge++; } - sqe->sge[0].length = bytes > 0 ? bytes : 0; + sqe->sge[0].length = max(bytes, 0); sqe->num_sge = bytes > 0 ? 
1 : 0; return bytes; diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 684c2ddb16f5..fd9d7f2c4d64 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c @@ -1583,6 +1583,7 @@ int ipoib_cm_dev_init(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); int max_srq_sge, i; + u8 addr; INIT_LIST_HEAD(&priv->cm.passive_ids); INIT_LIST_HEAD(&priv->cm.reap_list); @@ -1636,7 +1637,8 @@ int ipoib_cm_dev_init(struct net_device *dev) } } - priv->dev->dev_addr[0] = IPOIB_FLAGS_RC; + addr = IPOIB_FLAGS_RC; + dev_addr_mod(dev, 0, &addr, 1); return 0; } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index ceabfb0b0a83..2c3dca41d3bd 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c @@ -1057,13 +1057,11 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) { union ib_gid search_gid; union ib_gid gid0; - union ib_gid *netdev_gid; int err; u16 index; u32 port; bool ret = false; - netdev_gid = (union ib_gid *)(priv->dev->dev_addr + 4); if (rdma_query_gid(priv->ca, priv->port, 0, &gid0)) return false; @@ -1073,7 +1071,8 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) * to do it later */ priv->local_gid.global.subnet_prefix = gid0.global.subnet_prefix; - netdev_gid->global.subnet_prefix = gid0.global.subnet_prefix; + dev_addr_mod(priv->dev, 4, (u8 *)&gid0.global.subnet_prefix, + sizeof(gid0.global.subnet_prefix)); search_gid.global.subnet_prefix = gid0.global.subnet_prefix; search_gid.global.interface_id = priv->local_gid.global.interface_id; @@ -1135,8 +1134,8 @@ static bool ipoib_dev_addr_changed_valid(struct ipoib_dev_priv *priv) if (!test_bit(IPOIB_FLAG_DEV_ADDR_CTRL, &priv->flags)) { memcpy(&priv->local_gid, &gid0, sizeof(priv->local_gid)); - memcpy(priv->dev->dev_addr + 4, &gid0, - sizeof(priv->local_gid)); + dev_addr_mod(priv->dev, 4, (u8 *)&gid0, + sizeof(priv->local_gid)); ret = true; } } diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 0aa8629fdf62..9934b8bd7f56 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c @@ -1696,6 +1696,7 @@ static void ipoib_dev_uninit_default(struct net_device *dev) static int ipoib_dev_init_default(struct net_device *dev) { struct ipoib_dev_priv *priv = ipoib_priv(dev); + u8 addr_mod[3]; ipoib_napi_add(dev); @@ -1723,9 +1724,10 @@ static int ipoib_dev_init_default(struct net_device *dev) } /* after qp created set dev address */ - priv->dev->dev_addr[1] = (priv->qp->qp_num >> 16) & 0xff; - priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff; - priv->dev->dev_addr[3] = (priv->qp->qp_num) & 0xff; + addr_mod[0] = (priv->qp->qp_num >> 16) & 0xff; + addr_mod[1] = (priv->qp->qp_num >> 8) & 0xff; + addr_mod[2] = (priv->qp->qp_num) & 0xff; + dev_addr_mod(priv->dev, 1, addr_mod, sizeof(addr_mod)); return 0; @@ -1886,8 +1888,7 @@ static int ipoib_parent_init(struct net_device *ndev) priv->ca->name, priv->port, result); return result; } - memcpy(priv->dev->dev_addr + 4, priv->local_gid.raw, - sizeof(union ib_gid)); + dev_addr_mod(priv->dev, 4, priv->local_gid.raw, sizeof(union ib_gid)); SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent); priv->dev->dev_port = priv->port - 1; @@ -1908,8 +1909,8 @@ static void ipoib_child_init(struct net_device *ndev) memcpy(&priv->local_gid, priv->dev->dev_addr + 4, sizeof(priv->local_gid)); else 
{ - memcpy(priv->dev->dev_addr, ppriv->dev->dev_addr, - INFINIBAND_ALEN); + __dev_addr_set(priv->dev, ppriv->dev->dev_addr, + INFINIBAND_ALEN); memcpy(&priv->local_gid, &ppriv->local_gid, sizeof(priv->local_gid)); } @@ -1997,7 +1998,6 @@ static void ipoib_ndo_uninit(struct net_device *dev) if (priv->wq) { /* See ipoib_mcast_carrier_on_task() */ WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags)); - flush_workqueue(priv->wq); destroy_workqueue(priv->wq); priv->wq = NULL; } @@ -2327,7 +2327,7 @@ static void set_base_guid(struct ipoib_dev_priv *priv, union ib_gid *gid) memcpy(&priv->local_gid.global.interface_id, &gid->global.interface_id, sizeof(gid->global.interface_id)); - memcpy(netdev->dev_addr + 4, &priv->local_gid, sizeof(priv->local_gid)); + dev_addr_mod(netdev, 4, (u8 *)&priv->local_gid, sizeof(priv->local_gid)); clear_bit(IPOIB_FLAG_DEV_ADDR_SET, &priv->flags); netif_addr_unlock_bh(netdev); diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c index 776e46ee95da..07e47021a71f 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.c +++ b/drivers/infiniband/ulp/iser/iscsi_iser.c @@ -113,10 +113,6 @@ bool iser_pi_enable = false; module_param_named(pi_enable, iser_pi_enable, bool, S_IRUGO); MODULE_PARM_DESC(pi_enable, "Enable T10-PI offload support (default:disabled)"); -int iser_pi_guard; -module_param_named(pi_guard, iser_pi_guard, int, S_IRUGO); -MODULE_PARM_DESC(pi_guard, "T10-PI guard_type [deprecated]"); - static int iscsi_iser_set(const char *val, const struct kernel_param *kp) { int ret; @@ -139,9 +135,8 @@ static int iscsi_iser_set(const char *val, const struct kernel_param *kp) * Notes: In case of data length errors or iscsi PDU completion failures * this routine will signal iscsi layer of connection failure. */ -void -iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr, - char *rx_data, int rx_data_len) +void iscsi_iser_recv(struct iscsi_conn *conn, struct iscsi_hdr *hdr, + char *rx_data, int rx_data_len) { int rc = 0; int datalen; @@ -176,8 +171,7 @@ error: * Netes: This routine can't fail, just assign iscsi task * hdr and max hdr size. */ -static int -iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) +static int iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) { struct iscsi_iser_task *iser_task = task->dd_data; @@ -198,9 +192,8 @@ iscsi_iser_pdu_alloc(struct iscsi_task *task, uint8_t opcode) * state mutex to avoid dereferencing the IB device which * may have already been terminated. */ -int -iser_initialize_task_headers(struct iscsi_task *task, - struct iser_tx_desc *tx_desc) +int iser_initialize_task_headers(struct iscsi_task *task, + struct iser_tx_desc *tx_desc) { struct iser_conn *iser_conn = task->conn->dd_data; struct iser_device *device = iser_conn->ib_conn.device; @@ -237,8 +230,7 @@ iser_initialize_task_headers(struct iscsi_task *task, * Return: Returns zero on success or -ENOMEM when failing * to init task headers (dma mapping error). */ -static int -iscsi_iser_task_init(struct iscsi_task *task) +static int iscsi_iser_task_init(struct iscsi_task *task) { struct iscsi_iser_task *iser_task = task->dd_data; int ret; @@ -272,8 +264,8 @@ iscsi_iser_task_init(struct iscsi_task *task) * xmit. 
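The ipoib hunks above stop writing into dev->dev_addr directly and go through dev_addr_mod() / __dev_addr_set() instead, as the netdev core now treats the hardware address as read-only outside its helpers. A small sketch of the pattern — the helper below and its 20-byte address layout are illustrative, not ipoib code:

#include <linux/netdevice.h>

/* hypothetical: store a 24-bit queue pair number at offset 1 of dev_addr */
static void example_set_qpn_in_dev_addr(struct net_device *dev, u32 qpn)
{
	u8 bytes[3];

	bytes[0] = (qpn >> 16) & 0xff;
	bytes[1] = (qpn >> 8) & 0xff;
	bytes[2] = qpn & 0xff;

	/* patches len bytes at the given offset; the core stays in sync */
	dev_addr_mod(dev, 1, bytes, sizeof(bytes));
}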
* **/ -static int -iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) +static int iscsi_iser_mtask_xmit(struct iscsi_conn *conn, + struct iscsi_task *task) { int error = 0; @@ -290,9 +282,8 @@ iscsi_iser_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task) return error; } -static int -iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn, - struct iscsi_task *task) +static int iscsi_iser_task_xmit_unsol_data(struct iscsi_conn *conn, + struct iscsi_task *task) { struct iscsi_r2t_info *r2t = &task->unsol_r2t; struct iscsi_data hdr; @@ -326,8 +317,7 @@ iscsi_iser_task_xmit_unsol_data_exit: * * Return: zero on success or escalates $error on failure. */ -static int -iscsi_iser_task_xmit(struct iscsi_task *task) +static int iscsi_iser_task_xmit(struct iscsi_task *task) { struct iscsi_conn *conn = task->conn; struct iscsi_iser_task *iser_task = task->dd_data; @@ -410,8 +400,7 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task) * * In addition the error sector is marked. */ -static u8 -iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) +static u8 iscsi_iser_check_protection(struct iscsi_task *task, sector_t *sector) { struct iscsi_iser_task *iser_task = task->dd_data; enum iser_data_dir dir = iser_task->dir[ISER_DIR_IN] ? @@ -460,11 +449,9 @@ iscsi_iser_conn_create(struct iscsi_cls_session *cls_session, * -EINVAL in case end-point doesn't exsits anymore or iser connection * state is not UP (teardown already started). */ -static int -iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, - struct iscsi_cls_conn *cls_conn, - uint64_t transport_eph, - int is_leading) +static int iscsi_iser_conn_bind(struct iscsi_cls_session *cls_session, + struct iscsi_cls_conn *cls_conn, + uint64_t transport_eph, int is_leading) { struct iscsi_conn *conn = cls_conn->dd_data; struct iser_conn *iser_conn; @@ -519,8 +506,7 @@ out: * from this point iscsi must call conn_stop in session/connection * teardown so iser transport must wait for it. */ -static int -iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) +static int iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) { struct iscsi_conn *iscsi_conn; struct iser_conn *iser_conn; @@ -542,8 +528,7 @@ iscsi_iser_conn_start(struct iscsi_cls_conn *cls_conn) * handle, so we call it under iser the state lock to protect against * this kind of race. */ -static void -iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) +static void iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) { struct iscsi_conn *conn = cls_conn->dd_data; struct iser_conn *iser_conn = conn->dd_data; @@ -578,8 +563,7 @@ iscsi_iser_conn_stop(struct iscsi_cls_conn *cls_conn, int flag) * * Removes and free iscsi host. 
*/ -static void -iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) +static void iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) { struct Scsi_Host *shost = iscsi_session_to_shost(cls_session); @@ -588,8 +572,7 @@ iscsi_iser_session_destroy(struct iscsi_cls_session *cls_session) iscsi_host_free(shost); } -static inline unsigned int -iser_dif_prot_caps(int prot_caps) +static inline unsigned int iser_dif_prot_caps(int prot_caps) { int ret = 0; @@ -708,9 +691,8 @@ free_host: return NULL; } -static int -iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn, - enum iscsi_param param, char *buf, int buflen) +static int iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn, + enum iscsi_param param, char *buf, int buflen) { int value; @@ -760,8 +742,8 @@ iscsi_iser_set_param(struct iscsi_cls_conn *cls_conn, * * Output connection statistics. */ -static void -iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *stats) +static void iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, + struct iscsi_stats *stats) { struct iscsi_conn *conn = cls_conn->dd_data; @@ -812,9 +794,9 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep, * Return: iscsi_endpoint created by iscsi layer or ERR_PTR(error) * if fails. */ -static struct iscsi_endpoint * -iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr, - int non_blocking) +static struct iscsi_endpoint *iscsi_iser_ep_connect(struct Scsi_Host *shost, + struct sockaddr *dst_addr, + int non_blocking) { int err; struct iser_conn *iser_conn; @@ -857,8 +839,7 @@ failure: * or more likely iser connection state transitioned to TEMINATING or * DOWN during the wait period. */ -static int -iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) +static int iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) { struct iser_conn *iser_conn = ep->dd_data; int rc; @@ -893,8 +874,7 @@ iscsi_iser_ep_poll(struct iscsi_endpoint *ep, int timeout_ms) * and cleanup or actually call it immediately in case we didn't pass * iscsi conn bind/start stage, thus it is safe. */ -static void -iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) +static void iscsi_iser_ep_disconnect(struct iscsi_endpoint *ep) { struct iser_conn *iser_conn = ep->dd_data; diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h index 9f6ac0a09a78..20af46c4e954 100644 --- a/drivers/infiniband/ulp/iser/iscsi_iser.h +++ b/drivers/infiniband/ulp/iser/iscsi_iser.h @@ -119,8 +119,6 @@ #define ISER_QP_MAX_RECV_DTOS (ISER_DEF_XMIT_CMDS_MAX) -#define ISER_MIN_POSTED_RX (ISER_DEF_XMIT_CMDS_MAX >> 2) - /* the max TX (send) WR supported by the iSER QP is defined by * * max_send_wr = T * (1 + D) + C ; D is how many inflight dataouts we expect * * to have at max for SCSI command. 
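 * (Illustrative arithmetic only, with made-up values rather than the
 *  driver's actual constants: T = 512 commands, D = 8 inflight dataouts
 *  per command and C = 30 miscellaneous control PDUs would give
 *  max_send_wr = 512 * (1 + 8) + 30 = 4638.)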
The tx posting & completion handling code * @@ -148,8 +146,6 @@ - ISER_MAX_RX_MISC_PDUS) / \ (1 + ISER_INFLIGHT_DATAOUTS)) -#define ISER_SIGNAL_CMD_COUNT 32 - /* Constant PDU lengths calculations */ #define ISER_HEADERS_LEN (sizeof(struct iser_ctrl) + sizeof(struct iscsi_hdr)) @@ -366,9 +362,6 @@ struct iser_fr_pool { * @qp: Connection Queue-pair * @cq: Connection completion queue * @cq_size: The number of max outstanding completions - * @post_recv_buf_count: post receive counter - * @sig_count: send work request signal count - * @rx_wr: receive work request for batch posts * @device: reference to iser device * @fr_pool: connection fast registration poool * @pi_support: Indicate device T10-PI support @@ -379,9 +372,6 @@ struct ib_conn { struct ib_qp *qp; struct ib_cq *cq; u32 cq_size; - int post_recv_buf_count; - u8 sig_count; - struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX]; struct iser_device *device; struct iser_fr_pool fr_pool; bool pi_support; @@ -397,8 +387,6 @@ struct ib_conn { * @state: connection logical state * @qp_max_recv_dtos: maximum number of data outs, corresponds * to max number of post recvs - * @qp_max_recv_dtos_mask: (qp_max_recv_dtos - 1) - * @min_posted_rx: (qp_max_recv_dtos >> 2) * @max_cmds: maximum cmds allowed for this connection * @name: connection peer portal * @release_work: deffered work for release job @@ -409,7 +397,6 @@ struct ib_conn { * (state is ISER_CONN_UP) * @conn_list: entry in ig conn list * @login_desc: login descriptor - * @rx_desc_head: head of rx_descs cyclic buffer * @rx_descs: rx buffers array (cyclic buffer) * @num_rx_descs: number of rx descriptors * @scsi_sg_tablesize: scsi host sg_tablesize @@ -422,8 +409,6 @@ struct iser_conn { struct iscsi_endpoint *ep; enum iser_conn_state state; unsigned qp_max_recv_dtos; - unsigned qp_max_recv_dtos_mask; - unsigned min_posted_rx; u16 max_cmds; char name[ISER_OBJECT_NAME_SIZE]; struct work_struct release_work; @@ -433,7 +418,6 @@ struct iser_conn { struct completion up_completion; struct list_head conn_list; struct iser_login_desc login_desc; - unsigned int rx_desc_head; struct iser_rx_desc *rx_descs; u32 num_rx_descs; unsigned short scsi_sg_tablesize; @@ -486,7 +470,6 @@ struct iser_global { extern struct iser_global ig; extern int iser_debug_level; extern bool iser_pi_enable; -extern int iser_pi_guard; extern unsigned int iser_max_sectors; extern bool iser_always_reg; @@ -543,9 +526,9 @@ int iser_connect(struct iser_conn *iser_conn, int non_blocking); int iser_post_recvl(struct iser_conn *iser_conn); -int iser_post_recvm(struct iser_conn *iser_conn, int count); -int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, - bool signal); +int iser_post_recvm(struct iser_conn *iser_conn, + struct iser_rx_desc *rx_desc); +int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc); int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, struct iser_data_buf *data, diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c index 27a6f75a9912..2490150d3085 100644 --- a/drivers/infiniband/ulp/iser/iser_initiator.c +++ b/drivers/infiniband/ulp/iser/iser_initiator.c @@ -95,11 +95,8 @@ static int iser_prepare_read_cmd(struct iscsi_task *task) * task->data[ISER_DIR_OUT].data_len, Protection size * is stored at task->prot[ISER_DIR_OUT].data_len */ -static int -iser_prepare_write_cmd(struct iscsi_task *task, - unsigned int imm_sz, - unsigned int unsol_sz, - unsigned int edtl) +static int iser_prepare_write_cmd(struct iscsi_task *task, 
unsigned int imm_sz, + unsigned int unsol_sz, unsigned int edtl) { struct iscsi_iser_task *iser_task = task->dd_data; struct iser_mem_reg *mem_reg; @@ -160,8 +157,8 @@ iser_prepare_write_cmd(struct iscsi_task *task, } /* creates a new tx descriptor and adds header regd buffer */ -static void iser_create_send_desc(struct iser_conn *iser_conn, - struct iser_tx_desc *tx_desc) +static void iser_create_send_desc(struct iser_conn *iser_conn, + struct iser_tx_desc *tx_desc) { struct iser_device *device = iser_conn->ib_conn.device; @@ -247,8 +244,6 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, struct iser_device *device = ib_conn->device; iser_conn->qp_max_recv_dtos = session->cmds_max; - iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */ - iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2; if (iser_alloc_fastreg_pool(ib_conn, session->scsi_cmds_max, iser_conn->pages_per_mr)) @@ -280,7 +275,6 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, rx_sg->lkey = device->pd->local_dma_lkey; } - iser_conn->rx_desc_head = 0; return 0; rx_desc_dma_map_failed: @@ -322,37 +316,35 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn) static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) { struct iser_conn *iser_conn = conn->dd_data; - struct ib_conn *ib_conn = &iser_conn->ib_conn; struct iscsi_session *session = conn->session; + int err = 0; + int i; iser_dbg("req op %x flags %x\n", req->opcode, req->flags); /* check if this is the last login - going to full feature phase */ if ((req->flags & ISCSI_FULL_FEATURE_PHASE) != ISCSI_FULL_FEATURE_PHASE) - return 0; - - /* - * Check that there is one posted recv buffer - * (for the last login response). - */ - WARN_ON(ib_conn->post_recv_buf_count != 1); + goto out; if (session->discovery_sess) { iser_info("Discovery session, re-using login RX buffer\n"); - return 0; - } else - iser_info("Normal session, posting batch of RX %d buffers\n", - iser_conn->min_posted_rx); - - /* Initial post receive buffers */ - if (iser_post_recvm(iser_conn, iser_conn->min_posted_rx)) - return -ENOMEM; + goto out; + } - return 0; -} + iser_info("Normal session, posting batch of RX %d buffers\n", + iser_conn->qp_max_recv_dtos - 1); -static inline bool iser_signal_comp(u8 sig_count) -{ - return ((sig_count % ISER_SIGNAL_CMD_COUNT) == 0); + /* + * Initial post receive buffers. + * There is one already posted recv buffer (for the last login + * response). Therefore, the first recv buffer is skipped here. 
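The hunks above replace iSER's batched receive replenishing (the post_recv_buf_count / min_posted_rx bookkeeping) with a one-to-one model: every rx descriptor is posted once up front, skipping index 0, which still holds the pending login response, and each completion later hands its own descriptor straight back (see the iser_login_rsp() and iser_task_rsp() hunks further down). A minimal sketch of that repost-from-completion pattern, with illustrative names and error handling elided:

#include <rdma/ib_verbs.h>

/* illustrative descriptor: a real driver also keeps the buffer and DMA state */
struct example_rx_desc {
	struct ib_cqe	cqe;	/* cqe.done = example_rx_done */
	struct ib_sge	sge;	/* maps the receive buffer */
};

static int example_repost_recv(struct ib_qp *qp, struct example_rx_desc *desc)
{
	struct ib_recv_wr wr;

	wr.wr_cqe = &desc->cqe;
	wr.sg_list = &desc->sge;
	wr.num_sge = 1;
	wr.next = NULL;

	return ib_post_recv(qp, &wr, NULL);
}

static void example_rx_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct example_rx_desc *desc =
		container_of(wc->wr_cqe, struct example_rx_desc, cqe);

	/* ... process the PDU referenced by desc ... */

	/* hand the same descriptor straight back to the receive queue */
	example_repost_recv(wc->qp, desc);
}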
+ */ + for (i = 1; i < iser_conn->qp_max_recv_dtos; i++) { + err = iser_post_recvm(iser_conn, &iser_conn->rx_descs[i]); + if (err) + goto out; + } +out: + return err; } /** @@ -360,8 +352,7 @@ static inline bool iser_signal_comp(u8 sig_count) * @conn: link to matching iscsi connection * @task: SCSI command task */ -int iser_send_command(struct iscsi_conn *conn, - struct iscsi_task *task) +int iser_send_command(struct iscsi_conn *conn, struct iscsi_task *task) { struct iser_conn *iser_conn = conn->dd_data; struct iscsi_iser_task *iser_task = task->dd_data; @@ -371,7 +362,6 @@ int iser_send_command(struct iscsi_conn *conn, struct iscsi_scsi_req *hdr = (struct iscsi_scsi_req *)task->hdr; struct scsi_cmnd *sc = task->sc; struct iser_tx_desc *tx_desc = &iser_task->desc; - u8 sig_count = ++iser_conn->ib_conn.sig_count; edtl = ntohl(hdr->data_length); @@ -418,8 +408,7 @@ int iser_send_command(struct iscsi_conn *conn, iser_task->status = ISER_TASK_STATUS_STARTED; - err = iser_post_send(&iser_conn->ib_conn, tx_desc, - iser_signal_comp(sig_count)); + err = iser_post_send(&iser_conn->ib_conn, tx_desc); if (!err) return 0; @@ -434,8 +423,7 @@ send_command_error: * @task: SCSI command task * @hdr: pointer to the LLD's iSCSI message header */ -int iser_send_data_out(struct iscsi_conn *conn, - struct iscsi_task *task, +int iser_send_data_out(struct iscsi_conn *conn, struct iscsi_task *task, struct iscsi_data *hdr) { struct iser_conn *iser_conn = conn->dd_data; @@ -487,7 +475,7 @@ int iser_send_data_out(struct iscsi_conn *conn, itt, buf_offset, data_seg_len); - err = iser_post_send(&iser_conn->ib_conn, tx_desc, true); + err = iser_post_send(&iser_conn->ib_conn, tx_desc); if (!err) return 0; @@ -497,8 +485,7 @@ send_data_out_error: return err; } -int iser_send_control(struct iscsi_conn *conn, - struct iscsi_task *task) +int iser_send_control(struct iscsi_conn *conn, struct iscsi_task *task) { struct iser_conn *iser_conn = conn->dd_data; struct iscsi_iser_task *iser_task = task->dd_data; @@ -550,7 +537,7 @@ int iser_send_control(struct iscsi_conn *conn, goto send_control_error; } - err = iser_post_send(&iser_conn->ib_conn, mdesc, true); + err = iser_post_send(&iser_conn->ib_conn, mdesc); if (!err) return 0; @@ -590,11 +577,14 @@ void iser_login_rsp(struct ib_cq *cq, struct ib_wc *wc) desc->rsp_dma, ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); - ib_conn->post_recv_buf_count--; + if (iser_conn->iscsi_conn->session->discovery_sess) + return; + + /* Post the first RX buffer that is skipped in iser_post_rx_bufs() */ + iser_post_recvm(iser_conn, iser_conn->rx_descs); } -static inline int -iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) +static inline int iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) { if (unlikely((!desc->sig_protected && rkey != desc->rsc.mr->rkey) || (desc->sig_protected && rkey != desc->rsc.sig_mr->rkey))) { @@ -607,10 +597,8 @@ iser_inv_desc(struct iser_fr_desc *desc, u32 rkey) return 0; } -static int -iser_check_remote_inv(struct iser_conn *iser_conn, - struct ib_wc *wc, - struct iscsi_hdr *hdr) +static int iser_check_remote_inv(struct iser_conn *iser_conn, struct ib_wc *wc, + struct iscsi_hdr *hdr) { if (wc->wc_flags & IB_WC_WITH_INVALIDATE) { struct iscsi_task *task; @@ -657,8 +645,7 @@ void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc) struct iser_conn *iser_conn = to_iser_conn(ib_conn); struct iser_rx_desc *desc = iser_rx(wc->wr_cqe); struct iscsi_hdr *hdr; - int length; - int outstanding, count, err; + int length, err; if (unlikely(wc->status != IB_WC_SUCCESS)) { iser_err_comp(wc, 
"task_rsp"); @@ -687,20 +674,9 @@ void iser_task_rsp(struct ib_cq *cq, struct ib_wc *wc) desc->dma_addr, ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE); - /* decrementing conn->post_recv_buf_count only --after-- freeing the * - * task eliminates the need to worry on tasks which are completed in * - * parallel to the execution of iser_conn_term. So the code that waits * - * for the posted rx bufs refcount to become zero handles everything */ - ib_conn->post_recv_buf_count--; - - outstanding = ib_conn->post_recv_buf_count; - if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) { - count = min(iser_conn->qp_max_recv_dtos - outstanding, - iser_conn->min_posted_rx); - err = iser_post_recvm(iser_conn, count); - if (err) - iser_err("posting %d rx bufs err %d\n", count, err); - } + err = iser_post_recvm(iser_conn, desc); + if (err) + iser_err("posting rx buffer err %d\n", err); } void iser_cmd_comp(struct ib_cq *cq, struct ib_wc *wc) diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c index 9776b755d848..660982625488 100644 --- a/drivers/infiniband/ulp/iser/iser_memory.c +++ b/drivers/infiniband/ulp/iser/iser_memory.c @@ -44,8 +44,7 @@ void iser_reg_comp(struct ib_cq *cq, struct ib_wc *wc) iser_err_comp(wc, "memreg"); } -static struct iser_fr_desc * -iser_reg_desc_get_fr(struct ib_conn *ib_conn) +static struct iser_fr_desc *iser_reg_desc_get_fr(struct ib_conn *ib_conn) { struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; struct iser_fr_desc *desc; @@ -60,9 +59,8 @@ iser_reg_desc_get_fr(struct ib_conn *ib_conn) return desc; } -static void -iser_reg_desc_put_fr(struct ib_conn *ib_conn, - struct iser_fr_desc *desc) +static void iser_reg_desc_put_fr(struct ib_conn *ib_conn, + struct iser_fr_desc *desc) { struct iser_fr_pool *fr_pool = &ib_conn->fr_pool; unsigned long flags; @@ -73,9 +71,9 @@ iser_reg_desc_put_fr(struct ib_conn *ib_conn, } int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, - struct iser_data_buf *data, - enum iser_data_dir iser_dir, - enum dma_data_direction dma_dir) + struct iser_data_buf *data, + enum iser_data_dir iser_dir, + enum dma_data_direction dma_dir) { struct ib_device *dev; @@ -100,9 +98,8 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, ib_dma_unmap_sg(dev, data->sg, data->size, dir); } -static int -iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem, - struct iser_mem_reg *reg) +static int iser_reg_dma(struct iser_device *device, struct iser_data_buf *mem, + struct iser_mem_reg *reg) { struct scatterlist *sg = mem->sg; @@ -154,8 +151,8 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, reg->mem_h = NULL; } -static void -iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain) +static void iser_set_dif_domain(struct scsi_cmnd *sc, + struct ib_sig_domain *domain) { domain->sig_type = IB_SIG_TYPE_T10_DIF; domain->sig.dif.pi_interval = scsi_prot_interval(sc); @@ -171,8 +168,8 @@ iser_set_dif_domain(struct scsi_cmnd *sc, struct ib_sig_domain *domain) domain->sig.dif.ref_remap = true; } -static int -iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs) +static int iser_set_sig_attrs(struct scsi_cmnd *sc, + struct ib_sig_attrs *sig_attrs) { switch (scsi_get_prot_op(sc)) { case SCSI_PROT_WRITE_INSERT: @@ -205,8 +202,7 @@ iser_set_sig_attrs(struct scsi_cmnd *sc, struct ib_sig_attrs *sig_attrs) return 0; } -static inline void -iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) +static inline void iser_set_prot_checks(struct 
scsi_cmnd *sc, u8 *mask) { *mask = 0; if (sc->prot_flags & SCSI_PROT_REF_CHECK) @@ -215,11 +211,8 @@ iser_set_prot_checks(struct scsi_cmnd *sc, u8 *mask) *mask |= IB_SIG_CHECK_GUARD; } -static inline void -iser_inv_rkey(struct ib_send_wr *inv_wr, - struct ib_mr *mr, - struct ib_cqe *cqe, - struct ib_send_wr *next_wr) +static inline void iser_inv_rkey(struct ib_send_wr *inv_wr, struct ib_mr *mr, + struct ib_cqe *cqe, struct ib_send_wr *next_wr) { inv_wr->opcode = IB_WR_LOCAL_INV; inv_wr->wr_cqe = cqe; @@ -229,12 +222,11 @@ iser_inv_rkey(struct ib_send_wr *inv_wr, inv_wr->next = next_wr; } -static int -iser_reg_sig_mr(struct iscsi_iser_task *iser_task, - struct iser_data_buf *mem, - struct iser_data_buf *sig_mem, - struct iser_reg_resources *rsc, - struct iser_mem_reg *sig_reg) +static int iser_reg_sig_mr(struct iscsi_iser_task *iser_task, + struct iser_data_buf *mem, + struct iser_data_buf *sig_mem, + struct iser_reg_resources *rsc, + struct iser_mem_reg *sig_reg) { struct iser_tx_desc *tx_desc = &iser_task->desc; struct ib_cqe *cqe = &iser_task->iser_conn->ib_conn.reg_cqe; @@ -335,12 +327,10 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, return 0; } -static int -iser_reg_data_sg(struct iscsi_iser_task *task, - struct iser_data_buf *mem, - struct iser_fr_desc *desc, - bool use_dma_key, - struct iser_mem_reg *reg) +static int iser_reg_data_sg(struct iscsi_iser_task *task, + struct iser_data_buf *mem, + struct iser_fr_desc *desc, bool use_dma_key, + struct iser_mem_reg *reg) { struct iser_device *device = task->iser_conn->ib_conn.device; diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index b566f7cb7797..8bf87b073d9b 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c @@ -265,14 +265,14 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn) memset(&init_attr, 0, sizeof(init_attr)); init_attr.event_handler = iser_qp_event_callback; - init_attr.qp_context = (void *)ib_conn; - init_attr.send_cq = ib_conn->cq; - init_attr.recv_cq = ib_conn->cq; - init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; + init_attr.qp_context = (void *)ib_conn; + init_attr.send_cq = ib_conn->cq; + init_attr.recv_cq = ib_conn->cq; + init_attr.cap.max_recv_wr = ISER_QP_MAX_RECV_DTOS; init_attr.cap.max_send_sge = 2; init_attr.cap.max_recv_sge = 1; - init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; - init_attr.qp_type = IB_QPT_RC; + init_attr.sq_sig_type = IB_SIGNAL_REQ_WR; + init_attr.qp_type = IB_QPT_RC; init_attr.cap.max_send_wr = max_send_wr; if (ib_conn->pi_support) init_attr.create_flags |= IB_QP_CREATE_INTEGRITY_EN; @@ -283,9 +283,8 @@ static int iser_create_ib_conn_res(struct ib_conn *ib_conn) goto out_err; ib_conn->qp = ib_conn->cma_id->qp; - iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", - ib_conn, ib_conn->cma_id, - ib_conn->cma_id->qp, max_send_wr); + iser_info("setting conn %p cma_id %p qp %p max_send_wr %d\n", ib_conn, + ib_conn->cma_id, ib_conn->cma_id->qp, max_send_wr); return ret; out_err: @@ -313,7 +312,7 @@ struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id) goto inc_refcnt; device = kzalloc(sizeof *device, GFP_KERNEL); - if (device == NULL) + if (!device) goto out; /* assign this device to the device */ @@ -392,8 +391,7 @@ void iser_release_work(struct work_struct *work) * so the cm_id removal is out of here. It is Safe to * be invoked multiple times. 
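iser_inv_rkey() in the context above shows a common verbs pattern: a LOCAL_INV work request is linked ahead of the following WR through ->next, so a single ib_post_send() of the chain invalidates the stale rkey and then issues the next operation in order. A generic sketch of that chaining — the helper name is illustrative, and wr_cqe setup is elided since a completion is typically requested only on the final WR of the chain:

#include <rdma/ib_verbs.h>

static int example_post_inv_then_send(struct ib_qp *qp, u32 old_rkey,
				      struct ib_send_wr *next_wr)
{
	struct ib_send_wr inv_wr = {};

	inv_wr.opcode = IB_WR_LOCAL_INV;
	inv_wr.ex.invalidate_rkey = old_rkey;
	inv_wr.next = next_wr;		/* the HCA executes the list in order */

	return ib_post_send(qp, &inv_wr, NULL);
}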
*/ -static void iser_free_ib_conn_res(struct iser_conn *iser_conn, - bool destroy) +static void iser_free_ib_conn_res(struct iser_conn *iser_conn, bool destroy) { struct ib_conn *ib_conn = &iser_conn->ib_conn; struct iser_device *device = ib_conn->device; @@ -401,7 +399,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn, iser_info("freeing conn %p cma_id %p qp %p\n", iser_conn, ib_conn->cma_id, ib_conn->qp); - if (ib_conn->qp != NULL) { + if (ib_conn->qp) { rdma_destroy_qp(ib_conn->cma_id); ib_cq_pool_put(ib_conn->cq, ib_conn->cq_size); ib_conn->qp = NULL; @@ -411,7 +409,7 @@ static void iser_free_ib_conn_res(struct iser_conn *iser_conn, if (iser_conn->rx_descs) iser_free_rx_descriptors(iser_conn); - if (device != NULL) { + if (device) { iser_device_try_release(device); ib_conn->device = NULL; } @@ -445,7 +443,7 @@ void iser_conn_release(struct iser_conn *iser_conn) iser_free_ib_conn_res(iser_conn, true); mutex_unlock(&iser_conn->state_mutex); - if (ib_conn->cma_id != NULL) { + if (ib_conn->cma_id) { rdma_destroy_id(ib_conn->cma_id); ib_conn->cma_id = NULL; } @@ -501,13 +499,12 @@ static void iser_connect_error(struct rdma_cm_id *cma_id) { struct iser_conn *iser_conn; - iser_conn = (struct iser_conn *)cma_id->context; + iser_conn = cma_id->context; iser_conn->state = ISER_CONN_TERMINATING; } -static void -iser_calc_scsi_params(struct iser_conn *iser_conn, - unsigned int max_sectors) +static void iser_calc_scsi_params(struct iser_conn *iser_conn, + unsigned int max_sectors) { struct iser_device *device = iser_conn->ib_conn.device; struct ib_device_attr *attr = &device->ib_device->attrs; @@ -545,11 +542,11 @@ iser_calc_scsi_params(struct iser_conn *iser_conn, static void iser_addr_handler(struct rdma_cm_id *cma_id) { struct iser_device *device; - struct iser_conn *iser_conn; - struct ib_conn *ib_conn; + struct iser_conn *iser_conn; + struct ib_conn *ib_conn; int ret; - iser_conn = (struct iser_conn *)cma_id->context; + iser_conn = cma_id->context; if (iser_conn->state != ISER_CONN_PENDING) /* bailout */ return; @@ -593,9 +590,9 @@ static void iser_addr_handler(struct rdma_cm_id *cma_id) static void iser_route_handler(struct rdma_cm_id *cma_id) { struct rdma_conn_param conn_param; - int ret; + int ret; struct iser_cm_hdr req_hdr; - struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; + struct iser_conn *iser_conn = cma_id->context; struct ib_conn *ib_conn = &iser_conn->ib_conn; struct ib_device *ib_dev = ib_conn->device->ib_device; @@ -609,9 +606,9 @@ static void iser_route_handler(struct rdma_cm_id *cma_id) memset(&conn_param, 0, sizeof conn_param); conn_param.responder_resources = ib_dev->attrs.max_qp_rd_atom; - conn_param.initiator_depth = 1; - conn_param.retry_count = 7; - conn_param.rnr_retry_count = 6; + conn_param.initiator_depth = 1; + conn_param.retry_count = 7; + conn_param.rnr_retry_count = 6; memset(&req_hdr, 0, sizeof(req_hdr)); req_hdr.flags = ISER_ZBVA_NOT_SUP; @@ -638,7 +635,7 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id, struct ib_qp_attr attr; struct ib_qp_init_attr init_attr; - iser_conn = (struct iser_conn *)cma_id->context; + iser_conn = cma_id->context; if (iser_conn->state != ISER_CONN_PENDING) /* bailout */ return; @@ -661,7 +658,7 @@ static void iser_connected_handler(struct rdma_cm_id *cma_id, static void iser_disconnected_handler(struct rdma_cm_id *cma_id) { - struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; + struct iser_conn *iser_conn = cma_id->context; if (iser_conn_terminate(iser_conn)) { if 
(iser_conn->iscsi_conn) @@ -675,7 +672,7 @@ static void iser_disconnected_handler(struct rdma_cm_id *cma_id) static void iser_cleanup_handler(struct rdma_cm_id *cma_id, bool destroy) { - struct iser_conn *iser_conn = (struct iser_conn *)cma_id->context; + struct iser_conn *iser_conn = cma_id->context; /* * We are not guaranteed that we visited disconnected_handler @@ -687,12 +684,13 @@ static void iser_cleanup_handler(struct rdma_cm_id *cma_id, complete(&iser_conn->ib_completion); } -static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) +static int iser_cma_handler(struct rdma_cm_id *cma_id, + struct rdma_cm_event *event) { struct iser_conn *iser_conn; int ret = 0; - iser_conn = (struct iser_conn *)cma_id->context; + iser_conn = cma_id->context; iser_info("%s (%d): status %d conn %p id %p\n", rdma_event_msg(event->event), event->event, event->status, cma_id->context, cma_id); @@ -757,7 +755,6 @@ void iser_conn_init(struct iser_conn *iser_conn) INIT_LIST_HEAD(&iser_conn->conn_list); mutex_init(&iser_conn->state_mutex); - ib_conn->post_recv_buf_count = 0; ib_conn->reg_cqe.done = iser_reg_comp; } @@ -765,10 +762,8 @@ void iser_conn_init(struct iser_conn *iser_conn) * starts the process of connecting to the target * sleeps until the connection is established or rejected */ -int iser_connect(struct iser_conn *iser_conn, - struct sockaddr *src_addr, - struct sockaddr *dst_addr, - int non_blocking) +int iser_connect(struct iser_conn *iser_conn, struct sockaddr *src_addr, + struct sockaddr *dst_addr, int non_blocking) { struct ib_conn *ib_conn = &iser_conn->ib_conn; int err = 0; @@ -785,8 +780,7 @@ int iser_connect(struct iser_conn *iser_conn, iser_conn->state = ISER_CONN_PENDING; ib_conn->cma_id = rdma_create_id(&init_net, iser_cma_handler, - (void *)iser_conn, - RDMA_PS_TCP, IB_QPT_RC); + iser_conn, RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(ib_conn->cma_id)) { err = PTR_ERR(ib_conn->cma_id); iser_err("rdma_create_id failed: %d\n", err); @@ -829,7 +823,7 @@ int iser_post_recvl(struct iser_conn *iser_conn) struct ib_conn *ib_conn = &iser_conn->ib_conn; struct iser_login_desc *desc = &iser_conn->login_desc; struct ib_recv_wr wr; - int ib_ret; + int ret; desc->sge.addr = desc->rsp_dma; desc->sge.length = ISER_RX_LOGIN_SIZE; @@ -841,46 +835,30 @@ int iser_post_recvl(struct iser_conn *iser_conn) wr.num_sge = 1; wr.next = NULL; - ib_conn->post_recv_buf_count++; - ib_ret = ib_post_recv(ib_conn->qp, &wr, NULL); - if (ib_ret) { - iser_err("ib_post_recv failed ret=%d\n", ib_ret); - ib_conn->post_recv_buf_count--; - } + ret = ib_post_recv(ib_conn->qp, &wr, NULL); + if (unlikely(ret)) + iser_err("ib_post_recv login failed ret=%d\n", ret); - return ib_ret; + return ret; } -int iser_post_recvm(struct iser_conn *iser_conn, int count) +int iser_post_recvm(struct iser_conn *iser_conn, struct iser_rx_desc *rx_desc) { struct ib_conn *ib_conn = &iser_conn->ib_conn; - unsigned int my_rx_head = iser_conn->rx_desc_head; - struct iser_rx_desc *rx_desc; - struct ib_recv_wr *wr; - int i, ib_ret; - - for (wr = ib_conn->rx_wr, i = 0; i < count; i++, wr++) { - rx_desc = &iser_conn->rx_descs[my_rx_head]; - rx_desc->cqe.done = iser_task_rsp; - wr->wr_cqe = &rx_desc->cqe; - wr->sg_list = &rx_desc->rx_sg; - wr->num_sge = 1; - wr->next = wr + 1; - my_rx_head = (my_rx_head + 1) & iser_conn->qp_max_recv_dtos_mask; - } + struct ib_recv_wr wr; + int ret; - wr--; - wr->next = NULL; /* mark end of work requests list */ + rx_desc->cqe.done = iser_task_rsp; + wr.wr_cqe = &rx_desc->cqe; + wr.sg_list = 
&rx_desc->rx_sg; + wr.num_sge = 1; + wr.next = NULL; - ib_conn->post_recv_buf_count += count; - ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, NULL); - if (unlikely(ib_ret)) { - iser_err("ib_post_recv failed ret=%d\n", ib_ret); - ib_conn->post_recv_buf_count -= count; - } else - iser_conn->rx_desc_head = my_rx_head; + ret = ib_post_recv(ib_conn->qp, &wr, NULL); + if (unlikely(ret)) + iser_err("ib_post_recv failed ret=%d\n", ret); - return ib_ret; + return ret; } @@ -888,16 +866,14 @@ int iser_post_recvm(struct iser_conn *iser_conn, int count) * iser_post_send - Initiate a Send DTO operation * @ib_conn: connection RDMA resources * @tx_desc: iSER TX descriptor - * @signal: true to send work request as SIGNALED * * Return: 0 on success, -1 on failure */ -int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, - bool signal) +int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc) { struct ib_send_wr *wr = &tx_desc->send_wr; struct ib_send_wr *first_wr; - int ib_ret; + int ret; ib_dma_sync_single_for_device(ib_conn->device->ib_device, tx_desc->dma_addr, ISER_HEADERS_LEN, @@ -908,7 +884,7 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, wr->sg_list = tx_desc->tx_sg; wr->num_sge = tx_desc->num_sge; wr->opcode = IB_WR_SEND; - wr->send_flags = signal ? IB_SEND_SIGNALED : 0; + wr->send_flags = IB_SEND_SIGNALED; if (tx_desc->inv_wr.next) first_wr = &tx_desc->inv_wr; @@ -917,12 +893,12 @@ int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc, else first_wr = wr; - ib_ret = ib_post_send(ib_conn->qp, first_wr, NULL); - if (unlikely(ib_ret)) + ret = ib_post_send(ib_conn->qp, first_wr, NULL); + if (unlikely(ret)) iser_err("ib_post_send failed, ret:%d opcode:%d\n", - ib_ret, wr->opcode); + ret, wr->opcode); - return ib_ret; + return ret; } u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, diff --git a/drivers/infiniband/ulp/opa_vnic/Kconfig b/drivers/infiniband/ulp/opa_vnic/Kconfig index e84248587187..4d43d055fa8e 100644 --- a/drivers/infiniband/ulp/opa_vnic/Kconfig +++ b/drivers/infiniband/ulp/opa_vnic/Kconfig @@ -1,9 +1,9 @@ # SPDX-License-Identifier: GPL-2.0-only config INFINIBAND_OPA_VNIC - tristate "Intel OPA VNIC support" + tristate "Cornelis OPX VNIC support" depends on X86_64 && INFINIBAND help - This is Omni-Path (OPA) Virtual Network Interface Controller (VNIC) + This is Omni-Path Express (OPX) Virtual Network Interface Controller (VNIC) driver for Ethernet over Omni-Path feature. It implements the HW independent VNIC functionality. It interfaces with Linux stack for data path and IB MAD for the control path. diff --git a/drivers/infiniband/ulp/opa_vnic/Makefile b/drivers/infiniband/ulp/opa_vnic/Makefile index a8c21d140ccb..196183817cdc 100644 --- a/drivers/infiniband/ulp/opa_vnic/Makefile +++ b/drivers/infiniband/ulp/opa_vnic/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only -# Makefile - Intel Omni-Path Virtual Network Controller driver +# Makefile - Cornelis Omni-Path Express Virtual Network Controller driver # Copyright(c) 2017, Intel Corporation. +# Copyright(c) 2021, Cornelis Networks. # obj-$(CONFIG_INFINIBAND_OPA_VNIC) += opa_vnic.o diff --git a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c index cecf0f7cadf9..21c6cea8b1db 100644 --- a/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c +++ b/drivers/infiniband/ulp/opa_vnic/opa_vnic_vema.c @@ -1,5 +1,6 @@ /* * Copyright(c) 2017 Intel Corporation. 
+ * Copyright(c) 2021 Cornelis Networks. * * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. @@ -46,7 +47,7 @@ */ /* - * This file contains OPA Virtual Network Interface Controller (VNIC) + * This file contains OPX Virtual Network Interface Controller (VNIC) * Ethernet Management Agent (EMA) driver */ @@ -1051,5 +1052,5 @@ static void opa_vnic_deinit(void) module_exit(opa_vnic_deinit); MODULE_LICENSE("Dual BSD/GPL"); -MODULE_AUTHOR("Intel Corporation"); -MODULE_DESCRIPTION("Intel OPA Virtual Network driver"); +MODULE_AUTHOR("Cornelis Networks"); +MODULE_DESCRIPTION("Cornelis OPX Virtual Network driver"); diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c index 5e780bdd763d..385a19846c24 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-stats.c @@ -13,13 +13,13 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); - struct rtrs_clt_stats *stats = sess->stats; + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); + struct rtrs_clt_stats *stats = clt_path->stats; struct rtrs_clt_stats_pcpu *s; int cpu; cpu = raw_smp_processor_id(); - s = this_cpu_ptr(stats->pcpu_stats); + s = get_cpu_ptr(stats->pcpu_stats); if (con->cpu != cpu) { s->cpu_migr.to++; @@ -27,56 +27,62 @@ void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con) s = per_cpu_ptr(stats->pcpu_stats, con->cpu); atomic_inc(&s->cpu_migr.from); } + put_cpu_ptr(stats->pcpu_stats); } void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats) { struct rtrs_clt_stats_pcpu *s; - s = this_cpu_ptr(stats->pcpu_stats); + s = get_cpu_ptr(stats->pcpu_stats); s->rdma.failover_cnt++; + put_cpu_ptr(stats->pcpu_stats); } -int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats, - char *buf, size_t len) +int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf) { struct rtrs_clt_stats_pcpu *s; size_t used; int cpu; - used = scnprintf(buf, len, " "); - for_each_possible_cpu(cpu) - used += scnprintf(buf + used, len - used, " CPU%u", cpu); - - used += scnprintf(buf + used, len - used, "\nfrom:"); + used = 0; for_each_possible_cpu(cpu) { s = per_cpu_ptr(stats->pcpu_stats, cpu); - used += scnprintf(buf + used, len - used, " %d", + used += sysfs_emit_at(buf, used, "%d ", atomic_read(&s->cpu_migr.from)); } - used += scnprintf(buf + used, len - used, "\nto :"); + used += sysfs_emit_at(buf, used, "\n"); + + return used; +} + +int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf) +{ + struct rtrs_clt_stats_pcpu *s; + + size_t used; + int cpu; + + used = 0; for_each_possible_cpu(cpu) { s = per_cpu_ptr(stats->pcpu_stats, cpu); - used += scnprintf(buf + used, len - used, " %d", - s->cpu_migr.to); + used += sysfs_emit_at(buf, used, "%d ", s->cpu_migr.to); } - used += scnprintf(buf + used, len - used, "\n"); + + used += sysfs_emit_at(buf, used, "\n"); return used; } -int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf, - size_t len) +int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf) { - return scnprintf(buf, len, "%d %d\n", - stats->reconnects.successful_cnt, - stats->reconnects.fail_cnt); + return sysfs_emit(buf, "%d %d\n", stats->reconnects.successful_cnt, + stats->reconnects.fail_cnt); } -ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, - char *page, size_t len) +ssize_t 
rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, char *page) { struct rtrs_clt_stats_rdma sum; struct rtrs_clt_stats_rdma *r; @@ -94,16 +100,15 @@ ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, sum.failover_cnt += r->failover_cnt; } - return scnprintf(page, len, "%llu %llu %llu %llu %u %llu\n", + return sysfs_emit(page, "%llu %llu %llu %llu %u %llu\n", sum.dir[READ].cnt, sum.dir[READ].size_total, sum.dir[WRITE].cnt, sum.dir[WRITE].size_total, atomic_read(&stats->inflight), sum.failover_cnt); } -ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s, - char *page, size_t len) +ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s, char *page) { - return scnprintf(page, len, "echo 1 to reset all statistics\n"); + return sysfs_emit(page, "echo 1 to reset all statistics\n"); } int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable) @@ -166,16 +171,17 @@ static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats, { struct rtrs_clt_stats_pcpu *s; - s = this_cpu_ptr(stats->pcpu_stats); + s = get_cpu_ptr(stats->pcpu_stats); s->rdma.dir[d].cnt++; s->rdma.dir[d].size_total += size; + put_cpu_ptr(stats->pcpu_stats); } void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir) { struct rtrs_clt_con *con = req->con; - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); - struct rtrs_clt_stats *stats = sess->stats; + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); + struct rtrs_clt_stats *stats = clt_path->stats; unsigned int len; len = req->usr_len + req->data_len; diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c index 4ee592ccf979..b4fa473b7888 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt-sysfs.c @@ -16,21 +16,21 @@ #define MIN_MAX_RECONN_ATT -1 #define MAX_MAX_RECONN_ATT 9999 -static void rtrs_clt_sess_release(struct kobject *kobj) +static void rtrs_clt_path_release(struct kobject *kobj) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); - free_sess(sess); + free_path(clt_path); } static struct kobj_type ktype_sess = { .sysfs_ops = &kobj_sysfs_ops, - .release = rtrs_clt_sess_release + .release = rtrs_clt_path_release }; -static void rtrs_clt_sess_stats_release(struct kobject *kobj) +static void rtrs_clt_path_stats_release(struct kobject *kobj) { struct rtrs_clt_stats *stats; @@ -43,14 +43,15 @@ static void rtrs_clt_sess_stats_release(struct kobject *kobj) static struct kobj_type ktype_stats = { .sysfs_ops = &kobj_sysfs_ops, - .release = rtrs_clt_sess_stats_release, + .release = rtrs_clt_path_stats_release, }; static ssize_t max_reconnect_attempts_show(struct device *dev, struct device_attribute *attr, char *page) { - struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev); + struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess, + dev); return sysfs_emit(page, "%d\n", rtrs_clt_get_max_reconnect_attempts(clt)); @@ -63,7 +64,8 @@ static ssize_t max_reconnect_attempts_store(struct device *dev, { int value; int ret; - struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev); + struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess, + dev); ret = kstrtoint(buf, 10, &value); if (ret) { @@ -90,9 +92,9 @@ static ssize_t mpath_policy_show(struct device *dev, struct device_attribute *attr, char *page) { - struct rtrs_clt *clt; + struct 
rtrs_clt_sess *clt; - clt = container_of(dev, struct rtrs_clt, dev); + clt = container_of(dev, struct rtrs_clt_sess, dev); switch (clt->mp_policy) { case MP_POLICY_RR: @@ -114,12 +116,12 @@ static ssize_t mpath_policy_store(struct device *dev, const char *buf, size_t count) { - struct rtrs_clt *clt; + struct rtrs_clt_sess *clt; int value; int ret; size_t len = 0; - clt = container_of(dev, struct rtrs_clt, dev); + clt = container_of(dev, struct rtrs_clt_sess, dev); ret = kstrtoint(buf, 10, &value); if (!ret && (value == MP_POLICY_RR || @@ -169,12 +171,12 @@ static ssize_t add_path_store(struct device *dev, .src = &srcaddr, .dst = &dstaddr }; - struct rtrs_clt *clt; + struct rtrs_clt_sess *clt; const char *nl; size_t len; int err; - clt = container_of(dev, struct rtrs_clt, dev); + clt = container_of(dev, struct rtrs_clt_sess, dev); nl = strchr(buf, '\n'); if (nl) @@ -197,10 +199,10 @@ static DEVICE_ATTR_RW(add_path); static ssize_t rtrs_clt_state_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); - if (sess->state == RTRS_CLT_CONNECTED) + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); + if (clt_path->state == RTRS_CLT_CONNECTED) return sysfs_emit(page, "connected\n"); return sysfs_emit(page, "disconnected\n"); @@ -219,16 +221,16 @@ static ssize_t rtrs_clt_reconnect_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; int ret; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); if (!sysfs_streq(buf, "1")) { - rtrs_err(sess->clt, "%s: unknown value: '%s'\n", + rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n", attr->attr.name, buf); return -EINVAL; } - ret = rtrs_clt_reconnect_from_sysfs(sess); + ret = rtrs_clt_reconnect_from_sysfs(clt_path); if (ret) return ret; @@ -249,15 +251,15 @@ static ssize_t rtrs_clt_disconnect_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); if (!sysfs_streq(buf, "1")) { - rtrs_err(sess->clt, "%s: unknown value: '%s'\n", + rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n", attr->attr.name, buf); return -EINVAL; } - rtrs_clt_close_conns(sess, true); + rtrs_clt_close_conns(clt_path, true); return count; } @@ -276,16 +278,16 @@ static ssize_t rtrs_clt_remove_path_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; int ret; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); if (!sysfs_streq(buf, "1")) { - rtrs_err(sess->clt, "%s: unknown value: '%s'\n", + rtrs_err(clt_path->clt, "%s: unknown value: '%s'\n", attr->attr.name, buf); return -EINVAL; } - ret = rtrs_clt_remove_path_from_sysfs(sess, &attr->attr); + ret = rtrs_clt_remove_path_from_sysfs(clt_path, &attr->attr); if (ret) return ret; @@ -296,8 +298,12 @@ static struct kobj_attribute rtrs_clt_remove_path_attr = __ATTR(remove_path, 0644, rtrs_clt_remove_path_show, rtrs_clt_remove_path_store); -STAT_ATTR(struct rtrs_clt_stats, cpu_migration, - rtrs_clt_stats_migration_cnt_to_str, +STAT_ATTR(struct rtrs_clt_stats, 
cpu_migration_from, + rtrs_clt_stats_migration_from_cnt_to_str, + rtrs_clt_reset_cpu_migr_stats); + +STAT_ATTR(struct rtrs_clt_stats, cpu_migration_to, + rtrs_clt_stats_migration_to_cnt_to_str, rtrs_clt_reset_cpu_migr_stats); STAT_ATTR(struct rtrs_clt_stats, reconnects, @@ -313,7 +319,8 @@ STAT_ATTR(struct rtrs_clt_stats, reset_all, rtrs_clt_reset_all_stats); static struct attribute *rtrs_clt_stats_attrs[] = { - &cpu_migration_attr.attr, + &cpu_migration_from_attr.attr, + &cpu_migration_to_attr.attr, &reconnects_attr.attr, &rdma_attr.attr, &reset_all_attr.attr, @@ -328,11 +335,11 @@ static ssize_t rtrs_clt_hca_port_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = container_of(kobj, typeof(*sess), kobj); + clt_path = container_of(kobj, typeof(*clt_path), kobj); - return sysfs_emit(page, "%u\n", sess->hca_port); + return sysfs_emit(page, "%u\n", clt_path->hca_port); } static struct kobj_attribute rtrs_clt_hca_port_attr = @@ -342,11 +349,11 @@ static ssize_t rtrs_clt_hca_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); - return sysfs_emit(page, "%s\n", sess->hca_name); + return sysfs_emit(page, "%s\n", clt_path->hca_name); } static struct kobj_attribute rtrs_clt_hca_name_attr = @@ -356,12 +363,12 @@ static ssize_t rtrs_clt_cur_latency_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); return sysfs_emit(page, "%lld ns\n", - ktime_to_ns(sess->s.hb_cur_latency)); + ktime_to_ns(clt_path->s.hb_cur_latency)); } static struct kobj_attribute rtrs_clt_cur_latency_attr = @@ -371,11 +378,11 @@ static ssize_t rtrs_clt_src_addr_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; int len; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); - len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page, + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); + len = sockaddr_to_str((struct sockaddr *)&clt_path->s.src_addr, page, PAGE_SIZE); len += sysfs_emit_at(page, len, "\n"); return len; @@ -388,11 +395,11 @@ static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; int len; - sess = container_of(kobj, struct rtrs_clt_sess, kobj); - len = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, page, + clt_path = container_of(kobj, struct rtrs_clt_path, kobj); + len = sockaddr_to_str((struct sockaddr *)&clt_path->s.dst_addr, page, PAGE_SIZE); len += sysfs_emit_at(page, len, "\n"); return len; @@ -401,7 +408,7 @@ static ssize_t rtrs_clt_dst_addr_show(struct kobject *kobj, static struct kobj_attribute rtrs_clt_dst_addr_attr = __ATTR(dst_addr, 0444, rtrs_clt_dst_addr_show, NULL); -static struct attribute *rtrs_clt_sess_attrs[] = { +static struct attribute *rtrs_clt_path_attrs[] = { &rtrs_clt_hca_name_attr.attr, &rtrs_clt_hca_port_attr.attr, &rtrs_clt_src_addr_attr.attr, @@ -414,42 +421,43 @@ static struct attribute *rtrs_clt_sess_attrs[] = { NULL, }; -static const struct attribute_group rtrs_clt_sess_attr_group = { 
- .attrs = rtrs_clt_sess_attrs, +static const struct attribute_group rtrs_clt_path_attr_group = { + .attrs = rtrs_clt_path_attrs, }; -int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess) +int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path) { - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_sess *clt = clt_path->clt; char str[NAME_MAX]; int err; struct rtrs_addr path = { - .src = &sess->s.src_addr, - .dst = &sess->s.dst_addr, + .src = &clt_path->s.src_addr, + .dst = &clt_path->s.dst_addr, }; rtrs_addr_to_str(&path, str, sizeof(str)); - err = kobject_init_and_add(&sess->kobj, &ktype_sess, clt->kobj_paths, + err = kobject_init_and_add(&clt_path->kobj, &ktype_sess, + clt->kobj_paths, "%s", str); if (err) { pr_err("kobject_init_and_add: %d\n", err); - kobject_put(&sess->kobj); + kobject_put(&clt_path->kobj); return err; } - err = sysfs_create_group(&sess->kobj, &rtrs_clt_sess_attr_group); + err = sysfs_create_group(&clt_path->kobj, &rtrs_clt_path_attr_group); if (err) { pr_err("sysfs_create_group(): %d\n", err); goto put_kobj; } - err = kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats, - &sess->kobj, "stats"); + err = kobject_init_and_add(&clt_path->stats->kobj_stats, &ktype_stats, + &clt_path->kobj, "stats"); if (err) { pr_err("kobject_init_and_add: %d\n", err); - kobject_put(&sess->stats->kobj_stats); + kobject_put(&clt_path->stats->kobj_stats); goto remove_group; } - err = sysfs_create_group(&sess->stats->kobj_stats, + err = sysfs_create_group(&clt_path->stats->kobj_stats, &rtrs_clt_stats_attr_group); if (err) { pr_err("failed to create stats sysfs group, err: %d\n", err); @@ -459,25 +467,25 @@ int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess) return 0; put_kobj_stats: - kobject_del(&sess->stats->kobj_stats); - kobject_put(&sess->stats->kobj_stats); + kobject_del(&clt_path->stats->kobj_stats); + kobject_put(&clt_path->stats->kobj_stats); remove_group: - sysfs_remove_group(&sess->kobj, &rtrs_clt_sess_attr_group); + sysfs_remove_group(&clt_path->kobj, &rtrs_clt_path_attr_group); put_kobj: - kobject_del(&sess->kobj); - kobject_put(&sess->kobj); + kobject_del(&clt_path->kobj); + kobject_put(&clt_path->kobj); return err; } -void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess, +void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path, const struct attribute *sysfs_self) { - kobject_del(&sess->stats->kobj_stats); - kobject_put(&sess->stats->kobj_stats); + kobject_del(&clt_path->stats->kobj_stats); + kobject_put(&clt_path->stats->kobj_stats); if (sysfs_self) - sysfs_remove_file_self(&sess->kobj, sysfs_self); - kobject_del(&sess->kobj); + sysfs_remove_file_self(&clt_path->kobj, sysfs_self); + kobject_del(&clt_path->kobj); } static struct attribute *rtrs_clt_attrs[] = { @@ -491,12 +499,12 @@ static const struct attribute_group rtrs_clt_attr_group = { .attrs = rtrs_clt_attrs, }; -int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt) +int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt) { return sysfs_create_group(&clt->dev.kobj, &rtrs_clt_attr_group); } -void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt) +void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt) { sysfs_remove_group(&clt->dev.kobj, &rtrs_clt_attr_group); diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.c b/drivers/infiniband/ulp/rtrs/rtrs-clt.c index bc8824b4ee0d..7c3f98e57889 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.c @@ -46,21 +46,21 @@ static struct rtrs_rdma_dev_pd dev_pd = { static struct 
workqueue_struct *rtrs_wq; static struct class *rtrs_clt_dev_class; -static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt) +static inline bool rtrs_clt_is_connected(const struct rtrs_clt_sess *clt) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; bool connected = false; rcu_read_lock(); - list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) - connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED; + list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) + connected |= READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED; rcu_read_unlock(); return connected; } static struct rtrs_permit * -__rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type) +__rtrs_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type) { size_t max_depth = clt->queue_depth; struct rtrs_permit *permit; @@ -87,7 +87,7 @@ __rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type) return permit; } -static inline void __rtrs_put_permit(struct rtrs_clt *clt, +static inline void __rtrs_put_permit(struct rtrs_clt_sess *clt, struct rtrs_permit *permit) { clear_bit_unlock(permit->mem_id, clt->permits_map); @@ -107,7 +107,7 @@ static inline void __rtrs_put_permit(struct rtrs_clt *clt, * Context: * Can sleep if @wait == RTRS_PERMIT_WAIT */ -struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt, +struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *clt, enum rtrs_clt_con_type con_type, enum wait_type can_wait) { @@ -142,7 +142,8 @@ EXPORT_SYMBOL(rtrs_clt_get_permit); * Context: * Does not matter */ -void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit) +void rtrs_clt_put_permit(struct rtrs_clt_sess *clt, + struct rtrs_permit *permit) { if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map))) return; @@ -163,29 +164,29 @@ EXPORT_SYMBOL(rtrs_clt_put_permit); /** * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit - * @sess: client session pointer + * @clt_path: client path pointer * @permit: permit for the allocation of the RDMA buffer * Note: * IO connection starts from 1. * 0 connection is for user messages. */ static -struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess, +struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path, struct rtrs_permit *permit) { int id = 0; if (permit->con_type == RTRS_IO_CON) - id = (permit->cpu_id % (sess->s.irq_con_num - 1)) + 1; + id = (permit->cpu_id % (clt_path->s.irq_con_num - 1)) + 1; - return to_clt_con(sess->s.con[id]); + return to_clt_con(clt_path->s.con[id]); } /** * rtrs_clt_change_state() - change the session state through session state * machine. * - * @sess: client session to change the state of. + * @clt_path: client path to change the state of. * @new_state: state to change to. * * returns true if sess's state is changed to new state, otherwise return false. @@ -193,15 +194,15 @@ struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess, * Locks: * state_wq lock must be hold. 
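The permit helpers above hand slots back with clear_bit_unlock() on clt->permits_map; the matching allocation (largely elided by the hunk context) follows the usual lock-free bitmap idiom. A minimal sketch of that idiom with hypothetical names, not the driver's exact code:

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/errno.h>

/*
 * Claim a free slot in @map without taking a spinlock: pick a candidate
 * bit, then atomically test-and-set it, retrying if another CPU raced
 * us to the same bit.
 */
static int demo_claim_slot(unsigned long *map, unsigned int nbits)
{
	unsigned int bit;

	do {
		bit = find_first_zero_bit(map, nbits);
		if (bit >= nbits)
			return -EBUSY;	/* every slot is currently taken */
	} while (test_and_set_bit_lock(bit, map));

	return bit;	/* released later with clear_bit_unlock(bit, map) */
}

clear_bit_unlock() on the put side provides release ordering, so whoever wins the next test_and_set_bit_lock() observes the freed slot's contents consistently.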
*/ -static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess, +static bool rtrs_clt_change_state(struct rtrs_clt_path *clt_path, enum rtrs_clt_state new_state) { enum rtrs_clt_state old_state; bool changed = false; - lockdep_assert_held(&sess->state_wq.lock); + lockdep_assert_held(&clt_path->state_wq.lock); - old_state = sess->state; + old_state = clt_path->state; switch (new_state) { case RTRS_CLT_CONNECTING: switch (old_state) { @@ -275,42 +276,42 @@ static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess, break; } if (changed) { - sess->state = new_state; - wake_up_locked(&sess->state_wq); + clt_path->state = new_state; + wake_up_locked(&clt_path->state_wq); } return changed; } -static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess, +static bool rtrs_clt_change_state_from_to(struct rtrs_clt_path *clt_path, enum rtrs_clt_state old_state, enum rtrs_clt_state new_state) { bool changed = false; - spin_lock_irq(&sess->state_wq.lock); - if (sess->state == old_state) - changed = rtrs_clt_change_state(sess, new_state); - spin_unlock_irq(&sess->state_wq.lock); + spin_lock_irq(&clt_path->state_wq.lock); + if (clt_path->state == old_state) + changed = rtrs_clt_change_state(clt_path, new_state); + spin_unlock_irq(&clt_path->state_wq.lock); return changed; } static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); - if (rtrs_clt_change_state_from_to(sess, + if (rtrs_clt_change_state_from_to(clt_path, RTRS_CLT_CONNECTED, RTRS_CLT_RECONNECTING)) { - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_sess *clt = clt_path->clt; unsigned int delay_ms; /* * Normal scenario, reconnect if we were successfully connected */ delay_ms = clt->reconnect_delay_sec * 1000; - queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, + queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, msecs_to_jiffies(delay_ms + prandom_u32() % RTRS_RECONNECT_SEED)); } else { @@ -319,7 +320,7 @@ static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con) * so notify waiter with error state, waiter is responsible * for cleaning the rest and reconnect if needed. 
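rtrs_clt_change_state() above runs with the wait queue's own spinlock held and wakes waiters with wake_up_locked(), so a transition and its notification happen as one atomic step. A stripped-down sketch of the same pattern on a hypothetical two-field object:

#include <linux/types.h>
#include <linux/wait.h>
#include <linux/spinlock.h>

struct demo_obj {
	int			state;
	wait_queue_head_t	state_wq;
};

static bool demo_set_state(struct demo_obj *o, int new_state)
{
	bool changed = false;

	spin_lock_irq(&o->state_wq.lock);
	if (o->state != new_state) {
		o->state = new_state;
		changed = true;
		/* state_wq.lock is already held, so use the _locked variant. */
		wake_up_locked(&o->state_wq);
	}
	spin_unlock_irq(&o->state_wq.lock);

	return changed;
}

rtrs_clt_change_state_from_to() takes the same state_wq.lock before comparing the old state, which is what makes its compare-and-transition race free.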
*/ - rtrs_clt_change_state_from_to(sess, + rtrs_clt_change_state_from_to(clt_path, RTRS_CLT_CONNECTING, RTRS_CLT_CONNECTING_ERR); } @@ -330,7 +331,7 @@ static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc) struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); if (wc->status != IB_WC_SUCCESS) { - rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n", + rtrs_err(con->c.path, "Failed IB_WR_REG_MR: %s\n", ib_wc_status_msg(wc->status)); rtrs_rdma_error_recovery(con); } @@ -350,7 +351,7 @@ static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); if (wc->status != IB_WC_SUCCESS) { - rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n", + rtrs_err(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n", ib_wc_status_msg(wc->status)); rtrs_rdma_error_recovery(con); } @@ -380,14 +381,14 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, bool notify, bool can_wait) { struct rtrs_clt_con *con = req->con; - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; int err; if (WARN_ON(!req->in_use)) return; if (WARN_ON(!req->con)) return; - sess = to_clt_sess(con->c.sess); + clt_path = to_clt_path(con->c.path); if (req->sg_cnt) { if (req->dir == DMA_FROM_DEVICE && req->need_inv) { @@ -417,7 +418,7 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, refcount_inc(&req->ref); err = rtrs_inv_rkey(req); if (err) { - rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n", + rtrs_err(con->c.path, "Send INV WR key=%#x: %d\n", req->mr->rkey, err); } else if (can_wait) { wait_for_completion(&req->inv_comp); @@ -433,21 +434,21 @@ static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, if (!refcount_dec_and_test(&req->ref)) return; } - ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist, + ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, req->sg_cnt, req->dir); } if (!refcount_dec_and_test(&req->ref)) return; if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) - atomic_dec(&sess->stats->inflight); + atomic_dec(&clt_path->stats->inflight); req->in_use = false; req->con = NULL; if (errno) { - rtrs_err_rl(con->c.sess, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n", - errno, kobject_name(&sess->kobj), sess->hca_name, - sess->hca_port, notify); + rtrs_err_rl(con->c.path, "IO request failed: error=%d path=%s [%s:%u] notify=%d\n", + errno, kobject_name(&clt_path->kobj), clt_path->hca_name, + clt_path->hca_port, notify); } if (notify) @@ -459,12 +460,12 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con, struct rtrs_rbuf *rbuf, u32 off, u32 imm, struct ib_send_wr *wr) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); enum ib_send_flags flags; struct ib_sge sge; if (!req->sg_size) { - rtrs_wrn(con->c.sess, + rtrs_wrn(con->c.path, "Doing RDMA Write failed, no data supplied\n"); return -EINVAL; } @@ -472,16 +473,17 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con, /* user data and user message in the first list element */ sge.addr = req->iu->dma_addr; sge.length = req->sg_size; - sge.lkey = sess->s.dev->ib_pd->local_dma_lkey; + sge.lkey = clt_path->s.dev->ib_pd->local_dma_lkey; /* * From time to time we have to post signalled sends, * or send queue will fill up and only QP reset can help. */ - flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ? + flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ? 
0 : IB_SEND_SIGNALED; - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr, + ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, + req->iu->dma_addr, req->sg_size, DMA_TO_DEVICE); return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1, @@ -489,15 +491,15 @@ static int rtrs_post_send_rdma(struct rtrs_clt_con *con, imm, flags, wr, NULL); } -static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id, +static void process_io_rsp(struct rtrs_clt_path *clt_path, u32 msg_id, s16 errno, bool w_inval) { struct rtrs_clt_io_req *req; - if (WARN_ON(msg_id >= sess->queue_depth)) + if (WARN_ON(msg_id >= clt_path->queue_depth)) return; - req = &sess->reqs[msg_id]; + req = &clt_path->reqs[msg_id]; /* Drop need_inv if server responded with send with invalidation */ req->need_inv &= !w_inval; complete_rdma_req(req, errno, true, false); @@ -507,21 +509,21 @@ static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc) { struct rtrs_iu *iu; int err; - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); - WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0); + WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0); iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); err = rtrs_iu_post_recv(&con->c, iu); if (err) { - rtrs_err(con->c.sess, "post iu failed %d\n", err); + rtrs_err(con->c.path, "post iu failed %d\n", err); rtrs_rdma_error_recovery(con); } } static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); struct rtrs_msg_rkey_rsp *msg; u32 imm_type, imm_payload; bool w_inval = false; @@ -529,25 +531,26 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc) u32 buf_id; int err; - WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0); + WARN_ON((clt_path->flags & RTRS_MSG_NEW_RKEY_F) == 0); iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); if (wc->byte_len < sizeof(*msg)) { - rtrs_err(con->c.sess, "rkey response is malformed: size %d\n", + rtrs_err(con->c.path, "rkey response is malformed: size %d\n", wc->byte_len); goto out; } - ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, + ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr, iu->size, DMA_FROM_DEVICE); msg = iu->buf; if (le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP) { - rtrs_err(sess->clt, "rkey response is malformed: type %d\n", + rtrs_err(clt_path->clt, + "rkey response is malformed: type %d\n", le16_to_cpu(msg->type)); goto out; } buf_id = le16_to_cpu(msg->buf_id); - if (WARN_ON(buf_id >= sess->queue_depth)) + if (WARN_ON(buf_id >= clt_path->queue_depth)) goto out; rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload); @@ -560,10 +563,10 @@ static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc) if (WARN_ON(buf_id != msg_id)) goto out; - sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey); - process_io_rsp(sess, msg_id, err, w_inval); + clt_path->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey); + process_io_rsp(clt_path, msg_id, err, w_inval); } - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr, + ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, iu->dma_addr, iu->size, DMA_FROM_DEVICE); return rtrs_clt_recv_done(con, wc); out: @@ -600,14 +603,14 @@ static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe) static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) { struct 
rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); u32 imm_type, imm_payload; bool w_inval = false; int err; if (wc->status != IB_WC_SUCCESS) { if (wc->status != IB_WC_WR_FLUSH_ERR) { - rtrs_err(sess->clt, "RDMA failed: %s\n", + rtrs_err(clt_path->clt, "RDMA failed: %s\n", ib_wc_status_msg(wc->status)); rtrs_rdma_error_recovery(con); } @@ -632,21 +635,21 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM); rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err); - process_io_rsp(sess, msg_id, err, w_inval); + process_io_rsp(clt_path, msg_id, err, w_inval); } else if (imm_type == RTRS_HB_MSG_IMM) { WARN_ON(con->c.cid); - rtrs_send_hb_ack(&sess->s); - if (sess->flags & RTRS_MSG_NEW_RKEY_F) + rtrs_send_hb_ack(&clt_path->s); + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) return rtrs_clt_recv_done(con, wc); } else if (imm_type == RTRS_HB_ACK_IMM) { WARN_ON(con->c.cid); - sess->s.hb_missed_cnt = 0; - sess->s.hb_cur_latency = - ktime_sub(ktime_get(), sess->s.hb_last_sent); - if (sess->flags & RTRS_MSG_NEW_RKEY_F) + clt_path->s.hb_missed_cnt = 0; + clt_path->s.hb_cur_latency = + ktime_sub(ktime_get(), clt_path->s.hb_last_sent); + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) return rtrs_clt_recv_done(con, wc); } else { - rtrs_wrn(con->c.sess, "Unknown IMM type %u\n", + rtrs_wrn(con->c.path, "Unknown IMM type %u\n", imm_type); } if (w_inval) @@ -658,7 +661,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) else err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); if (err) { - rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n", + rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n", err); rtrs_rdma_error_recovery(con); } @@ -670,7 +673,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE || wc->wc_flags & IB_WC_WITH_IMM)); WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done); - if (sess->flags & RTRS_MSG_NEW_RKEY_F) { + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) { if (wc->wc_flags & IB_WC_WITH_INVALIDATE) return rtrs_clt_recv_done(con, wc); @@ -685,7 +688,7 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) break; default: - rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode); + rtrs_wrn(clt_path->clt, "Unexpected WC type: %d\n", wc->opcode); return; } } @@ -693,10 +696,10 @@ static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) static int post_recv_io(struct rtrs_clt_con *con, size_t q_size) { int err, i; - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); for (i = 0; i < q_size; i++) { - if (sess->flags & RTRS_MSG_NEW_RKEY_F) { + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) { struct rtrs_iu *iu = &con->rsp_ius[i]; err = rtrs_iu_post_recv(&con->c, iu); @@ -710,16 +713,16 @@ static int post_recv_io(struct rtrs_clt_con *con, size_t q_size) return 0; } -static int post_recv_sess(struct rtrs_clt_sess *sess) +static int post_recv_path(struct rtrs_clt_path *clt_path) { size_t q_size = 0; int err, cid; - for (cid = 0; cid < sess->s.con_num; cid++) { + for (cid = 0; cid < clt_path->s.con_num; cid++) { if (cid == 0) q_size = SERVICE_CON_QUEUE_DEPTH; else - q_size = sess->queue_depth; + q_size = clt_path->queue_depth; /* * x2 for RDMA read responses + FR key invalidations, @@ -727,9 +730,10 @@ static int post_recv_sess(struct rtrs_clt_sess *sess) */ q_size 
*= 2; - err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size); + err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size); if (err) { - rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err); + rtrs_err(clt_path->clt, "post_recv_io(), err: %d\n", + err); return err; } } @@ -740,8 +744,8 @@ static int post_recv_sess(struct rtrs_clt_sess *sess) struct path_it { int i; struct list_head skip_list; - struct rtrs_clt *clt; - struct rtrs_clt_sess *(*next_path)(struct path_it *it); + struct rtrs_clt_sess *clt; + struct rtrs_clt_path *(*next_path)(struct path_it *it); }; /** @@ -773,11 +777,11 @@ struct path_it { * Locks: * rcu_read_lock() must be hold. */ -static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it) +static struct rtrs_clt_path *get_next_path_rr(struct path_it *it) { - struct rtrs_clt_sess __rcu **ppcpu_path; - struct rtrs_clt_sess *path; - struct rtrs_clt *clt; + struct rtrs_clt_path __rcu **ppcpu_path; + struct rtrs_clt_path *path; + struct rtrs_clt_sess *clt; clt = it->clt; @@ -811,26 +815,26 @@ static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it) * Locks: * rcu_read_lock() must be hold. */ -static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it) +static struct rtrs_clt_path *get_next_path_min_inflight(struct path_it *it) { - struct rtrs_clt_sess *min_path = NULL; - struct rtrs_clt *clt = it->clt; - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *min_path = NULL; + struct rtrs_clt_sess *clt = it->clt; + struct rtrs_clt_path *clt_path; int min_inflight = INT_MAX; int inflight; - list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) { - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) + list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) continue; - if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry))) + if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry))) continue; - inflight = atomic_read(&sess->stats->inflight); + inflight = atomic_read(&clt_path->stats->inflight); if (inflight < min_inflight) { min_inflight = inflight; - min_path = sess; + min_path = clt_path; } } @@ -862,26 +866,26 @@ static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it) * Therefore the caller MUST check the returned * path is NULL and trigger the IO error. 
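get_next_path_min_inflight() above walks the RCU-protected paths_list and keeps the connected path with the smallest inflight counter. The same selection loop reduced to a self-contained sketch (hypothetical struct, not the driver's types):

#include <linux/kernel.h>
#include <linux/atomic.h>
#include <linux/rculist.h>

struct demo_path {
	struct list_head	entry;
	atomic_t		inflight;
	int			connected;
};

/* Caller holds rcu_read_lock(); returns NULL when no path is usable. */
static struct demo_path *demo_pick_min_inflight(struct list_head *paths)
{
	struct demo_path *best = NULL, *p;
	int best_inflight = INT_MAX;

	list_for_each_entry_rcu(p, paths, entry) {
		int inflight;

		if (!READ_ONCE(p->connected))
			continue;
		inflight = atomic_read(&p->inflight);
		if (inflight < best_inflight) {
			best_inflight = inflight;
			best = p;
		}
	}
	return best;
}

The error paths elsewhere in this patch decrement stats->inflight for MP_POLICY_MIN_INFLIGHT requests, which is what keeps that counter balanced for the next selection.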
*/ -static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it) +static struct rtrs_clt_path *get_next_path_min_latency(struct path_it *it) { - struct rtrs_clt_sess *min_path = NULL; - struct rtrs_clt *clt = it->clt; - struct rtrs_clt_sess *sess; - ktime_t min_latency = INT_MAX; + struct rtrs_clt_path *min_path = NULL; + struct rtrs_clt_sess *clt = it->clt; + struct rtrs_clt_path *clt_path; + ktime_t min_latency = KTIME_MAX; ktime_t latency; - list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) { - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) + list_for_each_entry_rcu(clt_path, &clt->paths_list, s.entry) { + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) continue; - if (!list_empty(raw_cpu_ptr(sess->mp_skip_entry))) + if (!list_empty(raw_cpu_ptr(clt_path->mp_skip_entry))) continue; - latency = sess->s.hb_cur_latency; + latency = clt_path->s.hb_cur_latency; if (latency < min_latency) { min_latency = latency; - min_path = sess; + min_path = clt_path; } } @@ -895,7 +899,7 @@ static struct rtrs_clt_sess *get_next_path_min_latency(struct path_it *it) return min_path; } -static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt) +static inline void path_it_init(struct path_it *it, struct rtrs_clt_sess *clt) { INIT_LIST_HEAD(&it->skip_list); it->clt = clt; @@ -928,7 +932,7 @@ static inline void path_it_deinit(struct path_it *it) * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will * also hold the control message of rtrs. * @req: an io request holding information about IO. - * @sess: client session + * @clt_path: client path * @conf: conformation callback function to notify upper layer. * @permit: permit for allocation of RDMA remote buffer * @priv: private pointer @@ -940,7 +944,7 @@ static inline void path_it_deinit(struct path_it *it) * @dir: direction of the IO. 
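The get_next_path_min_latency() hunk above also swaps the search sentinel from INT_MAX to KTIME_MAX: ktime_t is a signed 64-bit nanosecond count, so with a 32-bit sentinel any path whose heartbeat latency exceeds roughly 2.1 seconds compares greater than the starting minimum and can never be picked. A tiny self-contained illustration of the corrected minimum search (hypothetical helper):

#include <linux/ktime.h>

/* Return the index of the smallest sample, or -1 when @n is zero. */
static int demo_min_latency_idx(const ktime_t *lat, int n)
{
	ktime_t best = KTIME_MAX;	/* larger than any real measurement */
	int i, best_idx = -1;

	for (i = 0; i < n; i++) {
		if (lat[i] < best) {
			best = lat[i];
			best_idx = i;
		}
	}
	return best_idx;
}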
*/ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req, - struct rtrs_clt_sess *sess, + struct rtrs_clt_path *clt_path, void (*conf)(void *priv, int errno), struct rtrs_permit *permit, void *priv, const struct kvec *vec, size_t usr_len, @@ -958,13 +962,13 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req, req->sg_cnt = sg_cnt; req->priv = priv; req->dir = dir; - req->con = rtrs_permit_to_clt_con(sess, permit); + req->con = rtrs_permit_to_clt_con(clt_path, permit); req->conf = conf; req->need_inv = false; req->need_inv_comp = false; req->inv_errno = 0; refcount_set(&req->ref, 1); - req->mp_policy = sess->clt->mp_policy; + req->mp_policy = clt_path->clt->mp_policy; iov_iter_kvec(&iter, READ, vec, 1, usr_len); len = _copy_from_iter(req->iu->buf, usr_len, &iter); @@ -974,7 +978,7 @@ static void rtrs_clt_init_req(struct rtrs_clt_io_req *req, } static struct rtrs_clt_io_req * -rtrs_clt_get_req(struct rtrs_clt_sess *sess, +rtrs_clt_get_req(struct rtrs_clt_path *clt_path, void (*conf)(void *priv, int errno), struct rtrs_permit *permit, void *priv, const struct kvec *vec, size_t usr_len, @@ -983,14 +987,14 @@ rtrs_clt_get_req(struct rtrs_clt_sess *sess, { struct rtrs_clt_io_req *req; - req = &sess->reqs[permit->mem_id]; - rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len, + req = &clt_path->reqs[permit->mem_id]; + rtrs_clt_init_req(req, clt_path, conf, permit, priv, vec, usr_len, sg, sg_cnt, data_len, dir); return req; } static struct rtrs_clt_io_req * -rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess, +rtrs_clt_get_copy_req(struct rtrs_clt_path *alive_path, struct rtrs_clt_io_req *fail_req) { struct rtrs_clt_io_req *req; @@ -999,8 +1003,8 @@ rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess, .iov_len = fail_req->usr_len }; - req = &alive_sess->reqs[fail_req->permit->mem_id]; - rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit, + req = &alive_path->reqs[fail_req->permit->mem_id]; + rtrs_clt_init_req(req, alive_path, fail_req->conf, fail_req->permit, fail_req->priv, &vec, fail_req->usr_len, fail_req->sglist, fail_req->sg_cnt, fail_req->data_len, fail_req->dir); @@ -1013,7 +1017,7 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con, u32 size, u32 imm, struct ib_send_wr *wr, struct ib_send_wr *tail) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); struct ib_sge *sge = req->sge; enum ib_send_flags flags; struct scatterlist *sg; @@ -1033,22 +1037,23 @@ static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con, for_each_sg(req->sglist, sg, req->sg_cnt, i) { sge[i].addr = sg_dma_address(sg); sge[i].length = sg_dma_len(sg); - sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey; + sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey; } num_sge = 1 + req->sg_cnt; } sge[i].addr = req->iu->dma_addr; sge[i].length = size; - sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey; + sge[i].lkey = clt_path->s.dev->ib_pd->local_dma_lkey; /* * From time to time we have to post signalled sends, * or send queue will fill up and only QP reset can help. */ - flags = atomic_inc_return(&con->c.wr_cnt) % sess->s.signal_interval ? + flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ? 
0 : IB_SEND_SIGNALED; - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr, + ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, + req->iu->dma_addr, size, DMA_TO_DEVICE); return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge, @@ -1074,8 +1079,8 @@ static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count) static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) { struct rtrs_clt_con *con = req->con; - struct rtrs_sess *s = con->c.sess; - struct rtrs_clt_sess *sess = to_clt_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_clt_path *clt_path = to_clt_path(s); struct rtrs_msg_rdma_write *msg; struct rtrs_rbuf *rbuf; @@ -1088,13 +1093,13 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len; - if (tsize > sess->chunk_size) { + if (tsize > clt_path->chunk_size) { rtrs_wrn(s, "Write request failed, size too big %zu > %d\n", - tsize, sess->chunk_size); + tsize, clt_path->chunk_size); return -EMSGSIZE; } if (req->sg_cnt) { - count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist, + count = ib_dma_map_sg(clt_path->s.dev->ib_dev, req->sglist, req->sg_cnt, req->dir); if (!count) { rtrs_wrn(s, "Write request failed, map failed\n"); @@ -1111,7 +1116,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) imm = rtrs_to_io_req_imm(imm); buf_id = req->permit->mem_id; req->sg_size = tsize; - rbuf = &sess->rbufs[buf_id]; + rbuf = &clt_path->rbufs[buf_id]; if (count) { ret = rtrs_map_sg_fr(req, count); @@ -1119,7 +1124,7 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) rtrs_err_rl(s, "Write request failed, failed to map fast reg. data, err: %d\n", ret); - ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist, + ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, req->sg_cnt, req->dir); return ret; } @@ -1153,12 +1158,12 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) if (ret) { rtrs_err_rl(s, "Write request failed: error=%d path=%s [%s:%u]\n", - ret, kobject_name(&sess->kobj), sess->hca_name, - sess->hca_port); + ret, kobject_name(&clt_path->kobj), clt_path->hca_name, + clt_path->hca_port); if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) - atomic_dec(&sess->stats->inflight); + atomic_dec(&clt_path->stats->inflight); if (req->sg_cnt) - ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist, + ib_dma_unmap_sg(clt_path->s.dev->ib_dev, req->sglist, req->sg_cnt, req->dir); } @@ -1168,10 +1173,10 @@ static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) { struct rtrs_clt_con *con = req->con; - struct rtrs_sess *s = con->c.sess; - struct rtrs_clt_sess *sess = to_clt_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_clt_path *clt_path = to_clt_path(s); struct rtrs_msg_rdma_read *msg; - struct rtrs_ib_dev *dev = sess->s.dev; + struct rtrs_ib_dev *dev = clt_path->s.dev; struct ib_reg_wr rwr; struct ib_send_wr *wr = NULL; @@ -1181,10 +1186,10 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len; - if (tsize > sess->chunk_size) { + if (tsize > clt_path->chunk_size) { rtrs_wrn(s, "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n", - tsize, sess->chunk_size); + tsize, clt_path->chunk_size); return -EMSGSIZE; } @@ -1254,15 +1259,15 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) */ rtrs_clt_update_all_stats(req, READ); - ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id], + ret = 
rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id], req->data_len, imm, wr); if (ret) { rtrs_err_rl(s, "Read request failed: error=%d path=%s [%s:%u]\n", - ret, kobject_name(&sess->kobj), sess->hca_name, - sess->hca_port); + ret, kobject_name(&clt_path->kobj), clt_path->hca_name, + clt_path->hca_port); if (req->mp_policy == MP_POLICY_MIN_INFLIGHT) - atomic_dec(&sess->stats->inflight); + atomic_dec(&clt_path->stats->inflight); req->need_inv = false; if (req->sg_cnt) ib_dma_unmap_sg(dev->ib_dev, req->sglist, @@ -1277,21 +1282,21 @@ static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) * @clt: clt context * @fail_req: a failed io request. */ -static int rtrs_clt_failover_req(struct rtrs_clt *clt, +static int rtrs_clt_failover_req(struct rtrs_clt_sess *clt, struct rtrs_clt_io_req *fail_req) { - struct rtrs_clt_sess *alive_sess; + struct rtrs_clt_path *alive_path; struct rtrs_clt_io_req *req; int err = -ECONNABORTED; struct path_it it; rcu_read_lock(); for (path_it_init(&it, clt); - (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num; + (alive_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { - if (READ_ONCE(alive_sess->state) != RTRS_CLT_CONNECTED) + if (READ_ONCE(alive_path->state) != RTRS_CLT_CONNECTED) continue; - req = rtrs_clt_get_copy_req(alive_sess, fail_req); + req = rtrs_clt_get_copy_req(alive_path, fail_req); if (req->dir == DMA_TO_DEVICE) err = rtrs_clt_write_req(req); else @@ -1301,7 +1306,7 @@ static int rtrs_clt_failover_req(struct rtrs_clt *clt, continue; } /* Success path */ - rtrs_clt_inc_failover_cnt(alive_sess->stats); + rtrs_clt_inc_failover_cnt(alive_path->stats); break; } path_it_deinit(&it); @@ -1310,16 +1315,16 @@ static int rtrs_clt_failover_req(struct rtrs_clt *clt, return err; } -static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess) +static void fail_all_outstanding_reqs(struct rtrs_clt_path *clt_path) { - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_sess *clt = clt_path->clt; struct rtrs_clt_io_req *req; int i, err; - if (!sess->reqs) + if (!clt_path->reqs) return; - for (i = 0; i < sess->queue_depth; ++i) { - req = &sess->reqs[i]; + for (i = 0; i < clt_path->queue_depth; ++i) { + req = &clt_path->reqs[i]; if (!req->in_use) continue; @@ -1337,38 +1342,39 @@ static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess) } } -static void free_sess_reqs(struct rtrs_clt_sess *sess) +static void free_path_reqs(struct rtrs_clt_path *clt_path) { struct rtrs_clt_io_req *req; int i; - if (!sess->reqs) + if (!clt_path->reqs) return; - for (i = 0; i < sess->queue_depth; ++i) { - req = &sess->reqs[i]; + for (i = 0; i < clt_path->queue_depth; ++i) { + req = &clt_path->reqs[i]; if (req->mr) ib_dereg_mr(req->mr); kfree(req->sge); - rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(req->iu, clt_path->s.dev->ib_dev, 1); } - kfree(sess->reqs); - sess->reqs = NULL; + kfree(clt_path->reqs); + clt_path->reqs = NULL; } -static int alloc_sess_reqs(struct rtrs_clt_sess *sess) +static int alloc_path_reqs(struct rtrs_clt_path *clt_path) { struct rtrs_clt_io_req *req; int i, err = -ENOMEM; - sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs), - GFP_KERNEL); - if (!sess->reqs) + clt_path->reqs = kcalloc(clt_path->queue_depth, + sizeof(*clt_path->reqs), + GFP_KERNEL); + if (!clt_path->reqs) return -ENOMEM; - for (i = 0; i < sess->queue_depth; ++i) { - req = &sess->reqs[i]; - req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL, - sess->s.dev->ib_dev, + for (i = 0; i < clt_path->queue_depth; ++i) { + req = 
&clt_path->reqs[i]; + req->iu = rtrs_iu_alloc(1, clt_path->max_hdr_size, GFP_KERNEL, + clt_path->s.dev->ib_dev, DMA_TO_DEVICE, rtrs_clt_rdma_done); if (!req->iu) @@ -1378,13 +1384,14 @@ static int alloc_sess_reqs(struct rtrs_clt_sess *sess) if (!req->sge) goto out; - req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, - sess->max_pages_per_mr); + req->mr = ib_alloc_mr(clt_path->s.dev->ib_pd, + IB_MR_TYPE_MEM_REG, + clt_path->max_pages_per_mr); if (IS_ERR(req->mr)) { err = PTR_ERR(req->mr); req->mr = NULL; - pr_err("Failed to alloc sess->max_pages_per_mr %d\n", - sess->max_pages_per_mr); + pr_err("Failed to alloc clt_path->max_pages_per_mr %d\n", + clt_path->max_pages_per_mr); goto out; } @@ -1394,12 +1401,12 @@ static int alloc_sess_reqs(struct rtrs_clt_sess *sess) return 0; out: - free_sess_reqs(sess); + free_path_reqs(clt_path); return err; } -static int alloc_permits(struct rtrs_clt *clt) +static int alloc_permits(struct rtrs_clt_sess *clt) { unsigned int chunk_bits; int err, i; @@ -1433,7 +1440,7 @@ out_err: return err; } -static void free_permits(struct rtrs_clt *clt) +static void free_permits(struct rtrs_clt_sess *clt) { if (clt->permits_map) { size_t sz = clt->queue_depth; @@ -1447,13 +1454,13 @@ static void free_permits(struct rtrs_clt *clt) clt->permits = NULL; } -static void query_fast_reg_mode(struct rtrs_clt_sess *sess) +static void query_fast_reg_mode(struct rtrs_clt_path *clt_path) { struct ib_device *ib_dev; u64 max_pages_per_mr; int mr_page_shift; - ib_dev = sess->s.dev->ib_dev; + ib_dev = clt_path->s.dev->ib_dev; /* * Use the smallest page size supported by the HCA, down to a @@ -1463,24 +1470,24 @@ static void query_fast_reg_mode(struct rtrs_clt_sess *sess) mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1); max_pages_per_mr = ib_dev->attrs.max_mr_size; do_div(max_pages_per_mr, (1ull << mr_page_shift)); - sess->max_pages_per_mr = - min3(sess->max_pages_per_mr, (u32)max_pages_per_mr, + clt_path->max_pages_per_mr = + min3(clt_path->max_pages_per_mr, (u32)max_pages_per_mr, ib_dev->attrs.max_fast_reg_page_list_len); - sess->clt->max_segments = - min(sess->max_pages_per_mr, sess->clt->max_segments); + clt_path->clt->max_segments = + min(clt_path->max_pages_per_mr, clt_path->clt->max_segments); } -static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess, +static bool rtrs_clt_change_state_get_old(struct rtrs_clt_path *clt_path, enum rtrs_clt_state new_state, enum rtrs_clt_state *old_state) { bool changed; - spin_lock_irq(&sess->state_wq.lock); + spin_lock_irq(&clt_path->state_wq.lock); if (old_state) - *old_state = sess->state; - changed = rtrs_clt_change_state(sess, new_state); - spin_unlock_irq(&sess->state_wq.lock); + *old_state = clt_path->state; + changed = rtrs_clt_change_state(clt_path, new_state); + spin_unlock_irq(&clt_path->state_wq.lock); return changed; } @@ -1492,9 +1499,9 @@ static void rtrs_clt_hb_err_handler(struct rtrs_con *c) rtrs_rdma_error_recovery(con); } -static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess) +static void rtrs_clt_init_hb(struct rtrs_clt_path *clt_path) { - rtrs_init_hb(&sess->s, &io_comp_cqe, + rtrs_init_hb(&clt_path->s, &io_comp_cqe, RTRS_HB_INTERVAL_MS, RTRS_HB_MISSED_MAX, rtrs_clt_hb_err_handler, @@ -1504,17 +1511,17 @@ static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess) static void rtrs_clt_reconnect_work(struct work_struct *work); static void rtrs_clt_close_work(struct work_struct *work); -static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt, +static struct rtrs_clt_path 
*alloc_path(struct rtrs_clt_sess *clt, const struct rtrs_addr *path, size_t con_num, u32 nr_poll_queues) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; int err = -ENOMEM; int cpu; size_t total_con; - sess = kzalloc(sizeof(*sess), GFP_KERNEL); - if (!sess) + clt_path = kzalloc(sizeof(*clt_path), GFP_KERNEL); + if (!clt_path) goto err; /* @@ -1522,20 +1529,21 @@ static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt, * +1: Extra connection for user messages */ total_con = con_num + nr_poll_queues + 1; - sess->s.con = kcalloc(total_con, sizeof(*sess->s.con), GFP_KERNEL); - if (!sess->s.con) - goto err_free_sess; + clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con), + GFP_KERNEL); + if (!clt_path->s.con) + goto err_free_path; - sess->s.con_num = total_con; - sess->s.irq_con_num = con_num + 1; + clt_path->s.con_num = total_con; + clt_path->s.irq_con_num = con_num + 1; - sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL); - if (!sess->stats) + clt_path->stats = kzalloc(sizeof(*clt_path->stats), GFP_KERNEL); + if (!clt_path->stats) goto err_free_con; - mutex_init(&sess->init_mutex); - uuid_gen(&sess->s.uuid); - memcpy(&sess->s.dst_addr, path->dst, + mutex_init(&clt_path->init_mutex); + uuid_gen(&clt_path->s.uuid); + memcpy(&clt_path->s.dst_addr, path->dst, rdma_addr_size((struct sockaddr *)path->dst)); /* @@ -1544,53 +1552,54 @@ static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt, * the sess->src_addr will contain only zeros, which is then fine. */ if (path->src) - memcpy(&sess->s.src_addr, path->src, + memcpy(&clt_path->s.src_addr, path->src, rdma_addr_size((struct sockaddr *)path->src)); - strscpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname)); - sess->clt = clt; - sess->max_pages_per_mr = RTRS_MAX_SEGMENTS; - init_waitqueue_head(&sess->state_wq); - sess->state = RTRS_CLT_CONNECTING; - atomic_set(&sess->connected_cnt, 0); - INIT_WORK(&sess->close_work, rtrs_clt_close_work); - INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work); - rtrs_clt_init_hb(sess); - - sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry)); - if (!sess->mp_skip_entry) + strscpy(clt_path->s.sessname, clt->sessname, + sizeof(clt_path->s.sessname)); + clt_path->clt = clt; + clt_path->max_pages_per_mr = RTRS_MAX_SEGMENTS; + init_waitqueue_head(&clt_path->state_wq); + clt_path->state = RTRS_CLT_CONNECTING; + atomic_set(&clt_path->connected_cnt, 0); + INIT_WORK(&clt_path->close_work, rtrs_clt_close_work); + INIT_DELAYED_WORK(&clt_path->reconnect_dwork, rtrs_clt_reconnect_work); + rtrs_clt_init_hb(clt_path); + + clt_path->mp_skip_entry = alloc_percpu(typeof(*clt_path->mp_skip_entry)); + if (!clt_path->mp_skip_entry) goto err_free_stats; for_each_possible_cpu(cpu) - INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu)); + INIT_LIST_HEAD(per_cpu_ptr(clt_path->mp_skip_entry, cpu)); - err = rtrs_clt_init_stats(sess->stats); + err = rtrs_clt_init_stats(clt_path->stats); if (err) goto err_free_percpu; - return sess; + return clt_path; err_free_percpu: - free_percpu(sess->mp_skip_entry); + free_percpu(clt_path->mp_skip_entry); err_free_stats: - kfree(sess->stats); + kfree(clt_path->stats); err_free_con: - kfree(sess->s.con); -err_free_sess: - kfree(sess); + kfree(clt_path->s.con); +err_free_path: + kfree(clt_path); err: return ERR_PTR(err); } -void free_sess(struct rtrs_clt_sess *sess) +void free_path(struct rtrs_clt_path *clt_path) { - free_percpu(sess->mp_skip_entry); - mutex_destroy(&sess->init_mutex); - kfree(sess->s.con); - kfree(sess->rbufs); - 
kfree(sess); + free_percpu(clt_path->mp_skip_entry); + mutex_destroy(&clt_path->init_mutex); + kfree(clt_path->s.con); + kfree(clt_path->rbufs); + kfree(clt_path); } -static int create_con(struct rtrs_clt_sess *sess, unsigned int cid) +static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid) { struct rtrs_clt_con *con; @@ -1601,28 +1610,28 @@ static int create_con(struct rtrs_clt_sess *sess, unsigned int cid) /* Map first two connections to the first CPU */ con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids; con->c.cid = cid; - con->c.sess = &sess->s; + con->c.path = &clt_path->s; /* Align with srv, init as 1 */ atomic_set(&con->c.wr_cnt, 1); mutex_init(&con->con_mutex); - sess->s.con[cid] = &con->c; + clt_path->s.con[cid] = &con->c; return 0; } static void destroy_con(struct rtrs_clt_con *con) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); - sess->s.con[con->c.cid] = NULL; + clt_path->s.con[con->c.cid] = NULL; mutex_destroy(&con->con_mutex); kfree(con); } static int create_con_cq_qp(struct rtrs_clt_con *con) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); u32 max_send_wr, max_recv_wr, cq_num, max_send_sge, wr_limit; int err, cq_vector; struct rtrs_msg_rkey_rsp *rsp; @@ -1631,7 +1640,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con) if (con->c.cid == 0) { max_send_sge = 1; /* We must be the first here */ - if (WARN_ON(sess->s.dev)) + if (WARN_ON(clt_path->s.dev)) return -EINVAL; /* @@ -1639,16 +1648,16 @@ static int create_con_cq_qp(struct rtrs_clt_con *con) * Be careful not to close user connection before ib dev * is gracefully put. */ - sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device, + clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device, &dev_pd); - if (!sess->s.dev) { - rtrs_wrn(sess->clt, + if (!clt_path->s.dev) { + rtrs_wrn(clt_path->clt, "rtrs_ib_dev_find_get_or_add(): no memory\n"); return -ENOMEM; } - sess->s.dev_ref = 1; - query_fast_reg_mode(sess); - wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr; + clt_path->s.dev_ref = 1; + query_fast_reg_mode(clt_path); + wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr; /* * Two (request + registration) completion for send * Two for recv if always_invalidate is set on server @@ -1665,27 +1674,28 @@ static int create_con_cq_qp(struct rtrs_clt_con *con) * This is always true if user connection (cid == 0) is * established first. 
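alloc_path() above builds the path object in stages (kzalloc, the connection array, stats, a per-CPU skip-list head) and unwinds through goto labels in reverse order when any stage fails. The shape of that pattern with hypothetical fields, not the driver's structure:

#include <linux/slab.h>
#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/list.h>
#include <linux/err.h>

struct demo_path {
	void			**con;
	struct list_head __percpu *skip_entry;
};

static struct demo_path *demo_alloc_path(size_t con_num)
{
	struct demo_path *p;
	int cpu;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return ERR_PTR(-ENOMEM);

	p->con = kcalloc(con_num, sizeof(*p->con), GFP_KERNEL);
	if (!p->con)
		goto err_free_path;

	p->skip_entry = alloc_percpu(struct list_head);
	if (!p->skip_entry)
		goto err_free_con;

	/* Every possible CPU gets an empty list head, as alloc_path() does. */
	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(per_cpu_ptr(p->skip_entry, cpu));

	return p;

err_free_con:
	kfree(p->con);
err_free_path:
	kfree(p);
	return ERR_PTR(-ENOMEM);
}

Unwinding strictly in reverse allocation order keeps every label's cleanup valid no matter which step failed, and free_path() above releases the same resources on the eventual teardown.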
*/ - if (WARN_ON(!sess->s.dev)) + if (WARN_ON(!clt_path->s.dev)) return -EINVAL; - if (WARN_ON(!sess->queue_depth)) + if (WARN_ON(!clt_path->queue_depth)) return -EINVAL; - wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr; + wr_limit = clt_path->s.dev->ib_dev->attrs.max_qp_wr; /* Shared between connections */ - sess->s.dev_ref++; + clt_path->s.dev_ref++; max_send_wr = min_t(int, wr_limit, /* QD * (REQ + RSP + FR REGS or INVS) + drain */ - sess->queue_depth * 3 + 1); + clt_path->queue_depth * 3 + 1); max_recv_wr = min_t(int, wr_limit, - sess->queue_depth * 3 + 1); + clt_path->queue_depth * 3 + 1); max_send_sge = 2; } atomic_set(&con->c.sq_wr_avail, max_send_wr); cq_num = max_send_wr + max_recv_wr; /* alloc iu to recv new rkey reply when server reports flags set */ - if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) { + if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) { con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp), - GFP_KERNEL, sess->s.dev->ib_dev, + GFP_KERNEL, + clt_path->s.dev->ib_dev, DMA_FROM_DEVICE, rtrs_clt_rdma_done); if (!con->rsp_ius) @@ -1693,13 +1703,13 @@ static int create_con_cq_qp(struct rtrs_clt_con *con) con->queue_num = cq_num; } cq_num = max_send_wr + max_recv_wr; - cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors; - if (con->c.cid >= sess->s.irq_con_num) - err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge, + cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors; + if (con->c.cid >= clt_path->s.irq_con_num) + err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge, cq_vector, cq_num, max_send_wr, max_recv_wr, IB_POLL_DIRECT); else - err = rtrs_cq_qp_create(&sess->s, &con->c, max_send_sge, + err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge, cq_vector, cq_num, max_send_wr, max_recv_wr, IB_POLL_SOFTIRQ); /* @@ -1711,7 +1721,7 @@ static int create_con_cq_qp(struct rtrs_clt_con *con) static void destroy_con_cq_qp(struct rtrs_clt_con *con) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); /* * Be careful here: destroy_con_cq_qp() can be called even @@ -1720,13 +1730,14 @@ static void destroy_con_cq_qp(struct rtrs_clt_con *con) lockdep_assert_held(&con->con_mutex); rtrs_cq_qp_destroy(&con->c); if (con->rsp_ius) { - rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_num); + rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev, + con->queue_num); con->rsp_ius = NULL; con->queue_num = 0; } - if (sess->s.dev_ref && !--sess->s.dev_ref) { - rtrs_ib_dev_put(sess->s.dev); - sess->s.dev = NULL; + if (clt_path->s.dev_ref && !--clt_path->s.dev_ref) { + rtrs_ib_dev_put(clt_path->s.dev); + clt_path->s.dev = NULL; } } @@ -1745,7 +1756,7 @@ static void destroy_cm(struct rtrs_clt_con *con) static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con) { - struct rtrs_sess *s = con->c.sess; + struct rtrs_path *s = con->c.path; int err; mutex_lock(&con->con_mutex); @@ -1764,8 +1775,8 @@ static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con) static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); + struct rtrs_clt_sess *clt = clt_path->clt; struct rtrs_msg_conn_req msg; struct rdma_conn_param param; @@ -1782,11 +1793,11 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con) .magic = cpu_to_le16(RTRS_MAGIC), .version = cpu_to_le16(RTRS_PROTO_VER), .cid = cpu_to_le16(con->c.cid), - 
.cid_num = cpu_to_le16(sess->s.con_num), - .recon_cnt = cpu_to_le16(sess->s.recon_cnt), + .cid_num = cpu_to_le16(clt_path->s.con_num), + .recon_cnt = cpu_to_le16(clt_path->s.recon_cnt), }; - msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0; - uuid_copy(&msg.sess_uuid, &sess->s.uuid); + msg.first_conn = clt_path->for_new_clt ? FIRST_CONN : 0; + uuid_copy(&msg.sess_uuid, &clt_path->s.uuid); uuid_copy(&msg.paths_uuid, &clt->paths_uuid); err = rdma_connect_locked(con->c.cm_id, ¶m); @@ -1799,8 +1810,8 @@ static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con) static int rtrs_rdma_conn_established(struct rtrs_clt_con *con, struct rdma_cm_event *ev) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); + struct rtrs_clt_sess *clt = clt_path->clt; const struct rtrs_msg_conn_rsp *msg; u16 version, queue_depth; int errno; @@ -1831,31 +1842,32 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con, if (con->c.cid == 0) { queue_depth = le16_to_cpu(msg->queue_depth); - if (sess->queue_depth > 0 && queue_depth != sess->queue_depth) { + if (clt_path->queue_depth > 0 && queue_depth != clt_path->queue_depth) { rtrs_err(clt, "Error: queue depth changed\n"); /* * Stop any more reconnection attempts */ - sess->reconnect_attempts = -1; + clt_path->reconnect_attempts = -1; rtrs_err(clt, "Disabling auto-reconnect. Trigger a manual reconnect after issue is resolved\n"); return -ECONNRESET; } - if (!sess->rbufs) { - sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs), - GFP_KERNEL); - if (!sess->rbufs) + if (!clt_path->rbufs) { + clt_path->rbufs = kcalloc(queue_depth, + sizeof(*clt_path->rbufs), + GFP_KERNEL); + if (!clt_path->rbufs) return -ENOMEM; } - sess->queue_depth = queue_depth; - sess->s.signal_interval = min_not_zero(queue_depth, + clt_path->queue_depth = queue_depth; + clt_path->s.signal_interval = min_not_zero(queue_depth, (unsigned short) SERVICE_CON_QUEUE_DEPTH); - sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size); - sess->max_io_size = le32_to_cpu(msg->max_io_size); - sess->flags = le32_to_cpu(msg->flags); - sess->chunk_size = sess->max_io_size + sess->max_hdr_size; + clt_path->max_hdr_size = le32_to_cpu(msg->max_hdr_size); + clt_path->max_io_size = le32_to_cpu(msg->max_io_size); + clt_path->flags = le32_to_cpu(msg->flags); + clt_path->chunk_size = clt_path->max_io_size + clt_path->max_hdr_size; /* * Global IO size is always a minimum. @@ -1866,20 +1878,20 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con, * connections in parallel, use lock. 
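rtrs_rdma_route_resolved() above fills the connect request with cpu_to_le16(), and rtrs_rdma_conn_established() decodes the reply with le16_to_cpu()/le32_to_cpu(), keeping the wire format little-endian regardless of host byte order. A small sketch of that convention with a made-up message (field names and the magic value are illustrative only):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical wire header following the __le* convention used above. */
struct demo_conn_req {
	__le16	magic;
	__le16	version;
	__le16	cid;
	__le16	cid_num;
};

static void demo_fill_req(struct demo_conn_req *req, u16 cid, u16 cid_num)
{
	req->magic   = cpu_to_le16(0x1BBA);	/* made-up magic value */
	req->version = cpu_to_le16(1);
	req->cid     = cpu_to_le16(cid);
	req->cid_num = cpu_to_le16(cid_num);
}

static u16 demo_parse_cid_num(const struct demo_conn_req *req)
{
	/* Convert back to host order before using the value. */
	return le16_to_cpu(req->cid_num);
}

Declaring every on-the-wire field as __le* also lets sparse flag any spot where a raw host-order integer is stored into the message by mistake.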
*/ mutex_lock(&clt->paths_mutex); - clt->queue_depth = sess->queue_depth; - clt->max_io_size = min_not_zero(sess->max_io_size, + clt->queue_depth = clt_path->queue_depth; + clt->max_io_size = min_not_zero(clt_path->max_io_size, clt->max_io_size); mutex_unlock(&clt->paths_mutex); /* * Cache the hca_port and hca_name for sysfs */ - sess->hca_port = con->c.cm_id->port_num; - scnprintf(sess->hca_name, sizeof(sess->hca_name), - sess->s.dev->ib_dev->name); - sess->s.src_addr = con->c.cm_id->route.addr.src_addr; + clt_path->hca_port = con->c.cm_id->port_num; + scnprintf(clt_path->hca_name, sizeof(clt_path->hca_name), + clt_path->s.dev->ib_dev->name); + clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr; /* set for_new_clt, to allow future reconnect on any path */ - sess->for_new_clt = 1; + clt_path->for_new_clt = 1; } return 0; @@ -1887,16 +1899,16 @@ static int rtrs_rdma_conn_established(struct rtrs_clt_con *con, static inline void flag_success_on_conn(struct rtrs_clt_con *con) { - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); - atomic_inc(&sess->connected_cnt); + atomic_inc(&clt_path->connected_cnt); con->cm_err = 1; } static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con, struct rdma_cm_event *ev) { - struct rtrs_sess *s = con->c.sess; + struct rtrs_path *s = con->c.path; const struct rtrs_msg_conn_rsp *msg; const char *rej_msg; int status, errno; @@ -1924,23 +1936,23 @@ static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con, return -ECONNRESET; } -void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait) +void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait) { - if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSING, NULL)) - queue_work(rtrs_wq, &sess->close_work); + if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSING, NULL)) + queue_work(rtrs_wq, &clt_path->close_work); if (wait) - flush_work(&sess->close_work); + flush_work(&clt_path->close_work); } static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err) { if (con->cm_err == 1) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = to_clt_sess(con->c.sess); - if (atomic_dec_and_test(&sess->connected_cnt)) + clt_path = to_clt_path(con->c.path); + if (atomic_dec_and_test(&clt_path->connected_cnt)) - wake_up(&sess->state_wq); + wake_up(&clt_path->state_wq); } con->cm_err = cm_err; } @@ -1949,8 +1961,8 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *ev) { struct rtrs_clt_con *con = cm_id->context; - struct rtrs_sess *s = con->c.sess; - struct rtrs_clt_sess *sess = to_clt_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_clt_path *clt_path = to_clt_path(s); int cm_err = 0; switch (ev->event) { @@ -1968,7 +1980,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, * i.e. wake up without state change, but we set cm_err. */ flag_success_on_conn(con); - wake_up(&sess->state_wq); + wake_up(&clt_path->state_wq); return 0; } break; @@ -1997,7 +2009,7 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, /* * Device removal is a special case. Queue close and return 0. 
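flag_error_on_conn() above drops connected_cnt and wakes state_wq when the count reaches zero, and the shutdown path later waits for exactly that condition. The producer/consumer pair in isolation (hypothetical type, mirroring the atomic_dec_and_test() plus wait_event_timeout() usage in this patch):

#include <linux/atomic.h>
#include <linux/wait.h>
#include <linux/jiffies.h>

struct demo_path {
	atomic_t		connected_cnt;
	wait_queue_head_t	state_wq;
};

/* Connection teardown side: drop our count, wake the waiter on zero. */
static void demo_conn_gone(struct demo_path *p)
{
	if (atomic_dec_and_test(&p->connected_cnt))
		wake_up(&p->state_wq);
}

/* Shutdown side: wait (bounded) until every connection has dropped out. */
static void demo_wait_conns_gone(struct demo_path *p, unsigned int timeout_ms)
{
	wait_event_timeout(p->state_wq, !atomic_read(&p->connected_cnt),
			   msecs_to_jiffies(timeout_ms));
}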
*/ - rtrs_clt_close_conns(sess, false); + rtrs_clt_close_conns(clt_path, false); return 0; default: rtrs_err(s, "Unexpected RDMA CM error (CM event: %s, err: %d)\n", @@ -2020,13 +2032,13 @@ static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, static int create_cm(struct rtrs_clt_con *con) { - struct rtrs_sess *s = con->c.sess; - struct rtrs_clt_sess *sess = to_clt_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_clt_path *clt_path = to_clt_path(s); struct rdma_cm_id *cm_id; int err; cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con, - sess->s.dst_addr.ss_family == AF_IB ? + clt_path->s.dst_addr.ss_family == AF_IB ? RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC); if (IS_ERR(cm_id)) { err = PTR_ERR(cm_id); @@ -2042,8 +2054,8 @@ static int create_cm(struct rtrs_clt_con *con) rtrs_err(s, "Set address reuse failed, err: %d\n", err); goto destroy_cm; } - err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr, - (struct sockaddr *)&sess->s.dst_addr, + err = rdma_resolve_addr(cm_id, (struct sockaddr *)&clt_path->s.src_addr, + (struct sockaddr *)&clt_path->s.dst_addr, RTRS_CONNECT_TIMEOUT_MS); if (err) { rtrs_err(s, "Failed to resolve address, err: %d\n", err); @@ -2055,8 +2067,8 @@ static int create_cm(struct rtrs_clt_con *con) * or session state was really changed to error by device removal. */ err = wait_event_interruptible_timeout( - sess->state_wq, - con->cm_err || sess->state != RTRS_CLT_CONNECTING, + clt_path->state_wq, + con->cm_err || clt_path->state != RTRS_CLT_CONNECTING, msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS)); if (err == 0 || err == -ERESTARTSYS) { if (err == 0) @@ -2068,7 +2080,7 @@ static int create_cm(struct rtrs_clt_con *con) err = con->cm_err; goto errr; } - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) { + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTING) { /* Device removal */ err = -ECONNABORTED; goto errr; @@ -2087,9 +2099,9 @@ destroy_cm: return err; } -static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess) +static void rtrs_clt_path_up(struct rtrs_clt_path *clt_path) { - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_sess *clt = clt_path->clt; int up; /* @@ -2113,19 +2125,19 @@ static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess) mutex_unlock(&clt->paths_ev_mutex); /* Mark session as established */ - sess->established = true; - sess->reconnect_attempts = 0; - sess->stats->reconnects.successful_cnt++; + clt_path->established = true; + clt_path->reconnect_attempts = 0; + clt_path->stats->reconnects.successful_cnt++; } -static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess) +static void rtrs_clt_path_down(struct rtrs_clt_path *clt_path) { - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_sess *clt = clt_path->clt; - if (!sess->established) + if (!clt_path->established) return; - sess->established = false; + clt_path->established = false; mutex_lock(&clt->paths_ev_mutex); WARN_ON(!clt->paths_up); if (--clt->paths_up == 0) @@ -2133,19 +2145,19 @@ static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess) mutex_unlock(&clt->paths_ev_mutex); } -static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess) +static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_path *clt_path) { struct rtrs_clt_con *con; unsigned int cid; - WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED); + WARN_ON(READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED); /* * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes * exactly in between. Start destroying after it finishes. 
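Editor's note: the create_cm() hunks above follow the stock rdma_cm client pattern -- allocate a CM id bound to an event handler, resolve the destination address, then wait for the handler to report progress on the connection context. A minimal sketch of that pattern, not part of the patch; my_ctx, my_handler and the 5000 ms timeout are hypothetical, and the real code waits with a timeout on a state wait-queue rather than a bare completion:

#include <linux/completion.h>
#include <rdma/rdma_cm.h>

struct my_ctx {
	struct completion done;
	int status;
};

/* runs in rdma_cm context; completes on the first event for brevity */
static int my_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *ev)
{
	struct my_ctx *ctx = cm_id->context;

	ctx->status = ev->status;
	complete(&ctx->done);
	return 0;	/* returning non-zero makes rdma_cm destroy the id */
}

static int my_resolve(struct my_ctx *ctx, struct sockaddr *src,
		      struct sockaddr *dst)
{
	struct rdma_cm_id *cm_id;
	int err;

	init_completion(&ctx->done);
	cm_id = rdma_create_id(&init_net, my_handler, ctx,
			       RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	err = rdma_resolve_addr(cm_id, src, dst, 5000 /* ms */);
	if (err) {
		rdma_destroy_id(cm_id);
		return err;
	}
	wait_for_completion(&ctx->done);
	return ctx->status;
}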
*/ - mutex_lock(&sess->init_mutex); - mutex_unlock(&sess->init_mutex); + mutex_lock(&clt_path->init_mutex); + mutex_unlock(&clt_path->init_mutex); /* * All IO paths must observe !CONNECTED state before we @@ -2153,7 +2165,7 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess) */ synchronize_rcu(); - rtrs_stop_hb(&sess->s); + rtrs_stop_hb(&clt_path->s); /* * The order it utterly crucial: firstly disconnect and complete all @@ -2162,15 +2174,15 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess) * eventually notify upper layer about session disconnection. */ - for (cid = 0; cid < sess->s.con_num; cid++) { - if (!sess->s.con[cid]) + for (cid = 0; cid < clt_path->s.con_num; cid++) { + if (!clt_path->s.con[cid]) break; - con = to_clt_con(sess->s.con[cid]); + con = to_clt_con(clt_path->s.con[cid]); stop_cm(con); } - fail_all_outstanding_reqs(sess); - free_sess_reqs(sess); - rtrs_clt_sess_down(sess); + fail_all_outstanding_reqs(clt_path); + free_path_reqs(clt_path); + rtrs_clt_path_down(clt_path); /* * Wait for graceful shutdown, namely when peer side invokes @@ -2180,13 +2192,14 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess) * since CM does not fire anything. That is fine, we are not in * hurry. */ - wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt), + wait_event_timeout(clt_path->state_wq, + !atomic_read(&clt_path->connected_cnt), msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS)); - for (cid = 0; cid < sess->s.con_num; cid++) { - if (!sess->s.con[cid]) + for (cid = 0; cid < clt_path->s.con_num; cid++) { + if (!clt_path->s.con[cid]) break; - con = to_clt_con(sess->s.con[cid]); + con = to_clt_con(clt_path->s.con[cid]); mutex_lock(&con->con_mutex); destroy_con_cq_qp(con); mutex_unlock(&con->con_mutex); @@ -2195,26 +2208,26 @@ static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess) } } -static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path, - struct rtrs_clt_sess *sess, - struct rtrs_clt_sess *next) +static inline bool xchg_paths(struct rtrs_clt_path __rcu **rcu_ppcpu_path, + struct rtrs_clt_path *clt_path, + struct rtrs_clt_path *next) { - struct rtrs_clt_sess **ppcpu_path; + struct rtrs_clt_path **ppcpu_path; /* Call cmpxchg() without sparse warnings */ ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path; - return sess == cmpxchg(ppcpu_path, sess, next); + return clt_path == cmpxchg(ppcpu_path, clt_path, next); } -static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess) +static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_path *clt_path) { - struct rtrs_clt *clt = sess->clt; - struct rtrs_clt_sess *next; + struct rtrs_clt_sess *clt = clt_path->clt; + struct rtrs_clt_path *next; bool wait_for_grace = false; int cpu; mutex_lock(&clt->paths_mutex); - list_del_rcu(&sess->s.entry); + list_del_rcu(&clt_path->s.entry); /* Make sure everybody observes path removal. */ synchronize_rcu(); @@ -2255,7 +2268,7 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess) * removed. If @sess is the last element, then @next is NULL. */ rcu_read_lock(); - next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry, + next = list_next_or_null_rr_rcu(&clt->paths_list, &clt_path->s.entry, typeof(*next), s.entry); rcu_read_unlock(); @@ -2264,11 +2277,11 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess) * removed, so change the pointer manually. 
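Editor's note: rtrs_clt_remove_path_from_arr() above relies on the usual RCU list discipline -- readers walk paths_list under rcu_read_lock(), while the updater (serialized by paths_mutex) unlinks with list_del_rcu() and defers any reuse until after synchronize_rcu(). A generic sketch of that discipline, with a hypothetical item type, not part of the patch:

#include <linux/rculist.h>
#include <linux/slab.h>

struct item {
	struct list_head entry;
	int val;
};

static LIST_HEAD(item_list);	/* updaters serialize on their own lock */

static int reader_sum(void)
{
	struct item *it;
	int sum = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(it, &item_list, entry)
		sum += it->val;
	rcu_read_unlock();
	return sum;
}

static void remove_item(struct item *it)
{
	list_del_rcu(&it->entry);
	synchronize_rcu();	/* wait for all pre-existing readers */
	kfree(it);
}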
*/ for_each_possible_cpu(cpu) { - struct rtrs_clt_sess __rcu **ppcpu_path; + struct rtrs_clt_path __rcu **ppcpu_path; ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu); if (rcu_dereference_protected(*ppcpu_path, - lockdep_is_held(&clt->paths_mutex)) != sess) + lockdep_is_held(&clt->paths_mutex)) != clt_path) /* * synchronize_rcu() was called just after deleting * entry from the list, thus IO code path cannot @@ -2281,7 +2294,7 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess) * We race with IO code path, which also changes pointer, * thus we have to be careful not to overwrite it. */ - if (xchg_sessions(ppcpu_path, sess, next)) + if (xchg_paths(ppcpu_path, clt_path, next)) /* * @ppcpu_path was successfully replaced with @next, * that means that someone could also pick up the @@ -2296,29 +2309,29 @@ static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess) mutex_unlock(&clt->paths_mutex); } -static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess) +static void rtrs_clt_add_path_to_arr(struct rtrs_clt_path *clt_path) { - struct rtrs_clt *clt = sess->clt; + struct rtrs_clt_sess *clt = clt_path->clt; mutex_lock(&clt->paths_mutex); clt->paths_num++; - list_add_tail_rcu(&sess->s.entry, &clt->paths_list); + list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); mutex_unlock(&clt->paths_mutex); } static void rtrs_clt_close_work(struct work_struct *work) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = container_of(work, struct rtrs_clt_sess, close_work); + clt_path = container_of(work, struct rtrs_clt_path, close_work); - cancel_delayed_work_sync(&sess->reconnect_dwork); - rtrs_clt_stop_and_destroy_conns(sess); - rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSED, NULL); + cancel_delayed_work_sync(&clt_path->reconnect_dwork); + rtrs_clt_stop_and_destroy_conns(clt_path); + rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CLOSED, NULL); } -static int init_conns(struct rtrs_clt_sess *sess) +static int init_conns(struct rtrs_clt_path *clt_path) { unsigned int cid; int err; @@ -2328,31 +2341,31 @@ static int init_conns(struct rtrs_clt_sess *sess) * to avoid clashes with previous sessions not yet closed * sessions on a server side. */ - sess->s.recon_cnt++; + clt_path->s.recon_cnt++; /* Establish all RDMA connections */ - for (cid = 0; cid < sess->s.con_num; cid++) { - err = create_con(sess, cid); + for (cid = 0; cid < clt_path->s.con_num; cid++) { + err = create_con(clt_path, cid); if (err) goto destroy; - err = create_cm(to_clt_con(sess->s.con[cid])); + err = create_cm(to_clt_con(clt_path->s.con[cid])); if (err) { - destroy_con(to_clt_con(sess->s.con[cid])); + destroy_con(to_clt_con(clt_path->s.con[cid])); goto destroy; } } - err = alloc_sess_reqs(sess); + err = alloc_path_reqs(clt_path); if (err) goto destroy; - rtrs_start_hb(&sess->s); + rtrs_start_hb(&clt_path->s); return 0; destroy: while (cid--) { - struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]); + struct rtrs_clt_con *con = to_clt_con(clt_path->s.con[cid]); stop_cm(con); @@ -2367,7 +2380,7 @@ destroy: * doing rdma_resolve_addr(), switch to CONNECTION_ERR state * manually to keep reconnecting. 
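Editor's note: xchg_paths() above is a guarded pointer swap -- each per-CPU cached path is replaced only if it still points at the path being removed, so a concurrent update from the IO path is never overwritten. In generic form (hypothetical struct item):

#include <linux/atomic.h>

struct item;

static bool replace_if_unchanged(struct item **slot, struct item *old,
				 struct item *new)
{
	/* cmpxchg() returns the previous value of *slot */
	return old == cmpxchg(slot, old, new);
}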
*/ - rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL); + rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL); return err; } @@ -2375,31 +2388,32 @@ destroy: static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc) { struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); struct rtrs_iu *iu; iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); - rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1); if (wc->status != IB_WC_SUCCESS) { - rtrs_err(sess->clt, "Sess info request send failed: %s\n", + rtrs_err(clt_path->clt, "Path info request send failed: %s\n", ib_wc_status_msg(wc->status)); - rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL); + rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING_ERR, NULL); return; } rtrs_clt_update_wc_stats(con); } -static int process_info_rsp(struct rtrs_clt_sess *sess, +static int process_info_rsp(struct rtrs_clt_path *clt_path, const struct rtrs_msg_info_rsp *msg) { unsigned int sg_cnt, total_len; int i, sgi; sg_cnt = le16_to_cpu(msg->sg_cnt); - if (!sg_cnt || (sess->queue_depth % sg_cnt)) { - rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n", + if (!sg_cnt || (clt_path->queue_depth % sg_cnt)) { + rtrs_err(clt_path->clt, + "Incorrect sg_cnt %d, is not multiple\n", sg_cnt); return -EINVAL; } @@ -2408,15 +2422,15 @@ static int process_info_rsp(struct rtrs_clt_sess *sess, * Check if IB immediate data size is enough to hold the mem_id and * the offset inside the memory chunk. */ - if ((ilog2(sg_cnt - 1) + 1) + (ilog2(sess->chunk_size - 1) + 1) > + if ((ilog2(sg_cnt - 1) + 1) + (ilog2(clt_path->chunk_size - 1) + 1) > MAX_IMM_PAYL_BITS) { - rtrs_err(sess->clt, + rtrs_err(clt_path->clt, "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n", - MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size); + MAX_IMM_PAYL_BITS, sg_cnt, clt_path->chunk_size); return -EINVAL; } total_len = 0; - for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) { + for (sgi = 0, i = 0; sgi < sg_cnt && i < clt_path->queue_depth; sgi++) { const struct rtrs_sg_desc *desc = &msg->desc[sgi]; u32 len, rkey; u64 addr; @@ -2427,26 +2441,28 @@ static int process_info_rsp(struct rtrs_clt_sess *sess, total_len += len; - if (!len || (len % sess->chunk_size)) { - rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi, + if (!len || (len % clt_path->chunk_size)) { + rtrs_err(clt_path->clt, "Incorrect [%d].len %d\n", + sgi, len); return -EINVAL; } - for ( ; len && i < sess->queue_depth; i++) { - sess->rbufs[i].addr = addr; - sess->rbufs[i].rkey = rkey; + for ( ; len && i < clt_path->queue_depth; i++) { + clt_path->rbufs[i].addr = addr; + clt_path->rbufs[i].rkey = rkey; - len -= sess->chunk_size; - addr += sess->chunk_size; + len -= clt_path->chunk_size; + addr += clt_path->chunk_size; } } /* Sanity check */ - if (sgi != sg_cnt || i != sess->queue_depth) { - rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n"); + if (sgi != sg_cnt || i != clt_path->queue_depth) { + rtrs_err(clt_path->clt, + "Incorrect sg vector, not fully mapped\n"); return -EINVAL; } - if (total_len != sess->chunk_size * sess->queue_depth) { - rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len); + if (total_len != clt_path->chunk_size * clt_path->queue_depth) { + rtrs_err(clt_path->clt, "Incorrect total_len %d\n", total_len); return -EINVAL; } @@ -2456,7 +2472,7 
@@ static int process_info_rsp(struct rtrs_clt_sess *sess, static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) { struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); - struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); + struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); struct rtrs_msg_info_rsp *msg; enum rtrs_clt_state state; struct rtrs_iu *iu; @@ -2468,37 +2484,37 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) WARN_ON(con->c.cid); iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); if (wc->status != IB_WC_SUCCESS) { - rtrs_err(sess->clt, "Sess info response recv failed: %s\n", + rtrs_err(clt_path->clt, "Path info response recv failed: %s\n", ib_wc_status_msg(wc->status)); goto out; } WARN_ON(wc->opcode != IB_WC_RECV); if (wc->byte_len < sizeof(*msg)) { - rtrs_err(sess->clt, "Sess info response is malformed: size %d\n", + rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", wc->byte_len); goto out; } - ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, + ib_dma_sync_single_for_cpu(clt_path->s.dev->ib_dev, iu->dma_addr, iu->size, DMA_FROM_DEVICE); msg = iu->buf; if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP) { - rtrs_err(sess->clt, "Sess info response is malformed: type %d\n", + rtrs_err(clt_path->clt, "Path info response is malformed: type %d\n", le16_to_cpu(msg->type)); goto out; } rx_sz = sizeof(*msg); rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt); if (wc->byte_len < rx_sz) { - rtrs_err(sess->clt, "Sess info response is malformed: size %d\n", + rtrs_err(clt_path->clt, "Path info response is malformed: size %d\n", wc->byte_len); goto out; } - err = process_info_rsp(sess, msg); + err = process_info_rsp(clt_path, msg); if (err) goto out; - err = post_recv_sess(sess); + err = post_recv_path(clt_path); if (err) goto out; @@ -2506,25 +2522,25 @@ static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) out: rtrs_clt_update_wc_stats(con); - rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); - rtrs_clt_change_state_get_old(sess, state, NULL); + rtrs_iu_free(iu, clt_path->s.dev->ib_dev, 1); + rtrs_clt_change_state_get_old(clt_path, state, NULL); } -static int rtrs_send_sess_info(struct rtrs_clt_sess *sess) +static int rtrs_send_path_info(struct rtrs_clt_path *clt_path) { - struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]); + struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]); struct rtrs_msg_info_req *msg; struct rtrs_iu *tx_iu, *rx_iu; size_t rx_sz; int err; rx_sz = sizeof(struct rtrs_msg_info_rsp); - rx_sz += sizeof(struct rtrs_sg_desc) * sess->queue_depth; + rx_sz += sizeof(struct rtrs_sg_desc) * clt_path->queue_depth; tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL, - sess->s.dev->ib_dev, DMA_TO_DEVICE, + clt_path->s.dev->ib_dev, DMA_TO_DEVICE, rtrs_clt_info_req_done); - rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev, + rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, clt_path->s.dev->ib_dev, DMA_FROM_DEVICE, rtrs_clt_info_rsp_done); if (!tx_iu || !rx_iu) { err = -ENOMEM; @@ -2533,33 +2549,34 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess) /* Prepare for getting info response */ err = rtrs_iu_post_recv(&usr_con->c, rx_iu); if (err) { - rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err); + rtrs_err(clt_path->clt, "rtrs_iu_post_recv(), err: %d\n", err); goto out; } rx_iu = NULL; msg = tx_iu->buf; msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ); - memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname)); + 
memcpy(msg->pathname, clt_path->s.sessname, sizeof(msg->pathname)); - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr, + ib_dma_sync_single_for_device(clt_path->s.dev->ib_dev, + tx_iu->dma_addr, tx_iu->size, DMA_TO_DEVICE); /* Send info request */ err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL); if (err) { - rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err); + rtrs_err(clt_path->clt, "rtrs_iu_post_send(), err: %d\n", err); goto out; } tx_iu = NULL; /* Wait for state change */ - wait_event_interruptible_timeout(sess->state_wq, - sess->state != RTRS_CLT_CONNECTING, + wait_event_interruptible_timeout(clt_path->state_wq, + clt_path->state != RTRS_CLT_CONNECTING, msecs_to_jiffies( RTRS_CONNECT_TIMEOUT_MS)); - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) { - if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR) + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) { + if (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTING_ERR) err = -ECONNRESET; else err = -ETIMEDOUT; @@ -2567,82 +2584,82 @@ static int rtrs_send_sess_info(struct rtrs_clt_sess *sess) out: if (tx_iu) - rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(tx_iu, clt_path->s.dev->ib_dev, 1); if (rx_iu) - rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(rx_iu, clt_path->s.dev->ib_dev, 1); if (err) /* If we've never taken async path because of malloc problems */ - rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL); + rtrs_clt_change_state_get_old(clt_path, + RTRS_CLT_CONNECTING_ERR, NULL); return err; } /** - * init_sess() - establishes all session connections and does handshake - * @sess: client session. + * init_path() - establishes all path connections and does handshake + * @clt_path: client path. * In case of error full close or reconnect procedure should be taken, * because reconnect or close async works can be started. 
*/ -static int init_sess(struct rtrs_clt_sess *sess) +static int init_path(struct rtrs_clt_path *clt_path) { int err; char str[NAME_MAX]; struct rtrs_addr path = { - .src = &sess->s.src_addr, - .dst = &sess->s.dst_addr, + .src = &clt_path->s.src_addr, + .dst = &clt_path->s.dst_addr, }; rtrs_addr_to_str(&path, str, sizeof(str)); - mutex_lock(&sess->init_mutex); - err = init_conns(sess); + mutex_lock(&clt_path->init_mutex); + err = init_conns(clt_path); if (err) { - rtrs_err(sess->clt, + rtrs_err(clt_path->clt, "init_conns() failed: err=%d path=%s [%s:%u]\n", err, - str, sess->hca_name, sess->hca_port); + str, clt_path->hca_name, clt_path->hca_port); goto out; } - err = rtrs_send_sess_info(sess); + err = rtrs_send_path_info(clt_path); if (err) { - rtrs_err( - sess->clt, - "rtrs_send_sess_info() failed: err=%d path=%s [%s:%u]\n", - err, str, sess->hca_name, sess->hca_port); + rtrs_err(clt_path->clt, + "rtrs_send_path_info() failed: err=%d path=%s [%s:%u]\n", + err, str, clt_path->hca_name, clt_path->hca_port); goto out; } - rtrs_clt_sess_up(sess); + rtrs_clt_path_up(clt_path); out: - mutex_unlock(&sess->init_mutex); + mutex_unlock(&clt_path->init_mutex); return err; } static void rtrs_clt_reconnect_work(struct work_struct *work) { - struct rtrs_clt_sess *sess; - struct rtrs_clt *clt; + struct rtrs_clt_path *clt_path; + struct rtrs_clt_sess *clt; unsigned int delay_ms; int err; - sess = container_of(to_delayed_work(work), struct rtrs_clt_sess, - reconnect_dwork); - clt = sess->clt; + clt_path = container_of(to_delayed_work(work), struct rtrs_clt_path, + reconnect_dwork); + clt = clt_path->clt; - if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING) + if (READ_ONCE(clt_path->state) != RTRS_CLT_RECONNECTING) return; - if (sess->reconnect_attempts >= clt->max_reconnect_attempts) { - /* Close a session completely if max attempts is reached */ - rtrs_clt_close_conns(sess, false); + if (clt_path->reconnect_attempts >= clt->max_reconnect_attempts) { + /* Close a path completely if max attempts is reached */ + rtrs_clt_close_conns(clt_path, false); return; } - sess->reconnect_attempts++; + clt_path->reconnect_attempts++; /* Stop everything */ - rtrs_clt_stop_and_destroy_conns(sess); + rtrs_clt_stop_and_destroy_conns(clt_path); msleep(RTRS_RECONNECT_BACKOFF); - if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING, NULL)) { - err = init_sess(sess); + if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_CONNECTING, NULL)) { + err = init_path(clt_path); if (err) goto reconnect_again; } @@ -2650,10 +2667,10 @@ static void rtrs_clt_reconnect_work(struct work_struct *work) return; reconnect_again: - if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, NULL)) { - sess->stats->reconnects.fail_cnt++; + if (rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_RECONNECTING, NULL)) { + clt_path->stats->reconnects.fail_cnt++; delay_ms = clt->reconnect_delay_sec * 1000; - queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, + queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, msecs_to_jiffies(delay_ms + prandom_u32() % RTRS_RECONNECT_SEED)); @@ -2662,19 +2679,20 @@ reconnect_again: static void rtrs_clt_dev_release(struct device *dev) { - struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev); + struct rtrs_clt_sess *clt = container_of(dev, struct rtrs_clt_sess, + dev); kfree(clt); } -static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num, +static struct rtrs_clt_sess *alloc_clt(const char *sessname, size_t paths_num, u16 port, size_t pdu_sz, void *priv, void (*link_ev)(void 
*priv, enum rtrs_clt_link_ev ev), unsigned int reconnect_delay_sec, unsigned int max_reconnect_attempts) { - struct rtrs_clt *clt; + struct rtrs_clt_sess *clt; int err; if (!paths_num || paths_num > MAX_PATHS_NUM) @@ -2749,7 +2767,7 @@ err: return ERR_PTR(err); } -static void free_clt(struct rtrs_clt *clt) +static void free_clt(struct rtrs_clt_sess *clt) { free_permits(clt); free_percpu(clt->pcpu_path); @@ -2760,7 +2778,7 @@ static void free_clt(struct rtrs_clt *clt) } /** - * rtrs_clt_open() - Open a session to an RTRS server + * rtrs_clt_open() - Open a path to an RTRS server * @ops: holds the link event callback and the private pointer. * @sessname: name of the session * @paths: Paths to be established defined by their src and dst addresses @@ -2777,18 +2795,24 @@ static void free_clt(struct rtrs_clt *clt) * * Return a valid pointer on success otherwise PTR_ERR. */ -struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops, - const char *sessname, +struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops, + const char *pathname, const struct rtrs_addr *paths, size_t paths_num, u16 port, size_t pdu_sz, u8 reconnect_delay_sec, s16 max_reconnect_attempts, u32 nr_poll_queues) { - struct rtrs_clt_sess *sess, *tmp; - struct rtrs_clt *clt; + struct rtrs_clt_path *clt_path, *tmp; + struct rtrs_clt_sess *clt; int err, i; - clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv, + if (strchr(pathname, '/') || strchr(pathname, '.')) { + pr_err("pathname cannot contain / and .\n"); + err = -EINVAL; + goto out; + } + + clt = alloc_clt(pathname, paths_num, port, pdu_sz, ops->priv, ops->link_ev, reconnect_delay_sec, max_reconnect_attempts); @@ -2797,49 +2821,49 @@ struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops, goto out; } for (i = 0; i < paths_num; i++) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; - sess = alloc_sess(clt, &paths[i], nr_cpu_ids, + clt_path = alloc_path(clt, &paths[i], nr_cpu_ids, nr_poll_queues); - if (IS_ERR(sess)) { - err = PTR_ERR(sess); - goto close_all_sess; + if (IS_ERR(clt_path)) { + err = PTR_ERR(clt_path); + goto close_all_path; } if (!i) - sess->for_new_clt = 1; - list_add_tail_rcu(&sess->s.entry, &clt->paths_list); + clt_path->for_new_clt = 1; + list_add_tail_rcu(&clt_path->s.entry, &clt->paths_list); - err = init_sess(sess); + err = init_path(clt_path); if (err) { - list_del_rcu(&sess->s.entry); - rtrs_clt_close_conns(sess, true); - free_percpu(sess->stats->pcpu_stats); - kfree(sess->stats); - free_sess(sess); - goto close_all_sess; + list_del_rcu(&clt_path->s.entry); + rtrs_clt_close_conns(clt_path, true); + free_percpu(clt_path->stats->pcpu_stats); + kfree(clt_path->stats); + free_path(clt_path); + goto close_all_path; } - err = rtrs_clt_create_sess_files(sess); + err = rtrs_clt_create_path_files(clt_path); if (err) { - list_del_rcu(&sess->s.entry); - rtrs_clt_close_conns(sess, true); - free_percpu(sess->stats->pcpu_stats); - kfree(sess->stats); - free_sess(sess); - goto close_all_sess; + list_del_rcu(&clt_path->s.entry); + rtrs_clt_close_conns(clt_path, true); + free_percpu(clt_path->stats->pcpu_stats); + kfree(clt_path->stats); + free_path(clt_path); + goto close_all_path; } } err = alloc_permits(clt); if (err) - goto close_all_sess; + goto close_all_path; return clt; -close_all_sess: - list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) { - rtrs_clt_destroy_sess_files(sess, NULL); - rtrs_clt_close_conns(sess, true); - kobject_put(&sess->kobj); +close_all_path: + list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, 
s.entry) { + rtrs_clt_destroy_path_files(clt_path, NULL); + rtrs_clt_close_conns(clt_path, true); + kobject_put(&clt_path->kobj); } rtrs_clt_destroy_sysfs_root(clt); free_clt(clt); @@ -2850,37 +2874,38 @@ out: EXPORT_SYMBOL(rtrs_clt_open); /** - * rtrs_clt_close() - Close a session + * rtrs_clt_close() - Close a path * @clt: Session handle. Session is freed upon return. */ -void rtrs_clt_close(struct rtrs_clt *clt) +void rtrs_clt_close(struct rtrs_clt_sess *clt) { - struct rtrs_clt_sess *sess, *tmp; + struct rtrs_clt_path *clt_path, *tmp; /* Firstly forbid sysfs access */ rtrs_clt_destroy_sysfs_root(clt); /* Now it is safe to iterate over all paths without locks */ - list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) { - rtrs_clt_close_conns(sess, true); - rtrs_clt_destroy_sess_files(sess, NULL); - kobject_put(&sess->kobj); + list_for_each_entry_safe(clt_path, tmp, &clt->paths_list, s.entry) { + rtrs_clt_close_conns(clt_path, true); + rtrs_clt_destroy_path_files(clt_path, NULL); + kobject_put(&clt_path->kobj); } free_clt(clt); } EXPORT_SYMBOL(rtrs_clt_close); -int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess) +int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *clt_path) { enum rtrs_clt_state old_state; int err = -EBUSY; bool changed; - changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, + changed = rtrs_clt_change_state_get_old(clt_path, + RTRS_CLT_RECONNECTING, &old_state); if (changed) { - sess->reconnect_attempts = 0; - queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0); + clt_path->reconnect_attempts = 0; + queue_delayed_work(rtrs_wq, &clt_path->reconnect_dwork, 0); } if (changed || old_state == RTRS_CLT_RECONNECTING) { /* @@ -2888,15 +2913,15 @@ int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess) * execution, so do the flush if we have queued something * right now or work is pending. */ - flush_delayed_work(&sess->reconnect_dwork); - err = (READ_ONCE(sess->state) == + flush_delayed_work(&clt_path->reconnect_dwork); + err = (READ_ONCE(clt_path->state) == RTRS_CLT_CONNECTED ? 0 : -ENOTCONN); } return err; } -int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess, +int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *clt_path, const struct attribute *sysfs_self) { enum rtrs_clt_state old_state; @@ -2912,27 +2937,27 @@ int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess, * removing the path. */ do { - rtrs_clt_close_conns(sess, true); - changed = rtrs_clt_change_state_get_old(sess, + rtrs_clt_close_conns(clt_path, true); + changed = rtrs_clt_change_state_get_old(clt_path, RTRS_CLT_DEAD, &old_state); } while (!changed && old_state != RTRS_CLT_DEAD); if (changed) { - rtrs_clt_remove_path_from_arr(sess); - rtrs_clt_destroy_sess_files(sess, sysfs_self); - kobject_put(&sess->kobj); + rtrs_clt_remove_path_from_arr(clt_path); + rtrs_clt_destroy_path_files(clt_path, sysfs_self); + kobject_put(&clt_path->kobj); } return 0; } -void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value) +void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value) { clt->max_reconnect_attempts = (unsigned int)value; } -int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt) +int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt) { return (int)clt->max_reconnect_attempts; } @@ -2962,12 +2987,12 @@ int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt) * On dir=WRITE rtrs client will rdma write data in sg to server side. 
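Editor's note: after this patch the handle exported to ULPs is struct rtrs_clt_sess. A minimal open/close sketch against the signatures shown in the hunks above, assuming the rtrs public header is included; the pathname, port and tuning values are made up for illustration (note the new check rejects pathnames containing '/' or '.'):

static struct rtrs_clt_sess *example_open(struct rtrs_clt_ops *ops,
					  const struct rtrs_addr *paths,
					  size_t paths_num)
{
	/* returns a valid pointer or an ERR_PTR() value */
	return rtrs_clt_open(ops, "example_path", paths, paths_num,
			     1234 /* port */, 0 /* pdu_sz */,
			     30 /* reconnect_delay_sec */,
			     3 /* max_reconnect_attempts */,
			     0 /* nr_poll_queues */);
}

static void example_close(struct rtrs_clt_sess *clt)
{
	/* the session and all its paths are freed on return */
	if (!IS_ERR_OR_NULL(clt))
		rtrs_clt_close(clt);
}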
*/ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops, - struct rtrs_clt *clt, struct rtrs_permit *permit, - const struct kvec *vec, size_t nr, size_t data_len, - struct scatterlist *sg, unsigned int sg_cnt) + struct rtrs_clt_sess *clt, struct rtrs_permit *permit, + const struct kvec *vec, size_t nr, size_t data_len, + struct scatterlist *sg, unsigned int sg_cnt) { struct rtrs_clt_io_req *req; - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; enum dma_data_direction dma_dir; int err = -ECONNABORTED, i; @@ -2989,19 +3014,19 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops, rcu_read_lock(); for (path_it_init(&it, clt); - (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) + (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) continue; - if (usr_len + hdr_len > sess->max_hdr_size) { - rtrs_wrn_rl(sess->clt, + if (usr_len + hdr_len > clt_path->max_hdr_size) { + rtrs_wrn_rl(clt_path->clt, "%s request failed, user message size is %zu and header length %zu, but max size is %u\n", dir == READ ? "Read" : "Write", - usr_len, hdr_len, sess->max_hdr_size); + usr_len, hdr_len, clt_path->max_hdr_size); err = -EMSGSIZE; break; } - req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv, + req = rtrs_clt_get_req(clt_path, ops->conf_fn, permit, ops->priv, vec, usr_len, sg, sg_cnt, data_len, dma_dir); if (dir == READ) @@ -3022,21 +3047,21 @@ int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops, } EXPORT_SYMBOL(rtrs_clt_request); -int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index) +int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index) { /* If no path, return -1 for block layer not to try again */ int cnt = -1; struct rtrs_con *con; - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; struct path_it it; rcu_read_lock(); for (path_it_init(&it, clt); - (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { - if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTED) + (clt_path = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { + if (READ_ONCE(clt_path->state) != RTRS_CLT_CONNECTED) continue; - con = sess->s.con[index + 1]; + con = clt_path->s.con[index + 1]; cnt = ib_process_cq_direct(con->cq, -1); if (cnt) break; @@ -3056,7 +3081,7 @@ EXPORT_SYMBOL(rtrs_clt_rdma_cq_direct); * 0 on success * -ECOMM no connection to the server */ -int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr) +int rtrs_clt_query(struct rtrs_clt_sess *clt, struct rtrs_attrs *attr) { if (!rtrs_clt_is_connected(clt)) return -ECOMM; @@ -3071,15 +3096,15 @@ int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr) } EXPORT_SYMBOL(rtrs_clt_query); -int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt, +int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt, struct rtrs_addr *addr) { - struct rtrs_clt_sess *sess; + struct rtrs_clt_path *clt_path; int err; - sess = alloc_sess(clt, addr, nr_cpu_ids, 0); - if (IS_ERR(sess)) - return PTR_ERR(sess); + clt_path = alloc_path(clt, addr, nr_cpu_ids, 0); + if (IS_ERR(clt_path)) + return PTR_ERR(clt_path); mutex_lock(&clt->paths_mutex); if (clt->paths_num == 0) { @@ -3088,7 +3113,7 @@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt, * the addition of the first path is like a new session for * the storage server */ - sess->for_new_clt = 1; + clt_path->for_new_clt = 1; } mutex_unlock(&clt->paths_mutex); @@ -3098,24 +3123,24 
@@ int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt, * IO will never grab it. Also it is very important to add * path before init, since init fires LINK_CONNECTED event. */ - rtrs_clt_add_path_to_arr(sess); + rtrs_clt_add_path_to_arr(clt_path); - err = init_sess(sess); + err = init_path(clt_path); if (err) - goto close_sess; + goto close_path; - err = rtrs_clt_create_sess_files(sess); + err = rtrs_clt_create_path_files(clt_path); if (err) - goto close_sess; + goto close_path; return 0; -close_sess: - rtrs_clt_remove_path_from_arr(sess); - rtrs_clt_close_conns(sess, true); - free_percpu(sess->stats->pcpu_stats); - kfree(sess->stats); - free_sess(sess); +close_path: + rtrs_clt_remove_path_from_arr(clt_path); + rtrs_clt_close_conns(clt_path, true); + free_percpu(clt_path->stats->pcpu_stats); + kfree(clt_path->stats); + free_path(clt_path); return err; } diff --git a/drivers/infiniband/ulp/rtrs/rtrs-clt.h b/drivers/infiniband/ulp/rtrs/rtrs-clt.h index 9dc819885ec7..d1b18a154ae0 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-clt.h +++ b/drivers/infiniband/ulp/rtrs/rtrs-clt.h @@ -124,9 +124,9 @@ struct rtrs_rbuf { u32 rkey; }; -struct rtrs_clt_sess { - struct rtrs_sess s; - struct rtrs_clt *clt; +struct rtrs_clt_path { + struct rtrs_path s; + struct rtrs_clt_sess *clt; wait_queue_head_t state_wq; enum rtrs_clt_state state; atomic_t connected_cnt; @@ -153,10 +153,10 @@ struct rtrs_clt_sess { *mp_skip_entry; }; -struct rtrs_clt { +struct rtrs_clt_sess { struct list_head paths_list; /* rcu protected list */ size_t paths_num; - struct rtrs_clt_sess + struct rtrs_clt_path __rcu * __percpu *pcpu_path; uuid_t paths_uuid; int paths_up; @@ -186,31 +186,32 @@ static inline struct rtrs_clt_con *to_clt_con(struct rtrs_con *c) return container_of(c, struct rtrs_clt_con, c); } -static inline struct rtrs_clt_sess *to_clt_sess(struct rtrs_sess *s) +static inline struct rtrs_clt_path *to_clt_path(struct rtrs_path *s) { - return container_of(s, struct rtrs_clt_sess, s); + return container_of(s, struct rtrs_clt_path, s); } -static inline int permit_size(struct rtrs_clt *clt) +static inline int permit_size(struct rtrs_clt_sess *clt) { return sizeof(struct rtrs_permit) + clt->pdu_sz; } -static inline struct rtrs_permit *get_permit(struct rtrs_clt *clt, int idx) +static inline struct rtrs_permit *get_permit(struct rtrs_clt_sess *clt, + int idx) { return (struct rtrs_permit *)(clt->permits + permit_size(clt) * idx); } -int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess); -void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait); -int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt, +int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_path *path); +void rtrs_clt_close_conns(struct rtrs_clt_path *clt_path, bool wait); +int rtrs_clt_create_path_from_sysfs(struct rtrs_clt_sess *clt, struct rtrs_addr *addr); -int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess, +int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_path *path, const struct attribute *sysfs_self); -void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value); -int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt); -void free_sess(struct rtrs_clt_sess *sess); +void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt_sess *clt, int value); +int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt_sess *clt); +void free_path(struct rtrs_clt_path *clt_path); /* rtrs-clt-stats.c */ @@ -224,27 +225,26 @@ void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir); int 
rtrs_clt_reset_rdma_lat_distr_stats(struct rtrs_clt_stats *stats, bool enable); ssize_t rtrs_clt_stats_rdma_lat_distr_to_str(struct rtrs_clt_stats *stats, - char *page, size_t len); + char *page); int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable); -int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats, char *buf, - size_t len); +int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats *stats, char *buf); +int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats *stats, char *buf); int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable); -int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf, - size_t len); +int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf); int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable); ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats, - char *page, size_t len); + char *page); int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *stats, bool enable); ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats, - char *page, size_t len); + char *page); /* rtrs-clt-sysfs.c */ -int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt); -void rtrs_clt_destroy_sysfs_root(struct rtrs_clt *clt); +int rtrs_clt_create_sysfs_root_files(struct rtrs_clt_sess *clt); +void rtrs_clt_destroy_sysfs_root(struct rtrs_clt_sess *clt); -int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess); -void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess, +int rtrs_clt_create_path_files(struct rtrs_clt_path *clt_path); +void rtrs_clt_destroy_path_files(struct rtrs_clt_path *clt_path, const struct attribute *sysfs_self); #endif /* RTRS_CLT_H */ diff --git a/drivers/infiniband/ulp/rtrs/rtrs-pri.h b/drivers/infiniband/ulp/rtrs/rtrs-pri.h index d12ddfa50747..9a1e5c2ae55c 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-pri.h +++ b/drivers/infiniband/ulp/rtrs/rtrs-pri.h @@ -90,7 +90,7 @@ struct rtrs_ib_dev { }; struct rtrs_con { - struct rtrs_sess *sess; + struct rtrs_path *path; struct ib_qp *qp; struct ib_cq *cq; struct rdma_cm_id *cm_id; @@ -100,7 +100,7 @@ struct rtrs_con { atomic_t sq_wr_avail; }; -struct rtrs_sess { +struct rtrs_path { struct list_head entry; struct sockaddr_storage dst_addr; struct sockaddr_storage src_addr; @@ -229,11 +229,11 @@ struct rtrs_msg_conn_rsp { /** * struct rtrs_msg_info_req * @type: @RTRS_MSG_INFO_REQ - * @sessname: Session name chosen by client + * @pathname: Path name chosen by client */ struct rtrs_msg_info_req { __le16 type; - u8 sessname[NAME_MAX]; + u8 pathname[NAME_MAX]; u8 reserved[15]; }; @@ -313,19 +313,19 @@ int rtrs_iu_post_rdma_write_imm(struct rtrs_con *con, struct rtrs_iu *iu, int rtrs_post_recv_empty(struct rtrs_con *con, struct ib_cqe *cqe); -int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con, +int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con, u32 max_send_sge, int cq_vector, int nr_cqe, u32 max_send_wr, u32 max_recv_wr, enum ib_poll_context poll_ctx); void rtrs_cq_qp_destroy(struct rtrs_con *con); -void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe, +void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe, unsigned int interval_ms, unsigned int missed_max, void (*err_handler)(struct rtrs_con *con), struct workqueue_struct *wq); -void rtrs_start_hb(struct rtrs_sess *sess); -void rtrs_stop_hb(struct rtrs_sess *sess); -void rtrs_send_hb_ack(struct rtrs_sess *sess); +void rtrs_start_hb(struct rtrs_path *path); +void 
rtrs_stop_hb(struct rtrs_path *path); +void rtrs_send_hb_ack(struct rtrs_path *path); void rtrs_rdma_dev_pd_init(enum ib_pd_flags pd_flags, struct rtrs_rdma_dev_pd *pool); @@ -398,7 +398,7 @@ static ssize_t get_value##_show(struct kobject *kobj, \ { \ type *stats = container_of(kobj, type, kobj_stats); \ \ - return print(stats, page, PAGE_SIZE); \ + return print(stats, page); \ } #define STAT_ATTR(type, stat, print, reset) \ diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c index 12c374b5eb6e..44b1c1652131 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-stats.c @@ -23,8 +23,7 @@ int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable) return -EINVAL; } -ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, - char *page, size_t len) +ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page) { struct rtrs_srv_stats_rdma_stats *r = &stats->rdma_stats; diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c index 20efd44297fb..b94ae12c2795 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv-sysfs.c @@ -15,10 +15,10 @@ static void rtrs_srv_release(struct kobject *kobj) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; - sess = container_of(kobj, struct rtrs_srv_sess, kobj); - kfree(sess); + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); + kfree(srv_path); } static struct kobj_type ktype = { @@ -36,24 +36,25 @@ static ssize_t rtrs_srv_disconnect_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { - struct rtrs_srv_sess *sess; - struct rtrs_sess *s; + struct rtrs_srv_path *srv_path; + struct rtrs_path *s; char str[MAXHOSTNAMELEN]; - sess = container_of(kobj, struct rtrs_srv_sess, kobj); - s = &sess->s; + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); + s = &srv_path->s; if (!sysfs_streq(buf, "1")) { rtrs_err(s, "%s: invalid value: '%s'\n", attr->attr.name, buf); return -EINVAL; } - sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, str, sizeof(str)); + sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr, str, + sizeof(str)); rtrs_info(s, "disconnect for path %s requested\n", str); /* first remove sysfs itself to avoid deadlock */ - sysfs_remove_file_self(&sess->kobj, &attr->attr); - close_sess(sess); + sysfs_remove_file_self(&srv_path->kobj, &attr->attr); + close_path(srv_path); return count; } @@ -66,11 +67,11 @@ static ssize_t rtrs_srv_hca_port_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; struct rtrs_con *usr_con; - sess = container_of(kobj, typeof(*sess), kobj); - usr_con = sess->s.con[0]; + srv_path = container_of(kobj, typeof(*srv_path), kobj); + usr_con = srv_path->s.con[0]; return sysfs_emit(page, "%u\n", usr_con->cm_id->port_num); } @@ -82,11 +83,11 @@ static ssize_t rtrs_srv_hca_name_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; - sess = container_of(kobj, struct rtrs_srv_sess, kobj); + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); - return sysfs_emit(page, "%s\n", sess->s.dev->ib_dev->name); + return sysfs_emit(page, "%s\n", srv_path->s.dev->ib_dev->name); } static struct kobj_attribute rtrs_srv_hca_name_attr = @@ -96,13 +97,13 @@ static ssize_t 
rtrs_srv_src_addr_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; int cnt; - sess = container_of(kobj, struct rtrs_srv_sess, kobj); - cnt = sockaddr_to_str((struct sockaddr *)&sess->s.dst_addr, + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); + cnt = sockaddr_to_str((struct sockaddr *)&srv_path->s.dst_addr, page, PAGE_SIZE); - return cnt + scnprintf(page + cnt, PAGE_SIZE - cnt, "\n"); + return cnt + sysfs_emit_at(page, cnt, "\n"); } static struct kobj_attribute rtrs_srv_src_addr_attr = @@ -112,11 +113,11 @@ static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; int len; - sess = container_of(kobj, struct rtrs_srv_sess, kobj); - len = sockaddr_to_str((struct sockaddr *)&sess->s.src_addr, page, + srv_path = container_of(kobj, struct rtrs_srv_path, kobj); + len = sockaddr_to_str((struct sockaddr *)&srv_path->s.src_addr, page, PAGE_SIZE); len += sysfs_emit_at(page, len, "\n"); return len; @@ -125,7 +126,7 @@ static ssize_t rtrs_srv_dst_addr_show(struct kobject *kobj, static struct kobj_attribute rtrs_srv_dst_addr_attr = __ATTR(dst_addr, 0444, rtrs_srv_dst_addr_show, NULL); -static struct attribute *rtrs_srv_sess_attrs[] = { +static struct attribute *rtrs_srv_path_attrs[] = { &rtrs_srv_hca_name_attr.attr, &rtrs_srv_hca_port_attr.attr, &rtrs_srv_src_addr_attr.attr, @@ -134,8 +135,8 @@ static struct attribute *rtrs_srv_sess_attrs[] = { NULL, }; -static const struct attribute_group rtrs_srv_sess_attr_group = { - .attrs = rtrs_srv_sess_attrs, +static const struct attribute_group rtrs_srv_path_attr_group = { + .attrs = rtrs_srv_path_attrs, }; STAT_ATTR(struct rtrs_srv_stats, rdma, @@ -151,9 +152,9 @@ static const struct attribute_group rtrs_srv_stats_attr_group = { .attrs = rtrs_srv_stats_attrs, }; -static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess) +static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; int err = 0; mutex_lock(&srv->paths_mutex); @@ -164,7 +165,7 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess) goto unlock; } srv->dev.class = rtrs_dev_class; - err = dev_set_name(&srv->dev, "%s", sess->s.sessname); + err = dev_set_name(&srv->dev, "%s", srv_path->s.sessname); if (err) goto unlock; @@ -196,9 +197,9 @@ unlock: } static void -rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess) +rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; mutex_lock(&srv->paths_mutex); if (!--srv->dev_ref) { @@ -213,7 +214,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess) } } -static void rtrs_srv_sess_stats_release(struct kobject *kobj) +static void rtrs_srv_path_stats_release(struct kobject *kobj) { struct rtrs_srv_stats *stats; @@ -224,22 +225,22 @@ static void rtrs_srv_sess_stats_release(struct kobject *kobj) static struct kobj_type ktype_stats = { .sysfs_ops = &kobj_sysfs_ops, - .release = rtrs_srv_sess_stats_release, + .release = rtrs_srv_path_stats_release, }; -static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess) +static int rtrs_srv_create_stats_files(struct rtrs_srv_path *srv_path) { int err; - struct rtrs_sess *s = &sess->s; + struct rtrs_path *s = &srv_path->s; - err = 
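Editor's note: several hunks above drop the explicit PAGE_SIZE/len arguments in favour of sysfs_emit()/sysfs_emit_at(), which bound the output to PAGE_SIZE internally. A sketch of the resulting show-callback shape, with a hypothetical attribute, not part of the patch:

#include <linux/kobject.h>
#include <linux/sysfs.h>

static ssize_t example_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *page)
{
	int len;

	len = sysfs_emit(page, "%d", 42);	/* writes at offset 0 */
	len += sysfs_emit_at(page, len, "\n");	/* appends at offset len */
	return len;
}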
kobject_init_and_add(&sess->stats->kobj_stats, &ktype_stats, - &sess->kobj, "stats"); + err = kobject_init_and_add(&srv_path->stats->kobj_stats, &ktype_stats, + &srv_path->kobj, "stats"); if (err) { rtrs_err(s, "kobject_init_and_add(): %d\n", err); - kobject_put(&sess->stats->kobj_stats); + kobject_put(&srv_path->stats->kobj_stats); return err; } - err = sysfs_create_group(&sess->stats->kobj_stats, + err = sysfs_create_group(&srv_path->stats->kobj_stats, &rtrs_srv_stats_attr_group); if (err) { rtrs_err(s, "sysfs_create_group(): %d\n", err); @@ -249,64 +250,64 @@ static int rtrs_srv_create_stats_files(struct rtrs_srv_sess *sess) return 0; err: - kobject_del(&sess->stats->kobj_stats); - kobject_put(&sess->stats->kobj_stats); + kobject_del(&srv_path->stats->kobj_stats); + kobject_put(&srv_path->stats->kobj_stats); return err; } -int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess) +int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; - struct rtrs_sess *s = &sess->s; + struct rtrs_srv_sess *srv = srv_path->srv; + struct rtrs_path *s = &srv_path->s; char str[NAME_MAX]; int err; struct rtrs_addr path = { - .src = &sess->s.dst_addr, - .dst = &sess->s.src_addr, + .src = &srv_path->s.dst_addr, + .dst = &srv_path->s.src_addr, }; rtrs_addr_to_str(&path, str, sizeof(str)); - err = rtrs_srv_create_once_sysfs_root_folders(sess); + err = rtrs_srv_create_once_sysfs_root_folders(srv_path); if (err) return err; - err = kobject_init_and_add(&sess->kobj, &ktype, srv->kobj_paths, + err = kobject_init_and_add(&srv_path->kobj, &ktype, srv->kobj_paths, "%s", str); if (err) { rtrs_err(s, "kobject_init_and_add(): %d\n", err); goto destroy_root; } - err = sysfs_create_group(&sess->kobj, &rtrs_srv_sess_attr_group); + err = sysfs_create_group(&srv_path->kobj, &rtrs_srv_path_attr_group); if (err) { rtrs_err(s, "sysfs_create_group(): %d\n", err); goto put_kobj; } - err = rtrs_srv_create_stats_files(sess); + err = rtrs_srv_create_stats_files(srv_path); if (err) goto remove_group; return 0; remove_group: - sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group); + sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group); put_kobj: - kobject_del(&sess->kobj); + kobject_del(&srv_path->kobj); destroy_root: - kobject_put(&sess->kobj); - rtrs_srv_destroy_once_sysfs_root_folders(sess); + kobject_put(&srv_path->kobj); + rtrs_srv_destroy_once_sysfs_root_folders(srv_path); return err; } -void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess) +void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path) { - if (sess->kobj.state_in_sysfs) { - kobject_del(&sess->stats->kobj_stats); - kobject_put(&sess->stats->kobj_stats); - sysfs_remove_group(&sess->kobj, &rtrs_srv_sess_attr_group); - kobject_put(&sess->kobj); + if (srv_path->kobj.state_in_sysfs) { + kobject_del(&srv_path->stats->kobj_stats); + kobject_put(&srv_path->stats->kobj_stats); + sysfs_remove_group(&srv_path->kobj, &rtrs_srv_path_attr_group); + kobject_put(&srv_path->kobj); - rtrs_srv_destroy_once_sysfs_root_folders(sess); + rtrs_srv_destroy_once_sysfs_root_folders(srv_path); } } diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.c b/drivers/infiniband/ulp/rtrs/rtrs-srv.c index 716ef7b23558..24024bce2566 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv.c +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.c @@ -62,19 +62,19 @@ static inline struct rtrs_srv_con *to_srv_con(struct rtrs_con *c) return container_of(c, struct rtrs_srv_con, c); } -static inline struct rtrs_srv_sess *to_srv_sess(struct rtrs_sess *s) 
+static inline struct rtrs_srv_path *to_srv_path(struct rtrs_path *s) { - return container_of(s, struct rtrs_srv_sess, s); + return container_of(s, struct rtrs_srv_path, s); } -static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess, +static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path, enum rtrs_srv_state new_state) { enum rtrs_srv_state old_state; bool changed = false; - spin_lock_irq(&sess->state_lock); - old_state = sess->state; + spin_lock_irq(&srv_path->state_lock); + old_state = srv_path->state; switch (new_state) { case RTRS_SRV_CONNECTED: if (old_state == RTRS_SRV_CONNECTING) @@ -93,8 +93,8 @@ static bool rtrs_srv_change_state(struct rtrs_srv_sess *sess, break; } if (changed) - sess->state = new_state; - spin_unlock_irq(&sess->state_lock); + srv_path->state = new_state; + spin_unlock_irq(&srv_path->state_lock); return changed; } @@ -106,16 +106,16 @@ static void free_id(struct rtrs_srv_op *id) kfree(id); } -static void rtrs_srv_free_ops_ids(struct rtrs_srv_sess *sess) +static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; int i; - if (sess->ops_ids) { + if (srv_path->ops_ids) { for (i = 0; i < srv->queue_depth; i++) - free_id(sess->ops_ids[i]); - kfree(sess->ops_ids); - sess->ops_ids = NULL; + free_id(srv_path->ops_ids[i]); + kfree(srv_path->ops_ids); + srv_path->ops_ids = NULL; } } @@ -127,21 +127,24 @@ static struct ib_cqe io_comp_cqe = { static inline void rtrs_srv_inflight_ref_release(struct percpu_ref *ref) { - struct rtrs_srv_sess *sess = container_of(ref, struct rtrs_srv_sess, ids_inflight_ref); + struct rtrs_srv_path *srv_path = container_of(ref, + struct rtrs_srv_path, + ids_inflight_ref); - percpu_ref_exit(&sess->ids_inflight_ref); - complete(&sess->complete_done); + percpu_ref_exit(&srv_path->ids_inflight_ref); + complete(&srv_path->complete_done); } -static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess) +static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; struct rtrs_srv_op *id; int i, ret; - sess->ops_ids = kcalloc(srv->queue_depth, sizeof(*sess->ops_ids), - GFP_KERNEL); - if (!sess->ops_ids) + srv_path->ops_ids = kcalloc(srv->queue_depth, + sizeof(*srv_path->ops_ids), + GFP_KERNEL); + if (!srv_path->ops_ids) goto err; for (i = 0; i < srv->queue_depth; ++i) { @@ -149,44 +152,44 @@ static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_sess *sess) if (!id) goto err; - sess->ops_ids[i] = id; + srv_path->ops_ids[i] = id; } - ret = percpu_ref_init(&sess->ids_inflight_ref, + ret = percpu_ref_init(&srv_path->ids_inflight_ref, rtrs_srv_inflight_ref_release, 0, GFP_KERNEL); if (ret) { pr_err("Percpu reference init failed\n"); goto err; } - init_completion(&sess->complete_done); + init_completion(&srv_path->complete_done); return 0; err: - rtrs_srv_free_ops_ids(sess); + rtrs_srv_free_ops_ids(srv_path); return -ENOMEM; } -static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_sess *sess) +static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path) { - percpu_ref_get(&sess->ids_inflight_ref); + percpu_ref_get(&srv_path->ids_inflight_ref); } -static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_sess *sess) +static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path) { - percpu_ref_put(&sess->ids_inflight_ref); + percpu_ref_put(&srv_path->ids_inflight_ref); } static void rtrs_srv_reg_mr_done(struct ib_cq *cq, struct ib_wc *wc) { struct 
rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); if (wc->status != IB_WC_SUCCESS) { rtrs_err(s, "REG MR failed: %s\n", ib_wc_status_msg(wc->status)); - close_sess(sess); + close_path(srv_path); return; } } @@ -197,9 +200,9 @@ static struct ib_cqe local_reg_cqe = { static int rdma_write_sg(struct rtrs_srv_op *id) { - struct rtrs_sess *s = id->con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); - dma_addr_t dma_addr = sess->dma_addr[id->msg_id]; + struct rtrs_path *s = id->con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); + dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id]; struct rtrs_srv_mr *srv_mr; struct ib_send_wr inv_wr; struct ib_rdma_wr imm_wr; @@ -233,7 +236,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id) return -EINVAL; } - plist->lkey = sess->s.dev->ib_pd->local_dma_lkey; + plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey; offset += plist->length; wr->wr.sg_list = plist; @@ -284,7 +287,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id) if (always_invalidate) { struct rtrs_msg_rkey_rsp *msg; - srv_mr = &sess->mrs[id->msg_id]; + srv_mr = &srv_path->mrs[id->msg_id]; rwr.wr.opcode = IB_WR_REG_MR; rwr.wr.wr_cqe = &local_reg_cqe; rwr.wr.num_sge = 0; @@ -300,11 +303,11 @@ static int rdma_write_sg(struct rtrs_srv_op *id) list.addr = srv_mr->iu->dma_addr; list.length = sizeof(*msg); - list.lkey = sess->s.dev->ib_pd->local_dma_lkey; + list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey; imm_wr.wr.sg_list = &list; imm_wr.wr.num_sge = 1; imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM; - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, + ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, srv_mr->iu->dma_addr, srv_mr->iu->size, DMA_TO_DEVICE); } else { @@ -317,7 +320,7 @@ static int rdma_write_sg(struct rtrs_srv_op *id) 0, need_inval)); imm_wr.wr.wr_cqe = &io_comp_cqe; - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, dma_addr, + ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr, offset, DMA_BIDIRECTIONAL); err = ib_post_send(id->con->c.qp, &id->tx_wr.wr, NULL); @@ -341,8 +344,8 @@ static int rdma_write_sg(struct rtrs_srv_op *id) static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id, int errno) { - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); struct ib_send_wr inv_wr, *wr = NULL; struct ib_rdma_wr imm_wr; struct ib_reg_wr rwr; @@ -402,7 +405,7 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id, struct ib_sge list; struct rtrs_msg_rkey_rsp *msg; - srv_mr = &sess->mrs[id->msg_id]; + srv_mr = &srv_path->mrs[id->msg_id]; rwr.wr.next = &imm_wr.wr; rwr.wr.opcode = IB_WR_REG_MR; rwr.wr.wr_cqe = &local_reg_cqe; @@ -419,11 +422,11 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, struct rtrs_srv_op *id, list.addr = srv_mr->iu->dma_addr; list.length = sizeof(*msg); - list.lkey = sess->s.dev->ib_pd->local_dma_lkey; + list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey; imm_wr.wr.sg_list = &list; imm_wr.wr.num_sge = 1; imm_wr.wr.opcode = IB_WR_SEND_WITH_IMM; - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, + ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, srv_mr->iu->dma_addr, srv_mr->iu->size, DMA_TO_DEVICE); } else { @@ -444,11 +447,11 @@ static int send_io_resp_imm(struct rtrs_srv_con *con, 
struct rtrs_srv_op *id, return err; } -void close_sess(struct rtrs_srv_sess *sess) +void close_path(struct rtrs_srv_path *srv_path) { - if (rtrs_srv_change_state(sess, RTRS_SRV_CLOSING)) - queue_work(rtrs_wq, &sess->close_work); - WARN_ON(sess->state != RTRS_SRV_CLOSING); + if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING)) + queue_work(rtrs_wq, &srv_path->close_work); + WARN_ON(srv_path->state != RTRS_SRV_CLOSING); } static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state) @@ -480,35 +483,35 @@ static inline const char *rtrs_srv_state_str(enum rtrs_srv_state state) */ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; struct rtrs_srv_con *con; - struct rtrs_sess *s; + struct rtrs_path *s; int err; if (WARN_ON(!id)) return true; con = id->con; - s = con->c.sess; - sess = to_srv_sess(s); + s = con->c.path; + srv_path = to_srv_path(s); id->status = status; - if (sess->state != RTRS_SRV_CONNECTED) { + if (srv_path->state != RTRS_SRV_CONNECTED) { rtrs_err_rl(s, - "Sending I/O response failed, session %s is disconnected, sess state %s\n", - kobject_name(&sess->kobj), - rtrs_srv_state_str(sess->state)); + "Sending I/O response failed, server path %s is disconnected, path state %s\n", + kobject_name(&srv_path->kobj), + rtrs_srv_state_str(srv_path->state)); goto out; } if (always_invalidate) { - struct rtrs_srv_mr *mr = &sess->mrs[id->msg_id]; + struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id]; ib_update_fast_reg_key(mr->mr, ib_inc_rkey(mr->mr->rkey)); } if (atomic_sub_return(1, &con->c.sq_wr_avail) < 0) { - rtrs_err(s, "IB send queue full: sess=%s cid=%d\n", - kobject_name(&sess->kobj), + rtrs_err(s, "IB send queue full: srv_path=%s cid=%d\n", + kobject_name(&srv_path->kobj), con->c.cid); atomic_add(1, &con->c.sq_wr_avail); spin_lock(&con->rsp_wr_wait_lock); @@ -523,12 +526,12 @@ bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int status) err = rdma_write_sg(id); if (err) { - rtrs_err_rl(s, "IO response failed: %d: sess=%s\n", err, - kobject_name(&sess->kobj)); - close_sess(sess); + rtrs_err_rl(s, "IO response failed: %d: srv_path=%s\n", err, + kobject_name(&srv_path->kobj)); + close_path(srv_path); } out: - rtrs_srv_put_ops_ids(sess); + rtrs_srv_put_ops_ids(srv_path); return true; } EXPORT_SYMBOL(rtrs_srv_resp_rdma); @@ -538,33 +541,33 @@ EXPORT_SYMBOL(rtrs_srv_resp_rdma); * @srv: Session pointer * @priv: The private pointer that is associated with the session. 
*/ -void rtrs_srv_set_sess_priv(struct rtrs_srv *srv, void *priv) +void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *srv, void *priv) { srv->priv = priv; } EXPORT_SYMBOL(rtrs_srv_set_sess_priv); -static void unmap_cont_bufs(struct rtrs_srv_sess *sess) +static void unmap_cont_bufs(struct rtrs_srv_path *srv_path) { int i; - for (i = 0; i < sess->mrs_num; i++) { + for (i = 0; i < srv_path->mrs_num; i++) { struct rtrs_srv_mr *srv_mr; - srv_mr = &sess->mrs[i]; - rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1); + srv_mr = &srv_path->mrs[i]; + rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1); ib_dereg_mr(srv_mr->mr); - ib_dma_unmap_sg(sess->s.dev->ib_dev, srv_mr->sgt.sgl, + ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl, srv_mr->sgt.nents, DMA_BIDIRECTIONAL); sg_free_table(&srv_mr->sgt); } - kfree(sess->mrs); + kfree(srv_path->mrs); } -static int map_cont_bufs(struct rtrs_srv_sess *sess) +static int map_cont_bufs(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; - struct rtrs_sess *ss = &sess->s; + struct rtrs_srv_sess *srv = srv_path->srv; + struct rtrs_path *ss = &srv_path->s; int i, mri, err, mrs_num; unsigned int chunk_bits; int chunks_per_mr = 1; @@ -581,19 +584,19 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess) mrs_num = srv->queue_depth; } else { chunks_per_mr = - sess->s.dev->ib_dev->attrs.max_fast_reg_page_list_len; + srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len; mrs_num = DIV_ROUND_UP(srv->queue_depth, chunks_per_mr); chunks_per_mr = DIV_ROUND_UP(srv->queue_depth, mrs_num); } - sess->mrs = kcalloc(mrs_num, sizeof(*sess->mrs), GFP_KERNEL); - if (!sess->mrs) + srv_path->mrs = kcalloc(mrs_num, sizeof(*srv_path->mrs), GFP_KERNEL); + if (!srv_path->mrs) return -ENOMEM; - sess->mrs_num = mrs_num; + srv_path->mrs_num = mrs_num; for (mri = 0; mri < mrs_num; mri++) { - struct rtrs_srv_mr *srv_mr = &sess->mrs[mri]; + struct rtrs_srv_mr *srv_mr = &srv_path->mrs[mri]; struct sg_table *sgt = &srv_mr->sgt; struct scatterlist *s; struct ib_mr *mr; @@ -612,13 +615,13 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess) sg_set_page(s, srv->chunks[chunks + i], max_chunk_size, 0); - nr = ib_dma_map_sg(sess->s.dev->ib_dev, sgt->sgl, + nr = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); if (nr < sgt->nents) { err = nr < 0 ? 
nr : -EINVAL; goto free_sg; } - mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, + mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, sgt->nents); if (IS_ERR(mr)) { err = PTR_ERR(mr); @@ -634,7 +637,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess) if (always_invalidate) { srv_mr->iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_rkey_rsp), - GFP_KERNEL, sess->s.dev->ib_dev, + GFP_KERNEL, srv_path->s.dev->ib_dev, DMA_TO_DEVICE, rtrs_srv_rdma_done); if (!srv_mr->iu) { err = -ENOMEM; @@ -644,7 +647,7 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess) } /* Eventually dma addr for each chunk can be cached */ for_each_sg(sgt->sgl, s, sgt->orig_nents, i) - sess->dma_addr[chunks + i] = sg_dma_address(s); + srv_path->dma_addr[chunks + i] = sg_dma_address(s); ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey)); srv_mr->mr = mr; @@ -652,75 +655,75 @@ static int map_cont_bufs(struct rtrs_srv_sess *sess) continue; err: while (mri--) { - srv_mr = &sess->mrs[mri]; + srv_mr = &srv_path->mrs[mri]; sgt = &srv_mr->sgt; mr = srv_mr->mr; - rtrs_iu_free(srv_mr->iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1); dereg_mr: ib_dereg_mr(mr); unmap_sg: - ib_dma_unmap_sg(sess->s.dev->ib_dev, sgt->sgl, + ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL); free_sg: sg_free_table(sgt); } - kfree(sess->mrs); + kfree(srv_path->mrs); return err; } chunk_bits = ilog2(srv->queue_depth - 1) + 1; - sess->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits); + srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits); return 0; } static void rtrs_srv_hb_err_handler(struct rtrs_con *c) { - close_sess(to_srv_sess(c->sess)); + close_path(to_srv_path(c->path)); } -static void rtrs_srv_init_hb(struct rtrs_srv_sess *sess) +static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path) { - rtrs_init_hb(&sess->s, &io_comp_cqe, + rtrs_init_hb(&srv_path->s, &io_comp_cqe, RTRS_HB_INTERVAL_MS, RTRS_HB_MISSED_MAX, rtrs_srv_hb_err_handler, rtrs_wq); } -static void rtrs_srv_start_hb(struct rtrs_srv_sess *sess) +static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path) { - rtrs_start_hb(&sess->s); + rtrs_start_hb(&srv_path->s); } -static void rtrs_srv_stop_hb(struct rtrs_srv_sess *sess) +static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path) { - rtrs_stop_hb(&sess->s); + rtrs_stop_hb(&srv_path->s); } static void rtrs_srv_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) { struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); struct rtrs_iu *iu; iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); - rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); if (wc->status != IB_WC_SUCCESS) { rtrs_err(s, "Sess info response send failed: %s\n", ib_wc_status_msg(wc->status)); - close_sess(sess); + close_path(srv_path); return; } WARN_ON(wc->opcode != IB_WC_SEND); } -static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess) +static void rtrs_srv_path_up(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; struct rtrs_srv_ctx *ctx = srv->ctx; int up; @@ -731,18 +734,18 @@ static void rtrs_srv_sess_up(struct rtrs_srv_sess *sess) mutex_unlock(&srv->paths_ev_mutex); /* Mark session as established */ - sess->established = true; + srv_path->established = true; } -static void rtrs_srv_sess_down(struct 
rtrs_srv_sess *sess) +static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; struct rtrs_srv_ctx *ctx = srv->ctx; - if (!sess->established) + if (!srv_path->established) return; - sess->established = false; + srv_path->established = false; mutex_lock(&srv->paths_ev_mutex); WARN_ON(!srv->paths_up); if (--srv->paths_up == 0) @@ -750,11 +753,11 @@ static void rtrs_srv_sess_down(struct rtrs_srv_sess *sess) mutex_unlock(&srv->paths_ev_mutex); } -static bool exist_sessname(struct rtrs_srv_ctx *ctx, - const char *sessname, const uuid_t *path_uuid) +static bool exist_pathname(struct rtrs_srv_ctx *ctx, + const char *pathname, const uuid_t *path_uuid) { - struct rtrs_srv *srv; - struct rtrs_srv_sess *sess; + struct rtrs_srv_sess *srv; + struct rtrs_srv_path *srv_path; bool found = false; mutex_lock(&ctx->srv_mutex); @@ -767,9 +770,9 @@ static bool exist_sessname(struct rtrs_srv_ctx *ctx, continue; } - list_for_each_entry(sess, &srv->paths_list, s.entry) { - if (strlen(sess->s.sessname) == strlen(sessname) && - !strcmp(sess->s.sessname, sessname)) { + list_for_each_entry(srv_path, &srv->paths_list, s.entry) { + if (strlen(srv_path->s.sessname) == strlen(pathname) && + !strcmp(srv_path->s.sessname, pathname)) { found = true; break; } @@ -782,14 +785,14 @@ static bool exist_sessname(struct rtrs_srv_ctx *ctx, return found; } -static int post_recv_sess(struct rtrs_srv_sess *sess); +static int post_recv_path(struct rtrs_srv_path *srv_path); static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno); static int process_info_req(struct rtrs_srv_con *con, struct rtrs_msg_info_req *msg) { - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); struct ib_send_wr *reg_wr = NULL; struct rtrs_msg_info_rsp *rsp; struct rtrs_iu *tx_iu; @@ -797,26 +800,32 @@ static int process_info_req(struct rtrs_srv_con *con, int mri, err; size_t tx_sz; - err = post_recv_sess(sess); + err = post_recv_path(srv_path); if (err) { - rtrs_err(s, "post_recv_sess(), err: %d\n", err); + rtrs_err(s, "post_recv_path(), err: %d\n", err); return err; } - if (exist_sessname(sess->srv->ctx, - msg->sessname, &sess->srv->paths_uuid)) { - rtrs_err(s, "sessname is duplicated: %s\n", msg->sessname); + if (strchr(msg->pathname, '/') || strchr(msg->pathname, '.')) { + rtrs_err(s, "pathname cannot contain / and .\n"); + return -EINVAL; + } + + if (exist_pathname(srv_path->srv->ctx, + msg->pathname, &srv_path->srv->paths_uuid)) { + rtrs_err(s, "pathname is duplicated: %s\n", msg->pathname); return -EPERM; } - strscpy(sess->s.sessname, msg->sessname, sizeof(sess->s.sessname)); + strscpy(srv_path->s.sessname, msg->pathname, + sizeof(srv_path->s.sessname)); - rwr = kcalloc(sess->mrs_num, sizeof(*rwr), GFP_KERNEL); + rwr = kcalloc(srv_path->mrs_num, sizeof(*rwr), GFP_KERNEL); if (!rwr) return -ENOMEM; tx_sz = sizeof(*rsp); - tx_sz += sizeof(rsp->desc[0]) * sess->mrs_num; - tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, sess->s.dev->ib_dev, + tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num; + tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev, DMA_TO_DEVICE, rtrs_srv_info_rsp_done); if (!tx_iu) { err = -ENOMEM; @@ -825,10 +834,10 @@ static int process_info_req(struct rtrs_srv_con *con, rsp = tx_iu->buf; rsp->type = cpu_to_le16(RTRS_MSG_INFO_RSP); - rsp->sg_cnt = cpu_to_le16(sess->mrs_num); + rsp->sg_cnt = 
cpu_to_le16(srv_path->mrs_num); - for (mri = 0; mri < sess->mrs_num; mri++) { - struct ib_mr *mr = sess->mrs[mri].mr; + for (mri = 0; mri < srv_path->mrs_num; mri++) { + struct ib_mr *mr = srv_path->mrs[mri].mr; rsp->desc[mri].addr = cpu_to_le64(mr->iova); rsp->desc[mri].key = cpu_to_le32(mr->rkey); @@ -849,13 +858,13 @@ static int process_info_req(struct rtrs_srv_con *con, reg_wr = &rwr[mri].wr; } - err = rtrs_srv_create_sess_files(sess); + err = rtrs_srv_create_path_files(srv_path); if (err) goto iu_free; - kobject_get(&sess->kobj); - get_device(&sess->srv->dev); - rtrs_srv_change_state(sess, RTRS_SRV_CONNECTED); - rtrs_srv_start_hb(sess); + kobject_get(&srv_path->kobj); + get_device(&srv_path->srv->dev); + rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED); + rtrs_srv_start_hb(srv_path); /* * We do not account number of established connections at the current @@ -863,9 +872,10 @@ static int process_info_req(struct rtrs_srv_con *con, * all connections are successfully established. Thus, simply notify * listener with a proper event if we are the first path. */ - rtrs_srv_sess_up(sess); + rtrs_srv_path_up(srv_path); - ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr, + ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, + tx_iu->dma_addr, tx_iu->size, DMA_TO_DEVICE); /* Send info response */ @@ -873,7 +883,7 @@ static int process_info_req(struct rtrs_srv_con *con, if (err) { rtrs_err(s, "rtrs_iu_post_send(), err: %d\n", err); iu_free: - rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1); } rwr_free: kfree(rwr); @@ -884,8 +894,8 @@ rwr_free: static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc) { struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); struct rtrs_msg_info_req *msg; struct rtrs_iu *iu; int err; @@ -905,7 +915,7 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc) wc->byte_len); goto close; } - ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, + ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr, iu->size, DMA_FROM_DEVICE); msg = iu->buf; if (le16_to_cpu(msg->type) != RTRS_MSG_INFO_REQ) { @@ -918,22 +928,22 @@ static void rtrs_srv_info_req_done(struct ib_cq *cq, struct ib_wc *wc) goto close; out: - rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); return; close: - close_sess(sess); + close_path(srv_path); goto out; } static int post_recv_info_req(struct rtrs_srv_con *con) { - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); struct rtrs_iu *rx_iu; int err; rx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), - GFP_KERNEL, sess->s.dev->ib_dev, + GFP_KERNEL, srv_path->s.dev->ib_dev, DMA_FROM_DEVICE, rtrs_srv_info_req_done); if (!rx_iu) return -ENOMEM; @@ -941,7 +951,7 @@ static int post_recv_info_req(struct rtrs_srv_con *con) err = rtrs_iu_post_recv(&con->c, rx_iu); if (err) { rtrs_err(s, "rtrs_iu_post_recv(), err: %d\n", err); - rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1); + rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1); return err; } @@ -961,20 +971,20 @@ static int post_recv_io(struct rtrs_srv_con *con, size_t q_size) return 0; } -static int post_recv_sess(struct rtrs_srv_sess *sess) +static int 
post_recv_path(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; - struct rtrs_sess *s = &sess->s; + struct rtrs_srv_sess *srv = srv_path->srv; + struct rtrs_path *s = &srv_path->s; size_t q_size; int err, cid; - for (cid = 0; cid < sess->s.con_num; cid++) { + for (cid = 0; cid < srv_path->s.con_num; cid++) { if (cid == 0) q_size = SERVICE_CON_QUEUE_DEPTH; else q_size = srv->queue_depth; - err = post_recv_io(to_srv_con(sess->s.con[cid]), q_size); + err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size); if (err) { rtrs_err(s, "post_recv_io(), err: %d\n", err); return err; @@ -988,9 +998,9 @@ static void process_read(struct rtrs_srv_con *con, struct rtrs_msg_rdma_read *msg, u32 buf_id, u32 off) { - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); - struct rtrs_srv *srv = sess->srv; + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); + struct rtrs_srv_sess *srv = srv_path->srv; struct rtrs_srv_ctx *ctx = srv->ctx; struct rtrs_srv_op *id; @@ -998,10 +1008,10 @@ static void process_read(struct rtrs_srv_con *con, void *data; int ret; - if (sess->state != RTRS_SRV_CONNECTED) { + if (srv_path->state != RTRS_SRV_CONNECTED) { rtrs_err_rl(s, "Processing read request failed, session is disconnected, sess state %s\n", - rtrs_srv_state_str(sess->state)); + rtrs_srv_state_str(srv_path->state)); return; } if (msg->sg_cnt != 1 && msg->sg_cnt != 0) { @@ -1009,9 +1019,9 @@ static void process_read(struct rtrs_srv_con *con, "Processing read request failed, invalid message\n"); return; } - rtrs_srv_get_ops_ids(sess); - rtrs_srv_update_rdma_stats(sess->stats, off, READ); - id = sess->ops_ids[buf_id]; + rtrs_srv_get_ops_ids(srv_path); + rtrs_srv_update_rdma_stats(srv_path->stats, off, READ); + id = srv_path->ops_ids[buf_id]; id->con = con; id->dir = READ; id->msg_id = buf_id; @@ -1037,18 +1047,18 @@ send_err_msg: rtrs_err_rl(s, "Sending err msg for failed RDMA-Write-Req failed, msg_id %d, err: %d\n", buf_id, ret); - close_sess(sess); + close_path(srv_path); } - rtrs_srv_put_ops_ids(sess); + rtrs_srv_put_ops_ids(srv_path); } static void process_write(struct rtrs_srv_con *con, struct rtrs_msg_rdma_write *req, u32 buf_id, u32 off) { - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); - struct rtrs_srv *srv = sess->srv; + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); + struct rtrs_srv_sess *srv = srv_path->srv; struct rtrs_srv_ctx *ctx = srv->ctx; struct rtrs_srv_op *id; @@ -1056,15 +1066,15 @@ static void process_write(struct rtrs_srv_con *con, void *data; int ret; - if (sess->state != RTRS_SRV_CONNECTED) { + if (srv_path->state != RTRS_SRV_CONNECTED) { rtrs_err_rl(s, "Processing write request failed, session is disconnected, sess state %s\n", - rtrs_srv_state_str(sess->state)); + rtrs_srv_state_str(srv_path->state)); return; } - rtrs_srv_get_ops_ids(sess); - rtrs_srv_update_rdma_stats(sess->stats, off, WRITE); - id = sess->ops_ids[buf_id]; + rtrs_srv_get_ops_ids(srv_path); + rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE); + id = srv_path->ops_ids[buf_id]; id->con = con; id->dir = WRITE; id->msg_id = buf_id; @@ -1089,20 +1099,21 @@ send_err_msg: rtrs_err_rl(s, "Processing write request failed, sending I/O response failed, msg_id %d, err: %d\n", buf_id, ret); - close_sess(sess); + close_path(srv_path); } - rtrs_srv_put_ops_ids(sess); + rtrs_srv_put_ops_ids(srv_path); } static void process_io_req(struct rtrs_srv_con *con, void *msg, u32 id, 
u32 off) { - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); struct rtrs_msg_rdma_hdr *hdr; unsigned int type; - ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, sess->dma_addr[id], + ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, + srv_path->dma_addr[id], max_chunk_size, DMA_BIDIRECTIONAL); hdr = msg; type = le16_to_cpu(hdr->type); @@ -1124,7 +1135,7 @@ static void process_io_req(struct rtrs_srv_con *con, void *msg, return; err: - close_sess(sess); + close_path(srv_path); } static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) @@ -1132,16 +1143,16 @@ static void rtrs_srv_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) struct rtrs_srv_mr *mr = container_of(wc->wr_cqe, typeof(*mr), inv_cqe); struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); - struct rtrs_srv *srv = sess->srv; + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); + struct rtrs_srv_sess *srv = srv_path->srv; u32 msg_id, off; void *data; if (wc->status != IB_WC_SUCCESS) { rtrs_err(s, "Failed IB_WR_LOCAL_INV: %s\n", ib_wc_status_msg(wc->status)); - close_sess(sess); + close_path(srv_path); } msg_id = mr->msg_id; off = mr->msg_off; @@ -1189,9 +1200,9 @@ static void rtrs_rdma_process_wr_wait_list(struct rtrs_srv_con *con) static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) { struct rtrs_srv_con *con = to_srv_con(wc->qp->qp_context); - struct rtrs_sess *s = con->c.sess; - struct rtrs_srv_sess *sess = to_srv_sess(s); - struct rtrs_srv *srv = sess->srv; + struct rtrs_path *s = con->c.path; + struct rtrs_srv_path *srv_path = to_srv_path(s); + struct rtrs_srv_sess *srv = srv_path->srv; u32 imm_type, imm_payload; int err; @@ -1201,7 +1212,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) "%s (wr_cqe: %p, type: %d, vendor_err: 0x%x, len: %u)\n", ib_wc_status_msg(wc->status), wc->wr_cqe, wc->opcode, wc->vendor_err, wc->byte_len); - close_sess(sess); + close_path(srv_path); } return; } @@ -1217,7 +1228,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); if (err) { rtrs_err(s, "rtrs_post_recv(), err: %d\n", err); - close_sess(sess); + close_path(srv_path); break; } rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), @@ -1226,16 +1237,16 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) u32 msg_id, off; void *data; - msg_id = imm_payload >> sess->mem_bits; - off = imm_payload & ((1 << sess->mem_bits) - 1); + msg_id = imm_payload >> srv_path->mem_bits; + off = imm_payload & ((1 << srv_path->mem_bits) - 1); if (msg_id >= srv->queue_depth || off >= max_chunk_size) { rtrs_err(s, "Wrong msg_id %u, off %u\n", msg_id, off); - close_sess(sess); + close_path(srv_path); return; } if (always_invalidate) { - struct rtrs_srv_mr *mr = &sess->mrs[msg_id]; + struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id]; mr->msg_off = off; mr->msg_id = msg_id; @@ -1243,7 +1254,7 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) if (err) { rtrs_err(s, "rtrs_post_recv(), err: %d\n", err); - close_sess(sess); + close_path(srv_path); break; } } else { @@ -1252,10 +1263,10 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) } } else if (imm_type == RTRS_HB_MSG_IMM) { WARN_ON(con->c.cid); - rtrs_send_hb_ack(&sess->s); + rtrs_send_hb_ack(&srv_path->s); } 
else if (imm_type == RTRS_HB_ACK_IMM) { WARN_ON(con->c.cid); - sess->s.hb_missed_cnt = 0; + srv_path->s.hb_missed_cnt = 0; } else { rtrs_wrn(s, "Unknown IMM type %u\n", imm_type); } @@ -1279,22 +1290,23 @@ static void rtrs_srv_rdma_done(struct ib_cq *cq, struct ib_wc *wc) } /** - * rtrs_srv_get_sess_name() - Get rtrs_srv peer hostname. + * rtrs_srv_get_path_name() - Get rtrs_srv peer hostname. * @srv: Session - * @sessname: Sessname buffer + * @pathname: Pathname buffer * @len: Length of sessname buffer */ -int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len) +int rtrs_srv_get_path_name(struct rtrs_srv_sess *srv, char *pathname, + size_t len) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; int err = -ENOTCONN; mutex_lock(&srv->paths_mutex); - list_for_each_entry(sess, &srv->paths_list, s.entry) { - if (sess->state != RTRS_SRV_CONNECTED) + list_for_each_entry(srv_path, &srv->paths_list, s.entry) { + if (srv_path->state != RTRS_SRV_CONNECTED) continue; - strscpy(sessname, sess->s.sessname, - min_t(size_t, sizeof(sess->s.sessname), len)); + strscpy(pathname, srv_path->s.sessname, + min_t(size_t, sizeof(srv_path->s.sessname), len)); err = 0; break; } @@ -1302,44 +1314,45 @@ int rtrs_srv_get_sess_name(struct rtrs_srv *srv, char *sessname, size_t len) return err; } -EXPORT_SYMBOL(rtrs_srv_get_sess_name); +EXPORT_SYMBOL(rtrs_srv_get_path_name); /** * rtrs_srv_get_queue_depth() - Get rtrs_srv qdepth. * @srv: Session */ -int rtrs_srv_get_queue_depth(struct rtrs_srv *srv) +int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *srv) { return srv->queue_depth; } EXPORT_SYMBOL(rtrs_srv_get_queue_depth); -static int find_next_bit_ring(struct rtrs_srv_sess *sess) +static int find_next_bit_ring(struct rtrs_srv_path *srv_path) { - struct ib_device *ib_dev = sess->s.dev->ib_dev; + struct ib_device *ib_dev = srv_path->s.dev->ib_dev; int v; - v = cpumask_next(sess->cur_cq_vector, &cq_affinity_mask); + v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask); if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors) v = cpumask_first(&cq_affinity_mask); return v; } -static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess) +static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path) { - sess->cur_cq_vector = find_next_bit_ring(sess); + srv_path->cur_cq_vector = find_next_bit_ring(srv_path); - return sess->cur_cq_vector; + return srv_path->cur_cq_vector; } static void rtrs_srv_dev_release(struct device *dev) { - struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev); + struct rtrs_srv_sess *srv = container_of(dev, struct rtrs_srv_sess, + dev); kfree(srv); } -static void free_srv(struct rtrs_srv *srv) +static void free_srv(struct rtrs_srv_sess *srv) { int i; @@ -1353,11 +1366,11 @@ static void free_srv(struct rtrs_srv *srv) put_device(&srv->dev); } -static struct rtrs_srv *get_or_create_srv(struct rtrs_srv_ctx *ctx, +static struct rtrs_srv_sess *get_or_create_srv(struct rtrs_srv_ctx *ctx, const uuid_t *paths_uuid, bool first_conn) { - struct rtrs_srv *srv; + struct rtrs_srv_sess *srv; int i; mutex_lock(&ctx->srv_mutex); @@ -1419,7 +1432,7 @@ err_free_srv: return ERR_PTR(-ENOMEM); } -static void put_srv(struct rtrs_srv *srv) +static void put_srv(struct rtrs_srv_sess *srv) { if (refcount_dec_and_test(&srv->refcount)) { struct rtrs_srv_ctx *ctx = srv->ctx; @@ -1433,23 +1446,23 @@ static void put_srv(struct rtrs_srv *srv) } } -static void __add_path_to_srv(struct rtrs_srv *srv, - struct rtrs_srv_sess *sess) +static void __add_path_to_srv(struct 
rtrs_srv_sess *srv, + struct rtrs_srv_path *srv_path) { - list_add_tail(&sess->s.entry, &srv->paths_list); + list_add_tail(&srv_path->s.entry, &srv->paths_list); srv->paths_num++; WARN_ON(srv->paths_num >= MAX_PATHS_NUM); } -static void del_path_from_srv(struct rtrs_srv_sess *sess) +static void del_path_from_srv(struct rtrs_srv_path *srv_path) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; if (WARN_ON(!srv)) return; mutex_lock(&srv->paths_mutex); - list_del(&sess->s.entry); + list_del(&srv_path->s.entry); WARN_ON(!srv->paths_num); srv->paths_num--; mutex_unlock(&srv->paths_mutex); @@ -1479,47 +1492,47 @@ static int sockaddr_cmp(const struct sockaddr *a, const struct sockaddr *b) } } -static bool __is_path_w_addr_exists(struct rtrs_srv *srv, +static bool __is_path_w_addr_exists(struct rtrs_srv_sess *srv, struct rdma_addr *addr) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; - list_for_each_entry(sess, &srv->paths_list, s.entry) - if (!sockaddr_cmp((struct sockaddr *)&sess->s.dst_addr, + list_for_each_entry(srv_path, &srv->paths_list, s.entry) + if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr, (struct sockaddr *)&addr->dst_addr) && - !sockaddr_cmp((struct sockaddr *)&sess->s.src_addr, + !sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr, (struct sockaddr *)&addr->src_addr)) return true; return false; } -static void free_sess(struct rtrs_srv_sess *sess) +static void free_path(struct rtrs_srv_path *srv_path) { - if (sess->kobj.state_in_sysfs) { - kobject_del(&sess->kobj); - kobject_put(&sess->kobj); + if (srv_path->kobj.state_in_sysfs) { + kobject_del(&srv_path->kobj); + kobject_put(&srv_path->kobj); } else { - kfree(sess->stats); - kfree(sess); + kfree(srv_path->stats); + kfree(srv_path); } } static void rtrs_srv_close_work(struct work_struct *work) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; struct rtrs_srv_con *con; int i; - sess = container_of(work, typeof(*sess), close_work); + srv_path = container_of(work, typeof(*srv_path), close_work); - rtrs_srv_destroy_sess_files(sess); - rtrs_srv_stop_hb(sess); + rtrs_srv_destroy_path_files(srv_path); + rtrs_srv_stop_hb(srv_path); - for (i = 0; i < sess->s.con_num; i++) { - if (!sess->s.con[i]) + for (i = 0; i < srv_path->s.con_num; i++) { + if (!srv_path->s.con[i]) continue; - con = to_srv_con(sess->s.con[i]); + con = to_srv_con(srv_path->s.con[i]); rdma_disconnect(con->c.cm_id); ib_drain_qp(con->c.qp); } @@ -1528,41 +1541,41 @@ static void rtrs_srv_close_work(struct work_struct *work) * Degrade ref count to the usual model with a single shared * atomic_t counter */ - percpu_ref_kill(&sess->ids_inflight_ref); + percpu_ref_kill(&srv_path->ids_inflight_ref); /* Wait for all completion */ - wait_for_completion(&sess->complete_done); + wait_for_completion(&srv_path->complete_done); /* Notify upper layer if we are the last path */ - rtrs_srv_sess_down(sess); + rtrs_srv_path_down(srv_path); - unmap_cont_bufs(sess); - rtrs_srv_free_ops_ids(sess); + unmap_cont_bufs(srv_path); + rtrs_srv_free_ops_ids(srv_path); - for (i = 0; i < sess->s.con_num; i++) { - if (!sess->s.con[i]) + for (i = 0; i < srv_path->s.con_num; i++) { + if (!srv_path->s.con[i]) continue; - con = to_srv_con(sess->s.con[i]); + con = to_srv_con(srv_path->s.con[i]); rtrs_cq_qp_destroy(&con->c); rdma_destroy_id(con->c.cm_id); kfree(con); } - rtrs_ib_dev_put(sess->s.dev); + rtrs_ib_dev_put(srv_path->s.dev); - del_path_from_srv(sess); - put_srv(sess->srv); - sess->srv = NULL; - rtrs_srv_change_state(sess, 
RTRS_SRV_CLOSED); + del_path_from_srv(srv_path); + put_srv(srv_path->srv); + srv_path->srv = NULL; + rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED); - kfree(sess->dma_addr); - kfree(sess->s.con); - free_sess(sess); + kfree(srv_path->dma_addr); + kfree(srv_path->s.con); + free_path(srv_path); } -static int rtrs_rdma_do_accept(struct rtrs_srv_sess *sess, +static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path, struct rdma_cm_id *cm_id) { - struct rtrs_srv *srv = sess->srv; + struct rtrs_srv_sess *srv = srv_path->srv; struct rtrs_msg_conn_rsp msg; struct rdma_conn_param param; int err; @@ -1610,25 +1623,25 @@ static int rtrs_rdma_do_reject(struct rdma_cm_id *cm_id, int errno) return errno; } -static struct rtrs_srv_sess * -__find_sess(struct rtrs_srv *srv, const uuid_t *sess_uuid) +static struct rtrs_srv_path * +__find_path(struct rtrs_srv_sess *srv, const uuid_t *sess_uuid) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; - list_for_each_entry(sess, &srv->paths_list, s.entry) { - if (uuid_equal(&sess->s.uuid, sess_uuid)) - return sess; + list_for_each_entry(srv_path, &srv->paths_list, s.entry) { + if (uuid_equal(&srv_path->s.uuid, sess_uuid)) + return srv_path; } return NULL; } -static int create_con(struct rtrs_srv_sess *sess, +static int create_con(struct rtrs_srv_path *srv_path, struct rdma_cm_id *cm_id, unsigned int cid) { - struct rtrs_srv *srv = sess->srv; - struct rtrs_sess *s = &sess->s; + struct rtrs_srv_sess *srv = srv_path->srv; + struct rtrs_path *s = &srv_path->s; struct rtrs_srv_con *con; u32 cq_num, max_send_wr, max_recv_wr, wr_limit; @@ -1643,10 +1656,10 @@ static int create_con(struct rtrs_srv_sess *sess, spin_lock_init(&con->rsp_wr_wait_lock); INIT_LIST_HEAD(&con->rsp_wr_wait_list); con->c.cm_id = cm_id; - con->c.sess = &sess->s; + con->c.path = &srv_path->s; con->c.cid = cid; atomic_set(&con->c.wr_cnt, 1); - wr_limit = sess->s.dev->ib_dev->attrs.max_qp_wr; + wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr; if (con->c.cid == 0) { /* @@ -1679,10 +1692,10 @@ static int create_con(struct rtrs_srv_sess *sess, } cq_num = max_send_wr + max_recv_wr; atomic_set(&con->c.sq_wr_avail, max_send_wr); - cq_vector = rtrs_srv_get_next_cq_vector(sess); + cq_vector = rtrs_srv_get_next_cq_vector(srv_path); /* TODO: SOFTIRQ can be faster, but be careful with softirq context */ - err = rtrs_cq_qp_create(&sess->s, &con->c, 1, cq_vector, cq_num, + err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num, max_send_wr, max_recv_wr, IB_POLL_WORKQUEUE); if (err) { @@ -1694,8 +1707,8 @@ static int create_con(struct rtrs_srv_sess *sess, if (err) goto free_cqqp; } - WARN_ON(sess->s.con[cid]); - sess->s.con[cid] = &con->c; + WARN_ON(srv_path->s.con[cid]); + srv_path->s.con[cid] = &con->c; /* * Change context from server to current connection. 
The other @@ -1714,13 +1727,13 @@ err: return err; } -static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv, +static struct rtrs_srv_path *__alloc_path(struct rtrs_srv_sess *srv, struct rdma_cm_id *cm_id, unsigned int con_num, unsigned int recon_cnt, const uuid_t *uuid) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; int err = -ENOMEM; char str[NAME_MAX]; struct rtrs_addr path; @@ -1734,73 +1747,76 @@ static struct rtrs_srv_sess *__alloc_sess(struct rtrs_srv *srv, pr_err("Path with same addr exists\n"); goto err; } - sess = kzalloc(sizeof(*sess), GFP_KERNEL); - if (!sess) + srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL); + if (!srv_path) goto err; - sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL); - if (!sess->stats) + srv_path->stats = kzalloc(sizeof(*srv_path->stats), GFP_KERNEL); + if (!srv_path->stats) goto err_free_sess; - sess->stats->sess = sess; + srv_path->stats->srv_path = srv_path; - sess->dma_addr = kcalloc(srv->queue_depth, sizeof(*sess->dma_addr), - GFP_KERNEL); - if (!sess->dma_addr) + srv_path->dma_addr = kcalloc(srv->queue_depth, + sizeof(*srv_path->dma_addr), + GFP_KERNEL); + if (!srv_path->dma_addr) goto err_free_stats; - sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL); - if (!sess->s.con) + srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con), + GFP_KERNEL); + if (!srv_path->s.con) goto err_free_dma_addr; - sess->state = RTRS_SRV_CONNECTING; - sess->srv = srv; - sess->cur_cq_vector = -1; - sess->s.dst_addr = cm_id->route.addr.dst_addr; - sess->s.src_addr = cm_id->route.addr.src_addr; + srv_path->state = RTRS_SRV_CONNECTING; + srv_path->srv = srv; + srv_path->cur_cq_vector = -1; + srv_path->s.dst_addr = cm_id->route.addr.dst_addr; + srv_path->s.src_addr = cm_id->route.addr.src_addr; /* temporary until receiving session-name from client */ - path.src = &sess->s.src_addr; - path.dst = &sess->s.dst_addr; + path.src = &srv_path->s.src_addr; + path.dst = &srv_path->s.dst_addr; rtrs_addr_to_str(&path, str, sizeof(str)); - strscpy(sess->s.sessname, str, sizeof(sess->s.sessname)); - - sess->s.con_num = con_num; - sess->s.recon_cnt = recon_cnt; - uuid_copy(&sess->s.uuid, uuid); - spin_lock_init(&sess->state_lock); - INIT_WORK(&sess->close_work, rtrs_srv_close_work); - rtrs_srv_init_hb(sess); - - sess->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd); - if (!sess->s.dev) { + strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname)); + + srv_path->s.con_num = con_num; + srv_path->s.irq_con_num = con_num; + srv_path->s.recon_cnt = recon_cnt; + uuid_copy(&srv_path->s.uuid, uuid); + spin_lock_init(&srv_path->state_lock); + INIT_WORK(&srv_path->close_work, rtrs_srv_close_work); + rtrs_srv_init_hb(srv_path); + + srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd); + if (!srv_path->s.dev) { err = -ENOMEM; goto err_free_con; } - err = map_cont_bufs(sess); + err = map_cont_bufs(srv_path); if (err) goto err_put_dev; - err = rtrs_srv_alloc_ops_ids(sess); + err = rtrs_srv_alloc_ops_ids(srv_path); if (err) goto err_unmap_bufs; - __add_path_to_srv(srv, sess); + __add_path_to_srv(srv, srv_path); - return sess; + return srv_path; err_unmap_bufs: - unmap_cont_bufs(sess); + unmap_cont_bufs(srv_path); err_put_dev: - rtrs_ib_dev_put(sess->s.dev); + rtrs_ib_dev_put(srv_path->s.dev); err_free_con: - kfree(sess->s.con); + kfree(srv_path->s.con); err_free_dma_addr: - kfree(sess->dma_addr); + kfree(srv_path->dma_addr); err_free_stats: - kfree(sess->stats); + kfree(srv_path->stats); err_free_sess: - kfree(sess); + 
kfree(srv_path); err: return ERR_PTR(err); } @@ -1810,8 +1826,8 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id, size_t len) { struct rtrs_srv_ctx *ctx = cm_id->context; - struct rtrs_srv_sess *sess; - struct rtrs_srv *srv; + struct rtrs_srv_path *srv_path; + struct rtrs_srv_sess *srv; u16 version, con_num, cid; u16 recon_cnt; @@ -1851,16 +1867,16 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id, goto reject_w_err; } mutex_lock(&srv->paths_mutex); - sess = __find_sess(srv, &msg->sess_uuid); - if (sess) { - struct rtrs_sess *s = &sess->s; + srv_path = __find_path(srv, &msg->sess_uuid); + if (srv_path) { + struct rtrs_path *s = &srv_path->s; /* Session already holds a reference */ put_srv(srv); - if (sess->state != RTRS_SRV_CONNECTING) { + if (srv_path->state != RTRS_SRV_CONNECTING) { rtrs_err(s, "Session in wrong state: %s\n", - rtrs_srv_state_str(sess->state)); + rtrs_srv_state_str(srv_path->state)); mutex_unlock(&srv->paths_mutex); goto reject_w_err; } @@ -1880,19 +1896,19 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id, goto reject_w_err; } } else { - sess = __alloc_sess(srv, cm_id, con_num, recon_cnt, + srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt, &msg->sess_uuid); - if (IS_ERR(sess)) { + if (IS_ERR(srv_path)) { mutex_unlock(&srv->paths_mutex); put_srv(srv); - err = PTR_ERR(sess); + err = PTR_ERR(srv_path); pr_err("RTRS server session allocation failed: %d\n", err); goto reject_w_err; } } - err = create_con(sess, cm_id, cid); + err = create_con(srv_path, cm_id, cid); if (err) { - rtrs_err((&sess->s), "create_con(), error %d\n", err); + rtrs_err((&srv_path->s), "create_con(), error %d\n", err); rtrs_rdma_do_reject(cm_id, err); /* * Since session has other connections we follow normal way @@ -1901,9 +1917,9 @@ static int rtrs_rdma_connect(struct rdma_cm_id *cm_id, */ goto close_and_return_err; } - err = rtrs_rdma_do_accept(sess, cm_id); + err = rtrs_rdma_do_accept(srv_path, cm_id); if (err) { - rtrs_err((&sess->s), "rtrs_rdma_do_accept(), error %d\n", err); + rtrs_err((&srv_path->s), "rtrs_rdma_do_accept(), error %d\n", err); rtrs_rdma_do_reject(cm_id, err); /* * Since current connection was successfully added to the @@ -1923,7 +1939,7 @@ reject_w_err: close_and_return_err: mutex_unlock(&srv->paths_mutex); - close_sess(sess); + close_path(srv_path); return err; } @@ -1931,14 +1947,14 @@ close_and_return_err: static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id, struct rdma_cm_event *ev) { - struct rtrs_srv_sess *sess = NULL; - struct rtrs_sess *s = NULL; + struct rtrs_srv_path *srv_path = NULL; + struct rtrs_path *s = NULL; if (ev->event != RDMA_CM_EVENT_CONNECT_REQUEST) { struct rtrs_con *c = cm_id->context; - s = c->sess; - sess = to_srv_sess(s); + s = c->path; + srv_path = to_srv_path(s); } switch (ev->event) { @@ -1962,7 +1978,7 @@ static int rtrs_srv_rdma_cm_handler(struct rdma_cm_id *cm_id, case RDMA_CM_EVENT_ADDR_CHANGE: case RDMA_CM_EVENT_TIMEWAIT_EXIT: case RDMA_CM_EVENT_DEVICE_REMOVAL: - close_sess(sess); + close_path(srv_path); break; default: pr_err("Ignoring unexpected CM event %s, err %d\n", @@ -2170,23 +2186,23 @@ struct rtrs_srv_ctx *rtrs_srv_open(struct rtrs_srv_ops *ops, u16 port) } EXPORT_SYMBOL(rtrs_srv_open); -static void close_sessions(struct rtrs_srv *srv) +static void close_paths(struct rtrs_srv_sess *srv) { - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; mutex_lock(&srv->paths_mutex); - list_for_each_entry(sess, &srv->paths_list, s.entry) - close_sess(sess); + list_for_each_entry(srv_path, 
&srv->paths_list, s.entry) + close_path(srv_path); mutex_unlock(&srv->paths_mutex); } static void close_ctx(struct rtrs_srv_ctx *ctx) { - struct rtrs_srv *srv; + struct rtrs_srv_sess *srv; mutex_lock(&ctx->srv_mutex); list_for_each_entry(srv, &ctx->srv_list, ctx_list) - close_sessions(srv); + close_paths(srv); mutex_unlock(&ctx->srv_mutex); flush_workqueue(rtrs_wq); } diff --git a/drivers/infiniband/ulp/rtrs/rtrs-srv.h b/drivers/infiniband/ulp/rtrs/rtrs-srv.h index 9d8d2a91a235..6292e87f6afd 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs-srv.h +++ b/drivers/infiniband/ulp/rtrs/rtrs-srv.h @@ -37,7 +37,7 @@ struct rtrs_srv_stats_rdma_stats { struct rtrs_srv_stats { struct kobject kobj_stats; struct rtrs_srv_stats_rdma_stats rdma_stats; - struct rtrs_srv_sess *sess; + struct rtrs_srv_path *srv_path; }; struct rtrs_srv_con { @@ -71,9 +71,9 @@ struct rtrs_srv_mr { struct rtrs_iu *iu; /* send buffer for new rkey msg */ }; -struct rtrs_srv_sess { - struct rtrs_sess s; - struct rtrs_srv *srv; +struct rtrs_srv_path { + struct rtrs_path s; + struct rtrs_srv_sess *srv; struct work_struct close_work; enum rtrs_srv_state state; spinlock_t state_lock; @@ -90,7 +90,7 @@ struct rtrs_srv_sess { struct rtrs_srv_stats *stats; }; -struct rtrs_srv { +struct rtrs_srv_sess { struct list_head paths_list; int paths_up; struct mutex paths_ev_mutex; @@ -125,7 +125,7 @@ struct rtrs_srv_ib_ctx { extern struct class *rtrs_dev_class; -void close_sess(struct rtrs_srv_sess *sess); +void close_path(struct rtrs_srv_path *srv_path); static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s, size_t size, int d) @@ -136,14 +136,13 @@ static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s, /* functions which are implemented in rtrs-srv-stats.c */ int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable); -ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, - char *page, size_t len); +ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats, char *page); int rtrs_srv_reset_all_stats(struct rtrs_srv_stats *stats, bool enable); ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats, char *page, size_t len); /* functions which are implemented in rtrs-srv-sysfs.c */ -int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess); -void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess); +int rtrs_srv_create_path_files(struct rtrs_srv_path *srv_path); +void rtrs_srv_destroy_path_files(struct rtrs_srv_path *srv_path); #endif /* RTRS_SRV_H */ diff --git a/drivers/infiniband/ulp/rtrs/rtrs.c b/drivers/infiniband/ulp/rtrs/rtrs.c index ca542e477d38..4da889103a5f 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs.c +++ b/drivers/infiniband/ulp/rtrs/rtrs.c @@ -69,16 +69,16 @@ EXPORT_SYMBOL_GPL(rtrs_iu_free); int rtrs_iu_post_recv(struct rtrs_con *con, struct rtrs_iu *iu) { - struct rtrs_sess *sess = con->sess; + struct rtrs_path *path = con->path; struct ib_recv_wr wr; struct ib_sge list; list.addr = iu->dma_addr; list.length = iu->size; - list.lkey = sess->dev->ib_pd->local_dma_lkey; + list.lkey = path->dev->ib_pd->local_dma_lkey; if (list.length == 0) { - rtrs_wrn(con->sess, + rtrs_wrn(con->path, "Posting receive work request failed, sg list is empty\n"); return -EINVAL; } @@ -126,7 +126,7 @@ static int rtrs_post_send(struct ib_qp *qp, struct ib_send_wr *head, int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size, struct ib_send_wr *head) { - struct rtrs_sess *sess = con->sess; + struct rtrs_path *path = con->path; struct ib_send_wr wr; struct ib_sge 
list; @@ -135,7 +135,7 @@ int rtrs_iu_post_send(struct rtrs_con *con, struct rtrs_iu *iu, size_t size, list.addr = iu->dma_addr; list.length = size; - list.lkey = sess->dev->ib_pd->local_dma_lkey; + list.lkey = path->dev->ib_pd->local_dma_lkey; wr = (struct ib_send_wr) { .wr_cqe = &iu->cqe, @@ -188,11 +188,11 @@ static int rtrs_post_rdma_write_imm_empty(struct rtrs_con *con, struct ib_send_wr *head) { struct ib_rdma_wr wr; - struct rtrs_sess *sess = con->sess; + struct rtrs_path *path = con->path; enum ib_send_flags sflags; atomic_dec_if_positive(&con->sq_wr_avail); - sflags = (atomic_inc_return(&con->wr_cnt) % sess->signal_interval) ? + sflags = (atomic_inc_return(&con->wr_cnt) % path->signal_interval) ? 0 : IB_SEND_SIGNALED; wr = (struct ib_rdma_wr) { @@ -211,26 +211,36 @@ static void qp_event_handler(struct ib_event *ev, void *ctx) switch (ev->event) { case IB_EVENT_COMM_EST: - rtrs_info(con->sess, "QP event %s (%d) received\n", + rtrs_info(con->path, "QP event %s (%d) received\n", ib_event_msg(ev->event), ev->event); rdma_notify(con->cm_id, IB_EVENT_COMM_EST); break; default: - rtrs_info(con->sess, "Unhandled QP event %s (%d) received\n", + rtrs_info(con->path, "Unhandled QP event %s (%d) received\n", ib_event_msg(ev->event), ev->event); break; } } +static bool is_pollqueue(struct rtrs_con *con) +{ + return con->cid >= con->path->irq_con_num; +} + static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe, enum ib_poll_context poll_ctx) { struct rdma_cm_id *cm_id = con->cm_id; struct ib_cq *cq; - cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx); + if (is_pollqueue(con)) + cq = ib_alloc_cq(cm_id->device, con, nr_cqe, cq_vector, + poll_ctx); + else + cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector, poll_ctx); + if (IS_ERR(cq)) { - rtrs_err(con->sess, "Creating completion queue failed, errno: %ld\n", + rtrs_err(con->path, "Creating completion queue failed, errno: %ld\n", PTR_ERR(cq)); return PTR_ERR(cq); } @@ -261,7 +271,7 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd, ret = rdma_create_qp(cm_id, pd, &init_attr); if (ret) { - rtrs_err(con->sess, "Creating QP failed, err: %d\n", ret); + rtrs_err(con->path, "Creating QP failed, err: %d\n", ret); return ret; } con->qp = cm_id->qp; @@ -269,7 +279,18 @@ static int create_qp(struct rtrs_con *con, struct ib_pd *pd, return ret; } -int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con, +static void destroy_cq(struct rtrs_con *con) +{ + if (con->cq) { + if (is_pollqueue(con)) + ib_free_cq(con->cq); + else + ib_cq_pool_put(con->cq, con->nr_cqe); + } + con->cq = NULL; +} + +int rtrs_cq_qp_create(struct rtrs_path *path, struct rtrs_con *con, u32 max_send_sge, int cq_vector, int nr_cqe, u32 max_send_wr, u32 max_recv_wr, enum ib_poll_context poll_ctx) @@ -280,14 +301,13 @@ int rtrs_cq_qp_create(struct rtrs_sess *sess, struct rtrs_con *con, if (err) return err; - err = create_qp(con, sess->dev->ib_pd, max_send_wr, max_recv_wr, + err = create_qp(con, path->dev->ib_pd, max_send_wr, max_recv_wr, max_send_sge); if (err) { - ib_cq_pool_put(con->cq, con->nr_cqe); - con->cq = NULL; + destroy_cq(con); return err; } - con->sess = sess; + con->path = path; return 0; } @@ -299,31 +319,28 @@ void rtrs_cq_qp_destroy(struct rtrs_con *con) rdma_destroy_qp(con->cm_id); con->qp = NULL; } - if (con->cq) { - ib_cq_pool_put(con->cq, con->nr_cqe); - con->cq = NULL; - } + destroy_cq(con); } EXPORT_SYMBOL_GPL(rtrs_cq_qp_destroy); -static void schedule_hb(struct rtrs_sess *sess) +static void schedule_hb(struct 
rtrs_path *path) { - queue_delayed_work(sess->hb_wq, &sess->hb_dwork, - msecs_to_jiffies(sess->hb_interval_ms)); + queue_delayed_work(path->hb_wq, &path->hb_dwork, + msecs_to_jiffies(path->hb_interval_ms)); } -void rtrs_send_hb_ack(struct rtrs_sess *sess) +void rtrs_send_hb_ack(struct rtrs_path *path) { - struct rtrs_con *usr_con = sess->con[0]; + struct rtrs_con *usr_con = path->con[0]; u32 imm; int err; imm = rtrs_to_imm(RTRS_HB_ACK_IMM, 0); - err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm, + err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm, NULL); if (err) { - rtrs_err(sess, "send HB ACK failed, errno: %d\n", err); - sess->hb_err_handler(usr_con); + rtrs_err(path, "send HB ACK failed, errno: %d\n", err); + path->hb_err_handler(usr_con); return; } } @@ -332,63 +349,63 @@ EXPORT_SYMBOL_GPL(rtrs_send_hb_ack); static void hb_work(struct work_struct *work) { struct rtrs_con *usr_con; - struct rtrs_sess *sess; + struct rtrs_path *path; u32 imm; int err; - sess = container_of(to_delayed_work(work), typeof(*sess), hb_dwork); - usr_con = sess->con[0]; + path = container_of(to_delayed_work(work), typeof(*path), hb_dwork); + usr_con = path->con[0]; - if (sess->hb_missed_cnt > sess->hb_missed_max) { - rtrs_err(sess, "HB missed max reached.\n"); - sess->hb_err_handler(usr_con); + if (path->hb_missed_cnt > path->hb_missed_max) { + rtrs_err(path, "HB missed max reached.\n"); + path->hb_err_handler(usr_con); return; } - if (sess->hb_missed_cnt++) { + if (path->hb_missed_cnt++) { /* Reschedule work without sending hb */ - schedule_hb(sess); + schedule_hb(path); return; } - sess->hb_last_sent = ktime_get(); + path->hb_last_sent = ktime_get(); imm = rtrs_to_imm(RTRS_HB_MSG_IMM, 0); - err = rtrs_post_rdma_write_imm_empty(usr_con, sess->hb_cqe, imm, + err = rtrs_post_rdma_write_imm_empty(usr_con, path->hb_cqe, imm, NULL); if (err) { - rtrs_err(sess, "HB send failed, errno: %d\n", err); - sess->hb_err_handler(usr_con); + rtrs_err(path, "HB send failed, errno: %d\n", err); + path->hb_err_handler(usr_con); return; } - schedule_hb(sess); + schedule_hb(path); } -void rtrs_init_hb(struct rtrs_sess *sess, struct ib_cqe *cqe, +void rtrs_init_hb(struct rtrs_path *path, struct ib_cqe *cqe, unsigned int interval_ms, unsigned int missed_max, void (*err_handler)(struct rtrs_con *con), struct workqueue_struct *wq) { - sess->hb_cqe = cqe; - sess->hb_interval_ms = interval_ms; - sess->hb_err_handler = err_handler; - sess->hb_wq = wq; - sess->hb_missed_max = missed_max; - sess->hb_missed_cnt = 0; - INIT_DELAYED_WORK(&sess->hb_dwork, hb_work); + path->hb_cqe = cqe; + path->hb_interval_ms = interval_ms; + path->hb_err_handler = err_handler; + path->hb_wq = wq; + path->hb_missed_max = missed_max; + path->hb_missed_cnt = 0; + INIT_DELAYED_WORK(&path->hb_dwork, hb_work); } EXPORT_SYMBOL_GPL(rtrs_init_hb); -void rtrs_start_hb(struct rtrs_sess *sess) +void rtrs_start_hb(struct rtrs_path *path) { - schedule_hb(sess); + schedule_hb(path); } EXPORT_SYMBOL_GPL(rtrs_start_hb); -void rtrs_stop_hb(struct rtrs_sess *sess) +void rtrs_stop_hb(struct rtrs_path *path) { - cancel_delayed_work_sync(&sess->hb_dwork); - sess->hb_missed_cnt = 0; + cancel_delayed_work_sync(&path->hb_dwork); + path->hb_missed_cnt = 0; } EXPORT_SYMBOL_GPL(rtrs_stop_hb); diff --git a/drivers/infiniband/ulp/rtrs/rtrs.h b/drivers/infiniband/ulp/rtrs/rtrs.h index 859c79685daf..5e57a7ccc7fb 100644 --- a/drivers/infiniband/ulp/rtrs/rtrs.h +++ b/drivers/infiniband/ulp/rtrs/rtrs.h @@ -13,9 +13,9 @@ #include <linux/scatterlist.h> struct 
rtrs_permit; -struct rtrs_clt; +struct rtrs_clt_sess; struct rtrs_srv_ctx; -struct rtrs_srv; +struct rtrs_srv_sess; struct rtrs_srv_op; /* @@ -52,14 +52,14 @@ struct rtrs_clt_ops { void (*link_ev)(void *priv, enum rtrs_clt_link_ev ev); }; -struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops, - const char *sessname, +struct rtrs_clt_sess *rtrs_clt_open(struct rtrs_clt_ops *ops, + const char *pathname, const struct rtrs_addr *paths, size_t path_cnt, u16 port, size_t pdu_sz, u8 reconnect_delay_sec, s16 max_reconnect_attempts, u32 nr_poll_queues); -void rtrs_clt_close(struct rtrs_clt *sess); +void rtrs_clt_close(struct rtrs_clt_sess *clt); enum wait_type { RTRS_PERMIT_NOWAIT = 0, @@ -77,11 +77,12 @@ enum rtrs_clt_con_type { RTRS_IO_CON }; -struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *sess, - enum rtrs_clt_con_type con_type, - enum wait_type wait); +struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt_sess *sess, + enum rtrs_clt_con_type con_type, + enum wait_type wait); -void rtrs_clt_put_permit(struct rtrs_clt *sess, struct rtrs_permit *permit); +void rtrs_clt_put_permit(struct rtrs_clt_sess *sess, + struct rtrs_permit *permit); /** * rtrs_clt_req_ops - it holds the request confirmation callback @@ -98,10 +99,10 @@ struct rtrs_clt_req_ops { }; int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops, - struct rtrs_clt *sess, struct rtrs_permit *permit, + struct rtrs_clt_sess *sess, struct rtrs_permit *permit, const struct kvec *vec, size_t nr, size_t len, struct scatterlist *sg, unsigned int sg_cnt); -int rtrs_clt_rdma_cq_direct(struct rtrs_clt *clt, unsigned int index); +int rtrs_clt_rdma_cq_direct(struct rtrs_clt_sess *clt, unsigned int index); /** * rtrs_attrs - RTRS session attributes @@ -112,7 +113,7 @@ struct rtrs_attrs { u32 max_segments; }; -int rtrs_clt_query(struct rtrs_clt *sess, struct rtrs_attrs *attr); +int rtrs_clt_query(struct rtrs_clt_sess *sess, struct rtrs_attrs *attr); /* * Here goes RTRS server API @@ -163,7 +164,7 @@ struct rtrs_srv_ops { * @priv: Private data from user if previously set with * rtrs_srv_set_sess_priv() */ - int (*link_ev)(struct rtrs_srv *sess, enum rtrs_srv_link_ev ev, + int (*link_ev)(struct rtrs_srv_sess *sess, enum rtrs_srv_link_ev ev, void *priv); }; @@ -173,11 +174,12 @@ void rtrs_srv_close(struct rtrs_srv_ctx *ctx); bool rtrs_srv_resp_rdma(struct rtrs_srv_op *id, int errno); -void rtrs_srv_set_sess_priv(struct rtrs_srv *sess, void *priv); +void rtrs_srv_set_sess_priv(struct rtrs_srv_sess *sess, void *priv); -int rtrs_srv_get_sess_name(struct rtrs_srv *sess, char *sessname, size_t len); +int rtrs_srv_get_path_name(struct rtrs_srv_sess *sess, char *pathname, + size_t len); -int rtrs_srv_get_queue_depth(struct rtrs_srv *sess); +int rtrs_srv_get_queue_depth(struct rtrs_srv_sess *sess); int rtrs_addr_to_sockaddr(const char *str, size_t len, u16 port, struct rtrs_addr *addr); diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 71eda91e810c..e174e853f8a4 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c @@ -1026,10 +1026,17 @@ out: */ static void srp_del_scsi_host_attr(struct Scsi_Host *shost) { - struct device_attribute **attr; + const struct attribute_group **g; + struct attribute **attr; - for (attr = shost->hostt->shost_attrs; attr && *attr; ++attr) - device_remove_file(&shost->shost_dev, *attr); + for (g = shost->hostt->shost_groups; *g; ++g) { + for (attr = (*g)->attrs; *attr; ++attr) { + struct device_attribute *dev_attr = + container_of(*attr, 
typeof(*dev_attr), attr); + + device_remove_file(&shost->shost_dev, dev_attr); + } + } } static void srp_remove_target(struct srp_target_port *target) @@ -1266,7 +1273,7 @@ static void srp_finish_req(struct srp_rdma_ch *ch, struct srp_request *req, if (scmnd) { srp_free_req(ch, req, scmnd, 0); scmnd->result = result; - scmnd->scsi_done(scmnd); + scsi_done(scmnd); } } @@ -1987,7 +1994,7 @@ static void srp_process_rsp(struct srp_rdma_ch *ch, struct srp_rsp *rsp) srp_free_req(ch, req, scmnd, be32_to_cpu(rsp->req_lim_delta)); - scmnd->scsi_done(scmnd); + scsi_done(scmnd); } } @@ -2239,7 +2246,7 @@ err_iu: err: if (scmnd->result) { - scmnd->scsi_done(scmnd); + scsi_done(scmnd); ret = 0; } else { ret = SCSI_MLQUEUE_HOST_BUSY; @@ -2811,7 +2818,7 @@ static int srp_abort(struct scsi_cmnd *scmnd) if (ret == SUCCESS) { srp_free_req(ch, req, scmnd, 0); scmnd->result = DID_ABORT << 16; - scmnd->scsi_done(scmnd); + scsi_done(scmnd); } return ret; @@ -3050,26 +3057,28 @@ static ssize_t allow_ext_sg_show(struct device *dev, static DEVICE_ATTR_RO(allow_ext_sg); -static struct device_attribute *srp_host_attrs[] = { - &dev_attr_id_ext, - &dev_attr_ioc_guid, - &dev_attr_service_id, - &dev_attr_pkey, - &dev_attr_sgid, - &dev_attr_dgid, - &dev_attr_orig_dgid, - &dev_attr_req_lim, - &dev_attr_zero_req_lim, - &dev_attr_local_ib_port, - &dev_attr_local_ib_device, - &dev_attr_ch_count, - &dev_attr_comp_vector, - &dev_attr_tl_retry_count, - &dev_attr_cmd_sg_entries, - &dev_attr_allow_ext_sg, +static struct attribute *srp_host_attrs[] = { + &dev_attr_id_ext.attr, + &dev_attr_ioc_guid.attr, + &dev_attr_service_id.attr, + &dev_attr_pkey.attr, + &dev_attr_sgid.attr, + &dev_attr_dgid.attr, + &dev_attr_orig_dgid.attr, + &dev_attr_req_lim.attr, + &dev_attr_zero_req_lim.attr, + &dev_attr_local_ib_port.attr, + &dev_attr_local_ib_device.attr, + &dev_attr_ch_count.attr, + &dev_attr_comp_vector.attr, + &dev_attr_tl_retry_count.attr, + &dev_attr_cmd_sg_entries.attr, + &dev_attr_allow_ext_sg.attr, NULL }; +ATTRIBUTE_GROUPS(srp_host); + static struct scsi_host_template srp_template = { .module = THIS_MODULE, .name = "InfiniBand SRP initiator", @@ -3090,7 +3099,7 @@ static struct scsi_host_template srp_template = { .can_queue = SRP_DEFAULT_CMD_SQ_SIZE, .this_id = -1, .cmd_per_lun = SRP_DEFAULT_CMD_SQ_SIZE, - .shost_attrs = srp_host_attrs, + .shost_groups = srp_host_groups, .track_queue_depth = 1, .cmd_size = sizeof(struct srp_request), }; diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c index 3cadf1295417..f86ee1c4b970 100644 --- a/drivers/infiniband/ulp/srpt/ib_srpt.c +++ b/drivers/infiniband/ulp/srpt/ib_srpt.c @@ -3705,47 +3705,17 @@ static struct configfs_attribute *srpt_da_attrs[] = { NULL, }; -static ssize_t srpt_tpg_enable_show(struct config_item *item, char *page) +static int srpt_enable_tpg(struct se_portal_group *se_tpg, bool enable) { - struct se_portal_group *se_tpg = to_tpg(item); struct srpt_port *sport = srpt_tpg_to_sport(se_tpg); - return sysfs_emit(page, "%d\n", sport->enabled); -} - -static ssize_t srpt_tpg_enable_store(struct config_item *item, - const char *page, size_t count) -{ - struct se_portal_group *se_tpg = to_tpg(item); - struct srpt_port *sport = srpt_tpg_to_sport(se_tpg); - unsigned long tmp; - int ret; - - ret = kstrtoul(page, 0, &tmp); - if (ret < 0) { - pr_err("Unable to extract srpt_tpg_store_enable\n"); - return -EINVAL; - } - - if ((tmp != 0) && (tmp != 1)) { - pr_err("Illegal value for srpt_tpg_store_enable: %lu\n", tmp); - return -EINVAL; - } - 
mutex_lock(&sport->mutex); - srpt_set_enabled(sport, tmp); + srpt_set_enabled(sport, enable); mutex_unlock(&sport->mutex); - return count; + return 0; } -CONFIGFS_ATTR(srpt_tpg_, enable); - -static struct configfs_attribute *srpt_tpg_attrs[] = { - &srpt_tpg_attr_enable, - NULL, -}; - /** * srpt_make_tpg - configfs callback invoked for mkdir /sys/kernel/config/target/$driver/$port/$tpg * @wwn: Corresponds to $driver/$port. @@ -3856,12 +3826,12 @@ static const struct target_core_fabric_ops srpt_template = { .fabric_make_wwn = srpt_make_tport, .fabric_drop_wwn = srpt_drop_tport, .fabric_make_tpg = srpt_make_tpg, + .fabric_enable_tpg = srpt_enable_tpg, .fabric_drop_tpg = srpt_drop_tpg, .fabric_init_nodeacl = srpt_init_nodeacl, .tfc_discovery_attrs = srpt_da_attrs, .tfc_wwn_attrs = srpt_wwn_attrs, - .tfc_tpg_base_attrs = srpt_tpg_attrs, .tfc_tpg_attrib_attrs = srpt_tpg_attrib_attrs, }; |
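
The rtrs hunks above are dominated by a mechanical rename: struct rtrs_sess becomes struct rtrs_path, struct rtrs_srv becomes struct rtrs_srv_sess, and the server-side struct rtrs_srv_sess becomes struct rtrs_srv_path (with sess/close_sess/free_sess and friends becoming srv_path/close_path/free_path), so that "session" consistently names the multipath container and "path" names one set of connections. The main behavioural change in rtrs.c is the poll-queue handling in create_cq(): connections whose cid falls at or beyond path->irq_con_num are poll queues and now get a dedicated CQ from ib_alloc_cq(), while IRQ-driven connections keep using the shared per-device CQ pool. The sketch below consolidates that pair of helpers as they read after the patch; the trailing con->cq/con->nr_cqe assignments in create_cq() are not visible in the hunks and are filled in from how destroy_cq() uses those fields, so treat them as an assumption rather than a verbatim quote of the file.

static bool is_pollqueue(struct rtrs_con *con)
{
	return con->cid >= con->path->irq_con_num;
}

static int create_cq(struct rtrs_con *con, int cq_vector, int nr_cqe,
		     enum ib_poll_context poll_ctx)
{
	struct rdma_cm_id *cm_id = con->cm_id;
	struct ib_cq *cq;

	if (is_pollqueue(con))
		/* dedicated CQ, typically requested with a direct poll context */
		cq = ib_alloc_cq(cm_id->device, con, nr_cqe, cq_vector,
				 poll_ctx);
	else
		/* shared, core-managed CQ from the per-device pool */
		cq = ib_cq_pool_get(cm_id->device, nr_cqe, cq_vector,
				    poll_ctx);
	if (IS_ERR(cq)) {
		rtrs_err(con->path, "Creating completion queue failed, errno: %ld\n",
			 PTR_ERR(cq));
		return PTR_ERR(cq);
	}
	/* Assumed from context: remember the CQ so destroy_cq() can undo this. */
	con->cq = cq;
	con->nr_cqe = nr_cqe;

	return 0;
}

static void destroy_cq(struct rtrs_con *con)
{
	if (con->cq) {
		if (is_pollqueue(con))
			ib_free_cq(con->cq);
		else
			ib_cq_pool_put(con->cq, con->nr_cqe);
	}
	con->cq = NULL;
}

The split is presumably needed because the shared CQ pool only hands out softirq- or workqueue-polled CQs, whereas a poll queue wants a CQ it can drive directly from rtrs_clt_rdma_cq_direct(); ib_free_cq() and ib_cq_pool_put() must then match whichever allocator was used.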
