2 * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
3 * Copyright (c) 2004 Infinicon Corporation. All rights reserved.
4 * Copyright (c) 2004 Intel Corporation. All rights reserved.
5 * Copyright (c) 2004 Topspin Corporation. All rights reserved.
6 * Copyright (c) 2004 Voltaire Corporation. All rights reserved.
7 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
8 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
10 * This software is available to you under a choice of one of two
11 * licenses. You may choose to be licensed under the terms of the GNU
12 * General Public License (GPL) Version 2, available from the file
13 * COPYING in the main directory of this source tree, or the
14 * OpenIB.org BSD license below:
16 * Redistribution and use in source and binary forms, with or
17 * without modification, are permitted provided that the following
20 * - Redistributions of source code must retain the above
21 * copyright notice, this list of conditions and the following
24 * - Redistributions in binary form must reproduce the above
25 * copyright notice, this list of conditions and the following
26 * disclaimer in the documentation and/or other materials
27 * provided with the distribution.
29 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
30 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
31 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
32 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
33 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
34 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
35 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
39 #include <linux/errno.h>
40 #include <linux/err.h>
41 #include <linux/export.h>
42 #include <linux/string.h>
43 #include <linux/slab.h>
45 #include <linux/in6.h>
46 #include <net/addrconf.h>
47 #include <linux/security.h>
49 #include <rdma/ib_verbs.h>
50 #include <rdma/ib_cache.h>
51 #include <rdma/ib_addr.h>
55 #include "core_priv.h"
56 #include <trace/events/rdma_core.h>
58 static int ib_resolve_eth_dmac(struct ib_device *device,
59 struct rdma_ah_attr *ah_attr);
61 static const char * const ib_events[] = {
62 [IB_EVENT_CQ_ERR] = "CQ error",
63 [IB_EVENT_QP_FATAL] = "QP fatal error",
64 [IB_EVENT_QP_REQ_ERR] = "QP request error",
65 [IB_EVENT_QP_ACCESS_ERR] = "QP access error",
66 [IB_EVENT_COMM_EST] = "communication established",
67 [IB_EVENT_SQ_DRAINED] = "send queue drained",
68 [IB_EVENT_PATH_MIG] = "path migration successful",
69 [IB_EVENT_PATH_MIG_ERR] = "path migration error",
70 [IB_EVENT_DEVICE_FATAL] = "device fatal error",
71 [IB_EVENT_PORT_ACTIVE] = "port active",
72 [IB_EVENT_PORT_ERR] = "port error",
73 [IB_EVENT_LID_CHANGE] = "LID change",
74 [IB_EVENT_PKEY_CHANGE] = "P_key change",
75 [IB_EVENT_SM_CHANGE] = "SM change",
76 [IB_EVENT_SRQ_ERR] = "SRQ error",
77 [IB_EVENT_SRQ_LIMIT_REACHED] = "SRQ limit reached",
78 [IB_EVENT_QP_LAST_WQE_REACHED] = "last WQE reached",
79 [IB_EVENT_CLIENT_REREGISTER] = "client reregister",
80 [IB_EVENT_GID_CHANGE] = "GID changed",
83 const char *__attribute_const__ ib_event_msg(enum ib_event_type event)
87 return (index < ARRAY_SIZE(ib_events) && ib_events[index]) ?
88 ib_events[index] : "unrecognized event";
90 EXPORT_SYMBOL(ib_event_msg);
92 static const char * const wc_statuses[] = {
93 [IB_WC_SUCCESS] = "success",
94 [IB_WC_LOC_LEN_ERR] = "local length error",
95 [IB_WC_LOC_QP_OP_ERR] = "local QP operation error",
96 [IB_WC_LOC_EEC_OP_ERR] = "local EE context operation error",
97 [IB_WC_LOC_PROT_ERR] = "local protection error",
98 [IB_WC_WR_FLUSH_ERR] = "WR flushed",
99 [IB_WC_MW_BIND_ERR] = "memory management operation error",
100 [IB_WC_BAD_RESP_ERR] = "bad response error",
101 [IB_WC_LOC_ACCESS_ERR] = "local access error",
102 [IB_WC_REM_INV_REQ_ERR] = "invalid request error",
103 [IB_WC_REM_ACCESS_ERR] = "remote access error",
104 [IB_WC_REM_OP_ERR] = "remote operation error",
105 [IB_WC_RETRY_EXC_ERR] = "transport retry counter exceeded",
106 [IB_WC_RNR_RETRY_EXC_ERR] = "RNR retry counter exceeded",
107 [IB_WC_LOC_RDD_VIOL_ERR] = "local RDD violation error",
108 [IB_WC_REM_INV_RD_REQ_ERR] = "remote invalid RD request",
109 [IB_WC_REM_ABORT_ERR] = "operation aborted",
110 [IB_WC_INV_EECN_ERR] = "invalid EE context number",
111 [IB_WC_INV_EEC_STATE_ERR] = "invalid EE context state",
112 [IB_WC_FATAL_ERR] = "fatal error",
113 [IB_WC_RESP_TIMEOUT_ERR] = "response timeout error",
114 [IB_WC_GENERAL_ERR] = "general error",
117 const char *__attribute_const__ ib_wc_status_msg(enum ib_wc_status status)
119 size_t index = status;
121 return (index < ARRAY_SIZE(wc_statuses) && wc_statuses[index]) ?
122 wc_statuses[index] : "unrecognized status";
124 EXPORT_SYMBOL(ib_wc_status_msg);
126 __attribute_const__ int ib_rate_to_mult(enum ib_rate rate)
129 case IB_RATE_2_5_GBPS: return 1;
130 case IB_RATE_5_GBPS: return 2;
131 case IB_RATE_10_GBPS: return 4;
132 case IB_RATE_20_GBPS: return 8;
133 case IB_RATE_30_GBPS: return 12;
134 case IB_RATE_40_GBPS: return 16;
135 case IB_RATE_60_GBPS: return 24;
136 case IB_RATE_80_GBPS: return 32;
137 case IB_RATE_120_GBPS: return 48;
138 case IB_RATE_14_GBPS: return 6;
139 case IB_RATE_56_GBPS: return 22;
140 case IB_RATE_112_GBPS: return 45;
141 case IB_RATE_168_GBPS: return 67;
142 case IB_RATE_25_GBPS: return 10;
143 case IB_RATE_100_GBPS: return 40;
144 case IB_RATE_200_GBPS: return 80;
145 case IB_RATE_300_GBPS: return 120;
146 case IB_RATE_28_GBPS: return 11;
147 case IB_RATE_50_GBPS: return 20;
148 case IB_RATE_400_GBPS: return 160;
149 case IB_RATE_600_GBPS: return 240;
153 EXPORT_SYMBOL(ib_rate_to_mult);
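/*
 * Usage note (illustrative): the multiplier is expressed in units of the
 * 2.5 Gb/s SDR base rate, so for example:
 *
 *	ib_rate_to_mult(IB_RATE_10_GBPS) returns 4  (4 x 2.5 Gb/s)
 *	mult_to_ib_rate(4) maps back to IB_RATE_10_GBPS
 */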
155 __attribute_const__ enum ib_rate mult_to_ib_rate(int mult)
158 case 1: return IB_RATE_2_5_GBPS;
159 case 2: return IB_RATE_5_GBPS;
160 case 4: return IB_RATE_10_GBPS;
161 case 8: return IB_RATE_20_GBPS;
162 case 12: return IB_RATE_30_GBPS;
163 case 16: return IB_RATE_40_GBPS;
164 case 24: return IB_RATE_60_GBPS;
165 case 32: return IB_RATE_80_GBPS;
166 case 48: return IB_RATE_120_GBPS;
167 case 6: return IB_RATE_14_GBPS;
168 case 22: return IB_RATE_56_GBPS;
169 case 45: return IB_RATE_112_GBPS;
170 case 67: return IB_RATE_168_GBPS;
171 case 10: return IB_RATE_25_GBPS;
172 case 40: return IB_RATE_100_GBPS;
173 case 80: return IB_RATE_200_GBPS;
174 case 120: return IB_RATE_300_GBPS;
175 case 11: return IB_RATE_28_GBPS;
176 case 20: return IB_RATE_50_GBPS;
177 case 160: return IB_RATE_400_GBPS;
178 case 240: return IB_RATE_600_GBPS;
179 default: return IB_RATE_PORT_CURRENT;
182 EXPORT_SYMBOL(mult_to_ib_rate);
184 __attribute_const__ int ib_rate_to_mbps(enum ib_rate rate)
187 case IB_RATE_2_5_GBPS: return 2500;
188 case IB_RATE_5_GBPS: return 5000;
189 case IB_RATE_10_GBPS: return 10000;
190 case IB_RATE_20_GBPS: return 20000;
191 case IB_RATE_30_GBPS: return 30000;
192 case IB_RATE_40_GBPS: return 40000;
193 case IB_RATE_60_GBPS: return 60000;
194 case IB_RATE_80_GBPS: return 80000;
195 case IB_RATE_120_GBPS: return 120000;
196 case IB_RATE_14_GBPS: return 14062;
197 case IB_RATE_56_GBPS: return 56250;
198 case IB_RATE_112_GBPS: return 112500;
199 case IB_RATE_168_GBPS: return 168750;
200 case IB_RATE_25_GBPS: return 25781;
201 case IB_RATE_100_GBPS: return 103125;
202 case IB_RATE_200_GBPS: return 206250;
203 case IB_RATE_300_GBPS: return 309375;
204 case IB_RATE_28_GBPS: return 28125;
205 case IB_RATE_50_GBPS: return 53125;
206 case IB_RATE_400_GBPS: return 425000;
207 case IB_RATE_600_GBPS: return 637500;
211 EXPORT_SYMBOL(ib_rate_to_mbps);
213 __attribute_const__ enum rdma_transport_type
214 rdma_node_get_transport(unsigned int node_type)
217 if (node_type == RDMA_NODE_USNIC)
218 return RDMA_TRANSPORT_USNIC;
219 if (node_type == RDMA_NODE_USNIC_UDP)
220 return RDMA_TRANSPORT_USNIC_UDP;
221 if (node_type == RDMA_NODE_RNIC)
222 return RDMA_TRANSPORT_IWARP;
223 if (node_type == RDMA_NODE_UNSPECIFIED)
224 return RDMA_TRANSPORT_UNSPECIFIED;
226 return RDMA_TRANSPORT_IB;
228 EXPORT_SYMBOL(rdma_node_get_transport);
230 enum rdma_link_layer rdma_port_get_link_layer(struct ib_device *device, u8 port_num)
232 enum rdma_transport_type lt;
233 if (device->ops.get_link_layer)
234 return device->ops.get_link_layer(device, port_num);
236 lt = rdma_node_get_transport(device->node_type);
237 if (lt == RDMA_TRANSPORT_IB)
238 return IB_LINK_LAYER_INFINIBAND;
240 return IB_LINK_LAYER_ETHERNET;
242 EXPORT_SYMBOL(rdma_port_get_link_layer);
244 /* Protection domains */
247 * ib_alloc_pd - Allocates an unused protection domain.
248 * @device: The device on which to allocate the protection domain.
249 * @flags: protection domain flags
250 * @caller: caller's build-time module name
252 * A protection domain object provides an association between QPs, shared
253 * receive queues, address handles, memory regions, and memory windows.
255 * Every PD has a local_dma_lkey which can be used as the lkey value for local
258 struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
262 int mr_access_flags = 0;
265 pd = rdma_zalloc_drv_obj(device, ib_pd);
267 return ERR_PTR(-ENOMEM);
271 pd->__internal_mr = NULL;
272 atomic_set(&pd->usecnt, 0);
275 pd->res.type = RDMA_RESTRACK_PD;
276 rdma_restrack_set_task(&pd->res, caller);
278 ret = device->ops.alloc_pd(pd, NULL);
283 rdma_restrack_kadd(&pd->res);
285 if (device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY)
286 pd->local_dma_lkey = device->local_dma_lkey;
288 mr_access_flags |= IB_ACCESS_LOCAL_WRITE;
290 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY) {
291 pr_warn("%s: enabling unsafe global rkey\n", caller);
292 mr_access_flags |= IB_ACCESS_REMOTE_READ | IB_ACCESS_REMOTE_WRITE;
295 if (mr_access_flags) {
298 mr = pd->device->ops.get_dma_mr(pd, mr_access_flags);
304 mr->device = pd->device;
306 mr->type = IB_MR_TYPE_DMA;
308 mr->need_inval = false;
310 pd->__internal_mr = mr;
312 if (!(device->attrs.device_cap_flags & IB_DEVICE_LOCAL_DMA_LKEY))
313 pd->local_dma_lkey = pd->__internal_mr->lkey;
315 if (flags & IB_PD_UNSAFE_GLOBAL_RKEY)
316 pd->unsafe_global_rkey = pd->__internal_mr->rkey;
321 EXPORT_SYMBOL(__ib_alloc_pd);
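/*
 * Usage sketch (illustrative only; device is an ib_device obtained by the
 * consumer, e.g. from its ib_client add callback, and error handling is
 * trimmed):
 *
 *	struct ib_pd *pd = ib_alloc_pd(device, 0);
 *
 *	if (IS_ERR(pd))
 *		return PTR_ERR(pd);
 *	...
 *	ib_dealloc_pd(pd);
 */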
324 * ib_dealloc_pd_user - Deallocates a protection domain.
325 * @pd: The protection domain to deallocate.
326 * @udata: Valid user data or NULL for kernel object
328 * It is an error to call this function while any resources in the pd still
329 * exist. The caller is responsible to synchronously destroy them and
330 * guarantee no new allocations will happen.
332 void ib_dealloc_pd_user(struct ib_pd *pd, struct ib_udata *udata)
336 if (pd->__internal_mr) {
337 ret = pd->device->ops.dereg_mr(pd->__internal_mr, NULL);
339 pd->__internal_mr = NULL;
342 /* uverbs manipulates usecnt with proper locking, while the kabi
343 requires the caller to guarantee we can't race here. */
344 WARN_ON(atomic_read(&pd->usecnt));
346 rdma_restrack_del(&pd->res);
347 pd->device->ops.dealloc_pd(pd, udata);
350 EXPORT_SYMBOL(ib_dealloc_pd_user);
352 /* Address handles */
355 * rdma_copy_ah_attr - Copy rdma ah attribute from source to destination.
356 * @dest: Pointer to destination ah_attr. Contents of the destination
357 * pointer are assumed to be invalid and attributes are overwritten.
358 * @src: Pointer to source ah_attr.
360 void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
361 const struct rdma_ah_attr *src)
364 if (dest->grh.sgid_attr)
365 rdma_hold_gid_attr(dest->grh.sgid_attr);
367 EXPORT_SYMBOL(rdma_copy_ah_attr);
370 * rdma_replace_ah_attr - Replace valid ah_attr with new one.
371 * @old: Pointer to existing ah_attr which needs to be replaced.
372 * old is assumed to be valid or zero'd
373 * @new: Pointer to the new ah_attr.
375 * rdma_replace_ah_attr() first releases any reference in the old ah_attr if
376 * the old ah_attr is valid; after that it copies the new attribute and holds
377 * a reference to the new SGID attribute.
379 void rdma_replace_ah_attr(struct rdma_ah_attr *old,
380 const struct rdma_ah_attr *new)
382 rdma_destroy_ah_attr(old);
384 if (old->grh.sgid_attr)
385 rdma_hold_gid_attr(old->grh.sgid_attr);
387 EXPORT_SYMBOL(rdma_replace_ah_attr);
390 * rdma_move_ah_attr - Move ah_attr pointed by source to destination.
391 * @dest: Pointer to destination ah_attr to copy to.
392 * dest is assumed to be valid or zero'd
393 * @src: Pointer to the new ah_attr.
395 * rdma_move_ah_attr() first releases any reference in the destination ah_attr
396 * if it is valid. This also transfers ownership of internal references from
397 * src to dest, making src invalid in the process. No new reference of the src
400 void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src)
402 rdma_destroy_ah_attr(dest);
404 src->grh.sgid_attr = NULL;
406 EXPORT_SYMBOL(rdma_move_ah_attr);
409 * Validate that the rdma_ah_attr is valid for the device before passing it
412 static int rdma_check_ah_attr(struct ib_device *device,
413 struct rdma_ah_attr *ah_attr)
415 if (!rdma_is_port_valid(device, ah_attr->port_num))
418 if ((rdma_is_grh_required(device, ah_attr->port_num) ||
419 ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) &&
420 !(ah_attr->ah_flags & IB_AH_GRH))
423 if (ah_attr->grh.sgid_attr) {
425 * Make sure the passed sgid_attr is consistent with the
428 if (ah_attr->grh.sgid_attr->index != ah_attr->grh.sgid_index ||
429 ah_attr->grh.sgid_attr->port_num != ah_attr->port_num)
436 * If the ah requires a GRH then ensure that sgid_attr pointer is filled in.
437 * On success the caller is responsible to call rdma_unfill_sgid_attr().
439 static int rdma_fill_sgid_attr(struct ib_device *device,
440 struct rdma_ah_attr *ah_attr,
441 const struct ib_gid_attr **old_sgid_attr)
443 const struct ib_gid_attr *sgid_attr;
444 struct ib_global_route *grh;
447 *old_sgid_attr = ah_attr->grh.sgid_attr;
449 ret = rdma_check_ah_attr(device, ah_attr);
453 if (!(ah_attr->ah_flags & IB_AH_GRH))
456 grh = rdma_ah_retrieve_grh(ah_attr);
461 rdma_get_gid_attr(device, ah_attr->port_num, grh->sgid_index);
462 if (IS_ERR(sgid_attr))
463 return PTR_ERR(sgid_attr);
465 /* Move ownership of the kref into the ah_attr */
466 grh->sgid_attr = sgid_attr;
470 static void rdma_unfill_sgid_attr(struct rdma_ah_attr *ah_attr,
471 const struct ib_gid_attr *old_sgid_attr)
474 * Fill didn't change anything, the caller retains ownership of
477 if (ah_attr->grh.sgid_attr == old_sgid_attr)
481 * Otherwise, we need to undo what rdma_fill_sgid_attr() did so the caller
482 * doesn't see any change in the rdma_ah_attr. If we get here
483 * old_sgid_attr is NULL.
485 rdma_destroy_ah_attr(ah_attr);
488 static const struct ib_gid_attr *
489 rdma_update_sgid_attr(struct rdma_ah_attr *ah_attr,
490 const struct ib_gid_attr *old_attr)
493 rdma_put_gid_attr(old_attr);
494 if (ah_attr->ah_flags & IB_AH_GRH) {
495 rdma_hold_gid_attr(ah_attr->grh.sgid_attr);
496 return ah_attr->grh.sgid_attr;
501 static struct ib_ah *_rdma_create_ah(struct ib_pd *pd,
502 struct rdma_ah_attr *ah_attr,
504 struct ib_udata *udata,
505 struct net_device *xmit_slave)
507 struct rdma_ah_init_attr init_attr = {};
508 struct ib_device *device = pd->device;
512 might_sleep_if(flags & RDMA_CREATE_AH_SLEEPABLE);
514 if (!device->ops.create_ah)
515 return ERR_PTR(-EOPNOTSUPP);
517 ah = rdma_zalloc_drv_obj_gfp(
519 (flags & RDMA_CREATE_AH_SLEEPABLE) ? GFP_KERNEL : GFP_ATOMIC);
521 return ERR_PTR(-ENOMEM);
525 ah->type = ah_attr->type;
526 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, NULL);
527 init_attr.ah_attr = ah_attr;
528 init_attr.flags = flags;
529 init_attr.xmit_slave = xmit_slave;
531 ret = device->ops.create_ah(ah, &init_attr, udata);
537 atomic_inc(&pd->usecnt);
542 * rdma_create_ah - Creates an address handle for the
543 * given address vector.
544 * @pd: The protection domain associated with the address handle.
545 * @ah_attr: The attributes of the address vector.
546 * @flags: Create address handle flags (see enum rdma_create_ah_flags).
548 * It returns 0 on success and returns appropriate error code on error.
549 * The address handle is used to reference a local or global destination
550 * in all UD QP post sends.
552 struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr,
555 const struct ib_gid_attr *old_sgid_attr;
556 struct net_device *slave;
560 ret = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
563 slave = rdma_lag_get_ah_roce_slave(pd->device, ah_attr,
564 (flags & RDMA_CREATE_AH_SLEEPABLE) ?
565 GFP_KERNEL : GFP_ATOMIC);
567 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
568 return (void *)slave;
570 ah = _rdma_create_ah(pd, ah_attr, flags, NULL, slave);
571 rdma_lag_put_ah_roce_slave(slave);
572 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
575 EXPORT_SYMBOL(rdma_create_ah);
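/*
 * Usage sketch (illustrative; an IB consumer building an AH toward a known
 * DLID, with port_num/dlid as placeholder values and error handling omitted):
 *
 *	struct rdma_ah_attr attr = {};
 *	struct ib_ah *ah;
 *
 *	attr.type = rdma_ah_find_type(pd->device, port_num);
 *	rdma_ah_set_dlid(&attr, dlid);
 *	rdma_ah_set_sl(&attr, 0);
 *	rdma_ah_set_port_num(&attr, port_num);
 *	ah = rdma_create_ah(pd, &attr, RDMA_CREATE_AH_SLEEPABLE);
 *	...
 *	rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 */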
578 * rdma_create_user_ah - Creates an address handle for the
579 * given address vector.
580 * It resolves the destination MAC address for an ah_attr of RoCE type.
581 * @pd: The protection domain associated with the address handle.
582 * @ah_attr: The attributes of the address vector.
583 * @udata: pointer to user's input output buffer information needed by
586 * It returns 0 on success and returns appropriate error code on error.
587 * The address handle is used to reference a local or global destination
588 * in all UD QP post sends.
590 struct ib_ah *rdma_create_user_ah(struct ib_pd *pd,
591 struct rdma_ah_attr *ah_attr,
592 struct ib_udata *udata)
594 const struct ib_gid_attr *old_sgid_attr;
598 err = rdma_fill_sgid_attr(pd->device, ah_attr, &old_sgid_attr);
602 if (ah_attr->type == RDMA_AH_ATTR_TYPE_ROCE) {
603 err = ib_resolve_eth_dmac(pd->device, ah_attr);
610 ah = _rdma_create_ah(pd, ah_attr, RDMA_CREATE_AH_SLEEPABLE,
614 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
617 EXPORT_SYMBOL(rdma_create_user_ah);
619 int ib_get_rdma_header_version(const union rdma_network_hdr *hdr)
621 const struct iphdr *ip4h = (struct iphdr *)&hdr->roce4grh;
622 struct iphdr ip4h_checked;
623 const struct ipv6hdr *ip6h = (struct ipv6hdr *)&hdr->ibgrh;
625 /* If it's IPv6, the version must be 6, otherwise, the first
626 * 20 bytes (before the IPv4 header) are garbled.
628 if (ip6h->version != 6)
629 return (ip4h->version == 4) ? 4 : 0;
630 /* version may be 6 or 4 because the first 20 bytes could be garbled */
632 /* RoCE v2 requires no options, thus header length
639 * We can't write on scattered buffers so we need to copy to
642 memcpy(&ip4h_checked, ip4h, sizeof(ip4h_checked));
643 ip4h_checked.check = 0;
644 ip4h_checked.check = ip_fast_csum((u8 *)&ip4h_checked, 5);
645 /* if IPv4 header checksum is OK, believe it */
646 if (ip4h->check == ip4h_checked.check)
650 EXPORT_SYMBOL(ib_get_rdma_header_version);
652 static enum rdma_network_type ib_get_net_type_by_grh(struct ib_device *device,
654 const struct ib_grh *grh)
658 if (rdma_protocol_ib(device, port_num))
659 return RDMA_NETWORK_IB;
661 grh_version = ib_get_rdma_header_version((union rdma_network_hdr *)grh);
663 if (grh_version == 4)
664 return RDMA_NETWORK_IPV4;
666 if (grh->next_hdr == IPPROTO_UDP)
667 return RDMA_NETWORK_IPV6;
669 return RDMA_NETWORK_ROCE_V1;
672 struct find_gid_index_context {
674 enum ib_gid_type gid_type;
677 static bool find_gid_index(const union ib_gid *gid,
678 const struct ib_gid_attr *gid_attr,
681 struct find_gid_index_context *ctx = context;
682 u16 vlan_id = 0xffff;
685 if (ctx->gid_type != gid_attr->gid_type)
688 ret = rdma_read_gid_l2_fields(gid_attr, &vlan_id, NULL);
692 return ctx->vlan_id == vlan_id;
695 static const struct ib_gid_attr *
696 get_sgid_attr_from_eth(struct ib_device *device, u8 port_num,
697 u16 vlan_id, const union ib_gid *sgid,
698 enum ib_gid_type gid_type)
700 struct find_gid_index_context context = {.vlan_id = vlan_id,
701 .gid_type = gid_type};
703 return rdma_find_gid_by_filter(device, sgid, port_num, find_gid_index,
707 int ib_get_gids_from_rdma_hdr(const union rdma_network_hdr *hdr,
708 enum rdma_network_type net_type,
709 union ib_gid *sgid, union ib_gid *dgid)
711 struct sockaddr_in src_in;
712 struct sockaddr_in dst_in;
713 __be32 src_saddr, dst_saddr;
718 if (net_type == RDMA_NETWORK_IPV4) {
719 memcpy(&src_in.sin_addr.s_addr,
720 &hdr->roce4grh.saddr, 4);
721 memcpy(&dst_in.sin_addr.s_addr,
722 &hdr->roce4grh.daddr, 4);
723 src_saddr = src_in.sin_addr.s_addr;
724 dst_saddr = dst_in.sin_addr.s_addr;
725 ipv6_addr_set_v4mapped(src_saddr,
726 (struct in6_addr *)sgid);
727 ipv6_addr_set_v4mapped(dst_saddr,
728 (struct in6_addr *)dgid);
730 } else if (net_type == RDMA_NETWORK_IPV6 ||
731 net_type == RDMA_NETWORK_IB) {
732 *dgid = hdr->ibgrh.dgid;
733 *sgid = hdr->ibgrh.sgid;
739 EXPORT_SYMBOL(ib_get_gids_from_rdma_hdr);
741 /* Resolve destination mac address and hop limit for unicast destination
742 * GID entry, considering the source GID entry as well.
743 * The ah_attr must have a valid port_num and sgid_index.
745 static int ib_resolve_unicast_gid_dmac(struct ib_device *device,
746 struct rdma_ah_attr *ah_attr)
748 struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
749 const struct ib_gid_attr *sgid_attr = grh->sgid_attr;
750 int hop_limit = 0xff;
753 /* If destination is link local and source GID is RoCEv1,
754 * IP stack is not used.
756 if (rdma_link_local_addr((struct in6_addr *)grh->dgid.raw) &&
757 sgid_attr->gid_type == IB_GID_TYPE_ROCE) {
758 rdma_get_ll_mac((struct in6_addr *)grh->dgid.raw,
763 ret = rdma_addr_find_l2_eth_by_grh(&sgid_attr->gid, &grh->dgid,
765 sgid_attr, &hop_limit);
767 grh->hop_limit = hop_limit;
772 * This function initializes address handle attributes from the incoming packet.
773 * The incoming packet carries the dgid of the receiving node on which this
774 * code is executing, and its sgid contains the GID of the sender.
776 * When resolving the destination MAC address, the arrived dgid is used as
777 * the sgid, and the arrived sgid is used as the dgid, because the sgid holds
778 * the destination GID to respond to.
780 * On success the caller is responsible to call rdma_destroy_ah_attr on the
783 int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
784 const struct ib_wc *wc, const struct ib_grh *grh,
785 struct rdma_ah_attr *ah_attr)
789 enum rdma_network_type net_type = RDMA_NETWORK_IB;
790 enum ib_gid_type gid_type = IB_GID_TYPE_IB;
791 const struct ib_gid_attr *sgid_attr;
798 memset(ah_attr, 0, sizeof *ah_attr);
799 ah_attr->type = rdma_ah_find_type(device, port_num);
800 if (rdma_cap_eth_ah(device, port_num)) {
801 if (wc->wc_flags & IB_WC_WITH_NETWORK_HDR_TYPE)
802 net_type = wc->network_hdr_type;
804 net_type = ib_get_net_type_by_grh(device, port_num, grh);
805 gid_type = ib_network_to_gid_type(net_type);
807 ret = ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
812 rdma_ah_set_sl(ah_attr, wc->sl);
813 rdma_ah_set_port_num(ah_attr, port_num);
815 if (rdma_protocol_roce(device, port_num)) {
816 u16 vlan_id = wc->wc_flags & IB_WC_WITH_VLAN ?
817 wc->vlan_id : 0xffff;
819 if (!(wc->wc_flags & IB_WC_GRH))
822 sgid_attr = get_sgid_attr_from_eth(device, port_num,
825 if (IS_ERR(sgid_attr))
826 return PTR_ERR(sgid_attr);
828 flow_class = be32_to_cpu(grh->version_tclass_flow);
829 rdma_move_grh_sgid_attr(ah_attr,
831 flow_class & 0xFFFFF,
833 (flow_class >> 20) & 0xFF,
836 ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
838 rdma_destroy_ah_attr(ah_attr);
842 rdma_ah_set_dlid(ah_attr, wc->slid);
843 rdma_ah_set_path_bits(ah_attr, wc->dlid_path_bits);
845 if ((wc->wc_flags & IB_WC_GRH) == 0)
848 if (dgid.global.interface_id !=
849 cpu_to_be64(IB_SA_WELL_KNOWN_GUID)) {
850 sgid_attr = rdma_find_gid_by_port(
851 device, &dgid, IB_GID_TYPE_IB, port_num, NULL);
853 sgid_attr = rdma_get_gid_attr(device, port_num, 0);
855 if (IS_ERR(sgid_attr))
856 return PTR_ERR(sgid_attr);
857 flow_class = be32_to_cpu(grh->version_tclass_flow);
858 rdma_move_grh_sgid_attr(ah_attr,
860 flow_class & 0xFFFFF,
862 (flow_class >> 20) & 0xFF,
868 EXPORT_SYMBOL(ib_init_ah_attr_from_wc);
871 * rdma_move_grh_sgid_attr - Sets the sgid attribute of GRH, taking ownership
874 * @attr: Pointer to AH attribute structure
875 * @dgid: Destination GID
876 * @flow_label: Flow label
877 * @hop_limit: Hop limit
878 * @traffic_class: traffic class
879 * @sgid_attr: Pointer to SGID attribute
881 * This takes ownership of the sgid_attr reference. The caller must ensure
882 * rdma_destroy_ah_attr() is called before destroying the rdma_ah_attr after
883 * calling this function.
885 void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
886 u32 flow_label, u8 hop_limit, u8 traffic_class,
887 const struct ib_gid_attr *sgid_attr)
889 rdma_ah_set_grh(attr, dgid, flow_label, sgid_attr->index, hop_limit,
891 attr->grh.sgid_attr = sgid_attr;
893 EXPORT_SYMBOL(rdma_move_grh_sgid_attr);
896 * rdma_destroy_ah_attr - Release reference to SGID attribute of
898 * @ah_attr: Pointer to ah attribute
900 * Release reference to the SGID attribute of the ah attribute if it is
901 * non NULL. It is safe to call this multiple times, and safe to call it on
902 * a zero initialized ah_attr.
904 void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr)
906 if (ah_attr->grh.sgid_attr) {
907 rdma_put_gid_attr(ah_attr->grh.sgid_attr);
908 ah_attr->grh.sgid_attr = NULL;
911 EXPORT_SYMBOL(rdma_destroy_ah_attr);
913 struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
914 const struct ib_grh *grh, u8 port_num)
916 struct rdma_ah_attr ah_attr;
920 ret = ib_init_ah_attr_from_wc(pd->device, port_num, wc, grh, &ah_attr);
924 ah = rdma_create_ah(pd, &ah_attr, RDMA_CREATE_AH_SLEEPABLE);
926 rdma_destroy_ah_attr(&ah_attr);
929 EXPORT_SYMBOL(ib_create_ah_from_wc);
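/*
 * Usage sketch (illustrative): a UD service answering a request can build the
 * reply AH directly from the received completion and GRH:
 *
 *	struct ib_ah *ah = ib_create_ah_from_wc(pd, wc, grh, port_num);
 *
 *	if (!IS_ERR(ah)) {
 *		... post the UD reply using ah ...
 *		rdma_destroy_ah(ah, RDMA_DESTROY_AH_SLEEPABLE);
 *	}
 */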
931 int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
933 const struct ib_gid_attr *old_sgid_attr;
936 if (ah->type != ah_attr->type)
939 ret = rdma_fill_sgid_attr(ah->device, ah_attr, &old_sgid_attr);
943 ret = ah->device->ops.modify_ah ?
944 ah->device->ops.modify_ah(ah, ah_attr) :
947 ah->sgid_attr = rdma_update_sgid_attr(ah_attr, ah->sgid_attr);
948 rdma_unfill_sgid_attr(ah_attr, old_sgid_attr);
951 EXPORT_SYMBOL(rdma_modify_ah);
953 int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr)
955 ah_attr->grh.sgid_attr = NULL;
957 return ah->device->ops.query_ah ?
958 ah->device->ops.query_ah(ah, ah_attr) :
961 EXPORT_SYMBOL(rdma_query_ah);
963 int rdma_destroy_ah_user(struct ib_ah *ah, u32 flags, struct ib_udata *udata)
965 const struct ib_gid_attr *sgid_attr = ah->sgid_attr;
968 might_sleep_if(flags & RDMA_DESTROY_AH_SLEEPABLE);
972 ah->device->ops.destroy_ah(ah, flags);
973 atomic_dec(&pd->usecnt);
975 rdma_put_gid_attr(sgid_attr);
980 EXPORT_SYMBOL(rdma_destroy_ah_user);
982 /* Shared receive queues */
985 * ib_create_srq_user - Creates a SRQ associated with the specified protection
987 * @pd: The protection domain associated with the SRQ.
988 * @srq_init_attr: A list of initial attributes required to create the
989 * SRQ. If SRQ creation succeeds, then the attributes are updated to
990 * the actual capabilities of the created SRQ.
991 * @uobject: uobject pointer if this is not a kernel SRQ
992 * @udata: udata pointer if this is not a kernel SRQ
994 * srq_attr->max_wr and srq_attr->max_sge are read to determine the
995 * requested size of the SRQ, and are set to the actual values allocated
996 * on return. If ib_create_srq() succeeds, then max_wr and max_sge
997 * will always be at least as large as the requested values.
999 struct ib_srq *ib_create_srq_user(struct ib_pd *pd,
1000 struct ib_srq_init_attr *srq_init_attr,
1001 struct ib_usrq_object *uobject,
1002 struct ib_udata *udata)
1007 srq = rdma_zalloc_drv_obj(pd->device, ib_srq);
1009 return ERR_PTR(-ENOMEM);
1011 srq->device = pd->device;
1013 srq->event_handler = srq_init_attr->event_handler;
1014 srq->srq_context = srq_init_attr->srq_context;
1015 srq->srq_type = srq_init_attr->srq_type;
1016 srq->uobject = uobject;
1018 if (ib_srq_has_cq(srq->srq_type)) {
1019 srq->ext.cq = srq_init_attr->ext.cq;
1020 atomic_inc(&srq->ext.cq->usecnt);
1022 if (srq->srq_type == IB_SRQT_XRC) {
1023 srq->ext.xrc.xrcd = srq_init_attr->ext.xrc.xrcd;
1024 atomic_inc(&srq->ext.xrc.xrcd->usecnt);
1026 atomic_inc(&pd->usecnt);
1028 ret = pd->device->ops.create_srq(srq, srq_init_attr, udata);
1030 atomic_dec(&srq->pd->usecnt);
1031 if (srq->srq_type == IB_SRQT_XRC)
1032 atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1033 if (ib_srq_has_cq(srq->srq_type))
1034 atomic_dec(&srq->ext.cq->usecnt);
1036 return ERR_PTR(ret);
1041 EXPORT_SYMBOL(ib_create_srq_user);
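/*
 * Usage sketch (illustrative; a kernel consumer would normally go through the
 * ib_create_srq() wrapper, and the queue sizes below are placeholders):
 *
 *	struct ib_srq_init_attr init_attr = {
 *		.attr		= { .max_wr = 256, .max_sge = 1 },
 *		.srq_type	= IB_SRQT_BASIC,
 *	};
 *	struct ib_srq *srq = ib_create_srq(pd, &init_attr);
 *
 *	if (IS_ERR(srq))
 *		return PTR_ERR(srq);
 */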
1043 int ib_modify_srq(struct ib_srq *srq,
1044 struct ib_srq_attr *srq_attr,
1045 enum ib_srq_attr_mask srq_attr_mask)
1047 return srq->device->ops.modify_srq ?
1048 srq->device->ops.modify_srq(srq, srq_attr, srq_attr_mask,
1049 NULL) : -EOPNOTSUPP;
1051 EXPORT_SYMBOL(ib_modify_srq);
1053 int ib_query_srq(struct ib_srq *srq,
1054 struct ib_srq_attr *srq_attr)
1056 return srq->device->ops.query_srq ?
1057 srq->device->ops.query_srq(srq, srq_attr) : -EOPNOTSUPP;
1059 EXPORT_SYMBOL(ib_query_srq);
1061 int ib_destroy_srq_user(struct ib_srq *srq, struct ib_udata *udata)
1063 if (atomic_read(&srq->usecnt))
1066 srq->device->ops.destroy_srq(srq, udata);
1068 atomic_dec(&srq->pd->usecnt);
1069 if (srq->srq_type == IB_SRQT_XRC)
1070 atomic_dec(&srq->ext.xrc.xrcd->usecnt);
1071 if (ib_srq_has_cq(srq->srq_type))
1072 atomic_dec(&srq->ext.cq->usecnt);
1077 EXPORT_SYMBOL(ib_destroy_srq_user);
1081 static void __ib_shared_qp_event_handler(struct ib_event *event, void *context)
1083 struct ib_qp *qp = context;
1084 unsigned long flags;
1086 spin_lock_irqsave(&qp->device->qp_open_list_lock, flags);
1087 list_for_each_entry(event->element.qp, &qp->open_list, open_list)
1088 if (event->element.qp->event_handler)
1089 event->element.qp->event_handler(event, event->element.qp->qp_context);
1090 spin_unlock_irqrestore(&qp->device->qp_open_list_lock, flags);
1093 static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp,
1094 void (*event_handler)(struct ib_event *, void *),
1098 unsigned long flags;
1101 qp = kzalloc(sizeof *qp, GFP_KERNEL);
1103 return ERR_PTR(-ENOMEM);
1105 qp->real_qp = real_qp;
1106 err = ib_open_shared_qp_security(qp, real_qp->device);
1109 return ERR_PTR(err);
1112 qp->real_qp = real_qp;
1113 atomic_inc(&real_qp->usecnt);
1114 qp->device = real_qp->device;
1115 qp->event_handler = event_handler;
1116 qp->qp_context = qp_context;
1117 qp->qp_num = real_qp->qp_num;
1118 qp->qp_type = real_qp->qp_type;
1120 spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
1121 list_add(&qp->open_list, &real_qp->open_list);
1122 spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
1127 struct ib_qp *ib_open_qp(struct ib_xrcd *xrcd,
1128 struct ib_qp_open_attr *qp_open_attr)
1130 struct ib_qp *qp, *real_qp;
1132 if (qp_open_attr->qp_type != IB_QPT_XRC_TGT)
1133 return ERR_PTR(-EINVAL);
1135 down_read(&xrcd->tgt_qps_rwsem);
1136 real_qp = xa_load(&xrcd->tgt_qps, qp_open_attr->qp_num);
1138 up_read(&xrcd->tgt_qps_rwsem);
1139 return ERR_PTR(-EINVAL);
1141 qp = __ib_open_qp(real_qp, qp_open_attr->event_handler,
1142 qp_open_attr->qp_context);
1143 up_read(&xrcd->tgt_qps_rwsem);
1146 EXPORT_SYMBOL(ib_open_qp);
1148 static struct ib_qp *create_xrc_qp_user(struct ib_qp *qp,
1149 struct ib_qp_init_attr *qp_init_attr)
1151 struct ib_qp *real_qp = qp;
1154 qp->event_handler = __ib_shared_qp_event_handler;
1155 qp->qp_context = qp;
1157 qp->send_cq = qp->recv_cq = NULL;
1159 qp->xrcd = qp_init_attr->xrcd;
1160 atomic_inc(&qp_init_attr->xrcd->usecnt);
1161 INIT_LIST_HEAD(&qp->open_list);
1163 qp = __ib_open_qp(real_qp, qp_init_attr->event_handler,
1164 qp_init_attr->qp_context);
1168 err = xa_err(xa_store(&qp_init_attr->xrcd->tgt_qps, real_qp->qp_num,
1169 real_qp, GFP_KERNEL));
1172 return ERR_PTR(err);
1178 * ib_create_qp - Creates a kernel QP associated with the specified protection
1180 * @pd: The protection domain associated with the QP.
1181 * @qp_init_attr: A list of initial attributes required to create the
1182 * QP. If QP creation succeeds, then the attributes are updated to
1183 * the actual capabilities of the created QP.
1185 * NOTE: for user qp use ib_create_qp_user with valid udata!
1187 struct ib_qp *ib_create_qp(struct ib_pd *pd,
1188 struct ib_qp_init_attr *qp_init_attr)
1190 struct ib_device *device = pd ? pd->device : qp_init_attr->xrcd->device;
1194 if (qp_init_attr->rwq_ind_tbl &&
1195 (qp_init_attr->recv_cq ||
1196 qp_init_attr->srq || qp_init_attr->cap.max_recv_wr ||
1197 qp_init_attr->cap.max_recv_sge))
1198 return ERR_PTR(-EINVAL);
1200 if ((qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN) &&
1201 !(device->attrs.device_cap_flags & IB_DEVICE_INTEGRITY_HANDOVER))
1202 return ERR_PTR(-EINVAL);
1205 * If the caller is using the RDMA API, calculate the resources
1206 * needed for the RDMA READ/WRITE operations.
1208 * Note that these callers need to pass in a port number.
1210 if (qp_init_attr->cap.max_rdma_ctxs)
1211 rdma_rw_init_qp(device, qp_init_attr);
1213 qp = _ib_create_qp(device, pd, qp_init_attr, NULL, NULL);
1217 ret = ib_create_qp_security(qp, device);
1221 if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
1222 struct ib_qp *xrc_qp =
1223 create_xrc_qp_user(qp, qp_init_attr);
1225 if (IS_ERR(xrc_qp)) {
1226 ret = PTR_ERR(xrc_qp);
1232 qp->event_handler = qp_init_attr->event_handler;
1233 qp->qp_context = qp_init_attr->qp_context;
1234 if (qp_init_attr->qp_type == IB_QPT_XRC_INI) {
1238 qp->recv_cq = qp_init_attr->recv_cq;
1239 if (qp_init_attr->recv_cq)
1240 atomic_inc(&qp_init_attr->recv_cq->usecnt);
1241 qp->srq = qp_init_attr->srq;
1243 atomic_inc(&qp_init_attr->srq->usecnt);
1246 qp->send_cq = qp_init_attr->send_cq;
1249 atomic_inc(&pd->usecnt);
1250 if (qp_init_attr->send_cq)
1251 atomic_inc(&qp_init_attr->send_cq->usecnt);
1252 if (qp_init_attr->rwq_ind_tbl)
1253 atomic_inc(&qp->rwq_ind_tbl->usecnt);
1255 if (qp_init_attr->cap.max_rdma_ctxs) {
1256 ret = rdma_rw_init_mrs(qp, qp_init_attr);
1262 * Note: all hw drivers guarantee that max_send_sge is lower than
1263 * the device RDMA WRITE SGE limit but not all hw drivers ensure that
1264 * max_send_sge <= max_sge_rd.
1266 qp->max_write_sge = qp_init_attr->cap.max_send_sge;
1267 qp->max_read_sge = min_t(u32, qp_init_attr->cap.max_send_sge,
1268 device->attrs.max_sge_rd);
1269 if (qp_init_attr->create_flags & IB_QP_CREATE_INTEGRITY_EN)
1270 qp->integrity_en = true;
1276 return ERR_PTR(ret);
1279 EXPORT_SYMBOL(ib_create_qp);
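/*
 * Usage sketch (illustrative; a minimal RC QP for a kernel consumer, with
 * placeholder capability values and no error handling):
 *
 *	struct ib_qp_init_attr qp_attr = {
 *		.send_cq	= send_cq,
 *		.recv_cq	= recv_cq,
 *		.qp_type	= IB_QPT_RC,
 *		.sq_sig_type	= IB_SIGNAL_REQ_WR,
 *		.cap		= { .max_send_wr = 64, .max_recv_wr = 64,
 *				    .max_send_sge = 1, .max_recv_sge = 1 },
 *	};
 *	struct ib_qp *qp = ib_create_qp(pd, &qp_attr);
 */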
1281 static const struct {
1283 enum ib_qp_attr_mask req_param[IB_QPT_MAX];
1284 enum ib_qp_attr_mask opt_param[IB_QPT_MAX];
1285 } qp_state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
1287 [IB_QPS_RESET] = { .valid = 1 },
1291 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1294 [IB_QPT_RAW_PACKET] = IB_QP_PORT,
1295 [IB_QPT_UC] = (IB_QP_PKEY_INDEX |
1297 IB_QP_ACCESS_FLAGS),
1298 [IB_QPT_RC] = (IB_QP_PKEY_INDEX |
1300 IB_QP_ACCESS_FLAGS),
1301 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
1303 IB_QP_ACCESS_FLAGS),
1304 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
1306 IB_QP_ACCESS_FLAGS),
1307 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1309 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1315 [IB_QPS_RESET] = { .valid = 1 },
1316 [IB_QPS_ERR] = { .valid = 1 },
1320 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1323 [IB_QPT_UC] = (IB_QP_PKEY_INDEX |
1325 IB_QP_ACCESS_FLAGS),
1326 [IB_QPT_RC] = (IB_QP_PKEY_INDEX |
1328 IB_QP_ACCESS_FLAGS),
1329 [IB_QPT_XRC_INI] = (IB_QP_PKEY_INDEX |
1331 IB_QP_ACCESS_FLAGS),
1332 [IB_QPT_XRC_TGT] = (IB_QP_PKEY_INDEX |
1334 IB_QP_ACCESS_FLAGS),
1335 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1337 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1344 [IB_QPT_UC] = (IB_QP_AV |
1348 [IB_QPT_RC] = (IB_QP_AV |
1352 IB_QP_MAX_DEST_RD_ATOMIC |
1353 IB_QP_MIN_RNR_TIMER),
1354 [IB_QPT_XRC_INI] = (IB_QP_AV |
1358 [IB_QPT_XRC_TGT] = (IB_QP_AV |
1362 IB_QP_MAX_DEST_RD_ATOMIC |
1363 IB_QP_MIN_RNR_TIMER),
1366 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1368 [IB_QPT_UC] = (IB_QP_ALT_PATH |
1369 IB_QP_ACCESS_FLAGS |
1371 [IB_QPT_RC] = (IB_QP_ALT_PATH |
1372 IB_QP_ACCESS_FLAGS |
1374 [IB_QPT_XRC_INI] = (IB_QP_ALT_PATH |
1375 IB_QP_ACCESS_FLAGS |
1377 [IB_QPT_XRC_TGT] = (IB_QP_ALT_PATH |
1378 IB_QP_ACCESS_FLAGS |
1380 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1382 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1388 [IB_QPS_RESET] = { .valid = 1 },
1389 [IB_QPS_ERR] = { .valid = 1 },
1393 [IB_QPT_UD] = IB_QP_SQ_PSN,
1394 [IB_QPT_UC] = IB_QP_SQ_PSN,
1395 [IB_QPT_RC] = (IB_QP_TIMEOUT |
1399 IB_QP_MAX_QP_RD_ATOMIC),
1400 [IB_QPT_XRC_INI] = (IB_QP_TIMEOUT |
1404 IB_QP_MAX_QP_RD_ATOMIC),
1405 [IB_QPT_XRC_TGT] = (IB_QP_TIMEOUT |
1407 [IB_QPT_SMI] = IB_QP_SQ_PSN,
1408 [IB_QPT_GSI] = IB_QP_SQ_PSN,
1411 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1413 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1415 IB_QP_ACCESS_FLAGS |
1416 IB_QP_PATH_MIG_STATE),
1417 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1419 IB_QP_ACCESS_FLAGS |
1420 IB_QP_MIN_RNR_TIMER |
1421 IB_QP_PATH_MIG_STATE),
1422 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1424 IB_QP_ACCESS_FLAGS |
1425 IB_QP_PATH_MIG_STATE),
1426 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1428 IB_QP_ACCESS_FLAGS |
1429 IB_QP_MIN_RNR_TIMER |
1430 IB_QP_PATH_MIG_STATE),
1431 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1433 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1435 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1440 [IB_QPS_RESET] = { .valid = 1 },
1441 [IB_QPS_ERR] = { .valid = 1 },
1445 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1447 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1448 IB_QP_ACCESS_FLAGS |
1450 IB_QP_PATH_MIG_STATE),
1451 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1452 IB_QP_ACCESS_FLAGS |
1454 IB_QP_PATH_MIG_STATE |
1455 IB_QP_MIN_RNR_TIMER),
1456 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1457 IB_QP_ACCESS_FLAGS |
1459 IB_QP_PATH_MIG_STATE),
1460 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1461 IB_QP_ACCESS_FLAGS |
1463 IB_QP_PATH_MIG_STATE |
1464 IB_QP_MIN_RNR_TIMER),
1465 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1467 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1469 [IB_QPT_RAW_PACKET] = IB_QP_RATE_LIMIT,
1475 [IB_QPT_UD] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1476 [IB_QPT_UC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1477 [IB_QPT_RC] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1478 [IB_QPT_XRC_INI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1479 [IB_QPT_XRC_TGT] = IB_QP_EN_SQD_ASYNC_NOTIFY, /* ??? */
1480 [IB_QPT_SMI] = IB_QP_EN_SQD_ASYNC_NOTIFY,
1481 [IB_QPT_GSI] = IB_QP_EN_SQD_ASYNC_NOTIFY
1486 [IB_QPS_RESET] = { .valid = 1 },
1487 [IB_QPS_ERR] = { .valid = 1 },
1491 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1493 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1495 IB_QP_ACCESS_FLAGS |
1496 IB_QP_PATH_MIG_STATE),
1497 [IB_QPT_RC] = (IB_QP_CUR_STATE |
1499 IB_QP_ACCESS_FLAGS |
1500 IB_QP_MIN_RNR_TIMER |
1501 IB_QP_PATH_MIG_STATE),
1502 [IB_QPT_XRC_INI] = (IB_QP_CUR_STATE |
1504 IB_QP_ACCESS_FLAGS |
1505 IB_QP_PATH_MIG_STATE),
1506 [IB_QPT_XRC_TGT] = (IB_QP_CUR_STATE |
1508 IB_QP_ACCESS_FLAGS |
1509 IB_QP_MIN_RNR_TIMER |
1510 IB_QP_PATH_MIG_STATE),
1511 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1513 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1520 [IB_QPT_UD] = (IB_QP_PKEY_INDEX |
1522 [IB_QPT_UC] = (IB_QP_AV |
1524 IB_QP_ACCESS_FLAGS |
1526 IB_QP_PATH_MIG_STATE),
1527 [IB_QPT_RC] = (IB_QP_PORT |
1532 IB_QP_MAX_QP_RD_ATOMIC |
1533 IB_QP_MAX_DEST_RD_ATOMIC |
1535 IB_QP_ACCESS_FLAGS |
1537 IB_QP_MIN_RNR_TIMER |
1538 IB_QP_PATH_MIG_STATE),
1539 [IB_QPT_XRC_INI] = (IB_QP_PORT |
1544 IB_QP_MAX_QP_RD_ATOMIC |
1546 IB_QP_ACCESS_FLAGS |
1548 IB_QP_PATH_MIG_STATE),
1549 [IB_QPT_XRC_TGT] = (IB_QP_PORT |
1552 IB_QP_MAX_DEST_RD_ATOMIC |
1554 IB_QP_ACCESS_FLAGS |
1556 IB_QP_MIN_RNR_TIMER |
1557 IB_QP_PATH_MIG_STATE),
1558 [IB_QPT_SMI] = (IB_QP_PKEY_INDEX |
1560 [IB_QPT_GSI] = (IB_QP_PKEY_INDEX |
1566 [IB_QPS_RESET] = { .valid = 1 },
1567 [IB_QPS_ERR] = { .valid = 1 },
1571 [IB_QPT_UD] = (IB_QP_CUR_STATE |
1573 [IB_QPT_UC] = (IB_QP_CUR_STATE |
1574 IB_QP_ACCESS_FLAGS),
1575 [IB_QPT_SMI] = (IB_QP_CUR_STATE |
1577 [IB_QPT_GSI] = (IB_QP_CUR_STATE |
1583 [IB_QPS_RESET] = { .valid = 1 },
1584 [IB_QPS_ERR] = { .valid = 1 }
1588 bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
1589 enum ib_qp_type type, enum ib_qp_attr_mask mask)
1591 enum ib_qp_attr_mask req_param, opt_param;
1593 if (mask & IB_QP_CUR_STATE &&
1594 cur_state != IB_QPS_RTR && cur_state != IB_QPS_RTS &&
1595 cur_state != IB_QPS_SQD && cur_state != IB_QPS_SQE)
1598 if (!qp_state_table[cur_state][next_state].valid)
1601 req_param = qp_state_table[cur_state][next_state].req_param[type];
1602 opt_param = qp_state_table[cur_state][next_state].opt_param[type];
1604 if ((mask & req_param) != req_param)
1607 if (mask & ~(req_param | opt_param | IB_QP_STATE))
1612 EXPORT_SYMBOL(ib_modify_qp_is_ok);
1615 * ib_resolve_eth_dmac - Resolve destination mac address
1616 * @device: Device to consider
1617 * @ah_attr: address handle attribute which describes the
1618 * source and destination parameters
1619 * ib_resolve_eth_dmac() resolves the destination MAC address and L3 hop limit.
1620 * It returns 0 on success or an appropriate error code. It initializes the
1621 * necessary ah_attr fields when the call is successful.
1623 static int ib_resolve_eth_dmac(struct ib_device *device,
1624 struct rdma_ah_attr *ah_attr)
1628 if (rdma_is_multicast_addr((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1629 if (ipv6_addr_v4mapped((struct in6_addr *)ah_attr->grh.dgid.raw)) {
1632 memcpy(&addr, ah_attr->grh.dgid.raw + 12, 4);
1633 ip_eth_mc_map(addr, (char *)ah_attr->roce.dmac);
1635 ipv6_eth_mc_map((struct in6_addr *)ah_attr->grh.dgid.raw,
1636 (char *)ah_attr->roce.dmac);
1639 ret = ib_resolve_unicast_gid_dmac(device, ah_attr);
1644 static bool is_qp_type_connected(const struct ib_qp *qp)
1646 return (qp->qp_type == IB_QPT_UC ||
1647 qp->qp_type == IB_QPT_RC ||
1648 qp->qp_type == IB_QPT_XRC_INI ||
1649 qp->qp_type == IB_QPT_XRC_TGT);
1653 * IB core internal function to perform QP attributes modification.
1655 static int _ib_modify_qp(struct ib_qp *qp, struct ib_qp_attr *attr,
1656 int attr_mask, struct ib_udata *udata)
1658 u8 port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
1659 const struct ib_gid_attr *old_sgid_attr_av;
1660 const struct ib_gid_attr *old_sgid_attr_alt_av;
1663 attr->xmit_slave = NULL;
1664 if (attr_mask & IB_QP_AV) {
1665 ret = rdma_fill_sgid_attr(qp->device, &attr->ah_attr,
1670 if (attr->ah_attr.type == RDMA_AH_ATTR_TYPE_ROCE &&
1671 is_qp_type_connected(qp)) {
1672 struct net_device *slave;
1675 * If the user provided the qp_attr then we have to
1676 * resolve it. Kernel users have to provide already
1677 * resolved rdma_ah_attr's.
1680 ret = ib_resolve_eth_dmac(qp->device,
1685 slave = rdma_lag_get_ah_roce_slave(qp->device,
1690 attr->xmit_slave = slave;
1693 if (attr_mask & IB_QP_ALT_PATH) {
1695 * FIXME: This does not track the migration state, so if the
1696 * user loads a new alternate path after the HW has migrated
1697 * from primary->alternate we will keep the wrong
1698 * references. This is OK for IB because the reference
1699 * counting does not serve any functional purpose.
1701 ret = rdma_fill_sgid_attr(qp->device, &attr->alt_ah_attr,
1702 &old_sgid_attr_alt_av);
1707 * Today the core code can only handle alternate paths and APM
1708 * for IB. Ban them in roce mode.
1710 if (!(rdma_protocol_ib(qp->device,
1711 attr->alt_ah_attr.port_num) &&
1712 rdma_protocol_ib(qp->device, port))) {
1718 if (rdma_ib_or_roce(qp->device, port)) {
1719 if (attr_mask & IB_QP_RQ_PSN && attr->rq_psn & ~0xffffff) {
1720 dev_warn(&qp->device->dev,
1721 "%s rq_psn overflow, masking to 24 bits\n",
1723 attr->rq_psn &= 0xffffff;
1726 if (attr_mask & IB_QP_SQ_PSN && attr->sq_psn & ~0xffffff) {
1727 dev_warn(&qp->device->dev,
1728 " %s sq_psn overflow, masking to 24 bits\n",
1730 attr->sq_psn &= 0xffffff;
1735 * Bind this qp to a counter automatically based on the rdma counter
1736 * rules. This is only done in RST2INIT when the port is specified.
1738 if (!qp->counter && (attr_mask & IB_QP_PORT) &&
1739 ((attr_mask & IB_QP_STATE) && attr->qp_state == IB_QPS_INIT))
1740 rdma_counter_bind_qp_auto(qp, attr->port_num);
1742 ret = ib_security_modify_qp(qp, attr, attr_mask, udata);
1746 if (attr_mask & IB_QP_PORT)
1747 qp->port = attr->port_num;
1748 if (attr_mask & IB_QP_AV)
1750 rdma_update_sgid_attr(&attr->ah_attr, qp->av_sgid_attr);
1751 if (attr_mask & IB_QP_ALT_PATH)
1752 qp->alt_path_sgid_attr = rdma_update_sgid_attr(
1753 &attr->alt_ah_attr, qp->alt_path_sgid_attr);
1756 if (attr_mask & IB_QP_ALT_PATH)
1757 rdma_unfill_sgid_attr(&attr->alt_ah_attr, old_sgid_attr_alt_av);
1759 if (attr_mask & IB_QP_AV) {
1760 rdma_lag_put_ah_roce_slave(attr->xmit_slave);
1761 rdma_unfill_sgid_attr(&attr->ah_attr, old_sgid_attr_av);
1767 * ib_modify_qp_with_udata - Modifies the attributes for the specified QP.
1768 * @ib_qp: The QP to modify.
1769 * @attr: On input, specifies the QP attributes to modify. On output,
1770 * the current values of selected QP attributes are returned.
1771 * @attr_mask: A bit-mask used to specify which attributes of the QP
1772 * are being modified.
1773 * @udata: pointer to user's input output buffer information
1774 * needed by the provider driver, or NULL for a kernel caller.
1775 * It returns 0 on success and returns appropriate error code on error.
1777 int ib_modify_qp_with_udata(struct ib_qp *ib_qp, struct ib_qp_attr *attr,
1778 int attr_mask, struct ib_udata *udata)
1780 return _ib_modify_qp(ib_qp->real_qp, attr, attr_mask, udata);
1782 EXPORT_SYMBOL(ib_modify_qp_with_udata);
1784 int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
1788 struct net_device *netdev;
1789 struct ethtool_link_ksettings lksettings;
1791 if (rdma_port_get_link_layer(dev, port_num) != IB_LINK_LAYER_ETHERNET)
1794 netdev = ib_device_get_netdev(dev, port_num);
1799 rc = __ethtool_get_link_ksettings(netdev, &lksettings);
1804 if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
1805 netdev_speed = lksettings.base.speed;
1807 netdev_speed = SPEED_1000;
1808 pr_warn("%s speed is unknown, defaulting to %d\n", netdev->name,
1812 if (netdev_speed <= SPEED_1000) {
1813 *width = IB_WIDTH_1X;
1814 *speed = IB_SPEED_SDR;
1815 } else if (netdev_speed <= SPEED_10000) {
1816 *width = IB_WIDTH_1X;
1817 *speed = IB_SPEED_FDR10;
1818 } else if (netdev_speed <= SPEED_20000) {
1819 *width = IB_WIDTH_4X;
1820 *speed = IB_SPEED_DDR;
1821 } else if (netdev_speed <= SPEED_25000) {
1822 *width = IB_WIDTH_1X;
1823 *speed = IB_SPEED_EDR;
1824 } else if (netdev_speed <= SPEED_40000) {
1825 *width = IB_WIDTH_4X;
1826 *speed = IB_SPEED_FDR10;
1828 *width = IB_WIDTH_4X;
1829 *speed = IB_SPEED_EDR;
1834 EXPORT_SYMBOL(ib_get_eth_speed);
1836 int ib_modify_qp(struct ib_qp *qp,
1837 struct ib_qp_attr *qp_attr,
1840 return _ib_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL);
1842 EXPORT_SYMBOL(ib_modify_qp);
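/*
 * Usage sketch (illustrative; moving a freshly created RC QP from RESET to
 * INIT with the mask required by qp_state_table above, placeholder
 * pkey/port/access values):
 *
 *	struct ib_qp_attr attr = {
 *		.qp_state		= IB_QPS_INIT,
 *		.pkey_index		= 0,
 *		.port_num		= 1,
 *		.qp_access_flags	= IB_ACCESS_REMOTE_WRITE,
 *	};
 *
 *	ret = ib_modify_qp(qp, &attr, IB_QP_STATE | IB_QP_PKEY_INDEX |
 *				      IB_QP_PORT | IB_QP_ACCESS_FLAGS);
 */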
1844 int ib_query_qp(struct ib_qp *qp,
1845 struct ib_qp_attr *qp_attr,
1847 struct ib_qp_init_attr *qp_init_attr)
1849 qp_attr->ah_attr.grh.sgid_attr = NULL;
1850 qp_attr->alt_ah_attr.grh.sgid_attr = NULL;
1852 return qp->device->ops.query_qp ?
1853 qp->device->ops.query_qp(qp->real_qp, qp_attr, qp_attr_mask,
1854 qp_init_attr) : -EOPNOTSUPP;
1856 EXPORT_SYMBOL(ib_query_qp);
1858 int ib_close_qp(struct ib_qp *qp)
1860 struct ib_qp *real_qp;
1861 unsigned long flags;
1863 real_qp = qp->real_qp;
1867 spin_lock_irqsave(&real_qp->device->qp_open_list_lock, flags);
1868 list_del(&qp->open_list);
1869 spin_unlock_irqrestore(&real_qp->device->qp_open_list_lock, flags);
1871 atomic_dec(&real_qp->usecnt);
1873 ib_close_shared_qp_security(qp->qp_sec);
1878 EXPORT_SYMBOL(ib_close_qp);
1880 static int __ib_destroy_shared_qp(struct ib_qp *qp)
1882 struct ib_xrcd *xrcd;
1883 struct ib_qp *real_qp;
1886 real_qp = qp->real_qp;
1887 xrcd = real_qp->xrcd;
1888 down_write(&xrcd->tgt_qps_rwsem);
1890 if (atomic_read(&real_qp->usecnt) == 0)
1891 xa_erase(&xrcd->tgt_qps, real_qp->qp_num);
1894 up_write(&xrcd->tgt_qps_rwsem);
1897 ret = ib_destroy_qp(real_qp);
1899 atomic_dec(&xrcd->usecnt);
1905 int ib_destroy_qp_user(struct ib_qp *qp, struct ib_udata *udata)
1907 const struct ib_gid_attr *alt_path_sgid_attr = qp->alt_path_sgid_attr;
1908 const struct ib_gid_attr *av_sgid_attr = qp->av_sgid_attr;
1910 struct ib_cq *scq, *rcq;
1912 struct ib_rwq_ind_table *ind_tbl;
1913 struct ib_qp_security *sec;
1916 WARN_ON_ONCE(qp->mrs_used > 0);
1918 if (atomic_read(&qp->usecnt))
1921 if (qp->real_qp != qp)
1922 return __ib_destroy_shared_qp(qp);
1928 ind_tbl = qp->rwq_ind_tbl;
1931 ib_destroy_qp_security_begin(sec);
1934 rdma_rw_cleanup_mrs(qp);
1936 rdma_counter_unbind_qp(qp, true);
1937 rdma_restrack_del(&qp->res);
1938 ret = qp->device->ops.destroy_qp(qp, udata);
1940 if (alt_path_sgid_attr)
1941 rdma_put_gid_attr(alt_path_sgid_attr);
1943 rdma_put_gid_attr(av_sgid_attr);
1945 atomic_dec(&pd->usecnt);
1947 atomic_dec(&scq->usecnt);
1949 atomic_dec(&rcq->usecnt);
1951 atomic_dec(&srq->usecnt);
1953 atomic_dec(&ind_tbl->usecnt);
1955 ib_destroy_qp_security_end(sec);
1958 ib_destroy_qp_security_abort(sec);
1963 EXPORT_SYMBOL(ib_destroy_qp_user);
1965 /* Completion queues */
1967 struct ib_cq *__ib_create_cq(struct ib_device *device,
1968 ib_comp_handler comp_handler,
1969 void (*event_handler)(struct ib_event *, void *),
1971 const struct ib_cq_init_attr *cq_attr,
1977 cq = rdma_zalloc_drv_obj(device, ib_cq);
1979 return ERR_PTR(-ENOMEM);
1981 cq->device = device;
1983 cq->comp_handler = comp_handler;
1984 cq->event_handler = event_handler;
1985 cq->cq_context = cq_context;
1986 atomic_set(&cq->usecnt, 0);
1987 cq->res.type = RDMA_RESTRACK_CQ;
1988 rdma_restrack_set_task(&cq->res, caller);
1990 ret = device->ops.create_cq(cq, cq_attr, NULL);
1993 return ERR_PTR(ret);
1996 rdma_restrack_kadd(&cq->res);
1999 EXPORT_SYMBOL(__ib_create_cq);
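/*
 * Usage sketch (illustrative): kernel consumers normally reach this through
 * the ib_create_cq() wrapper macro; my_comp_handler and my_context are
 * placeholder names:
 *
 *	struct ib_cq_init_attr cq_attr = { .cqe = 128, .comp_vector = 0 };
 *	struct ib_cq *cq = ib_create_cq(device, my_comp_handler, NULL,
 *					my_context, &cq_attr);
 */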
2001 int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
2006 return cq->device->ops.modify_cq ?
2007 cq->device->ops.modify_cq(cq, cq_count,
2008 cq_period) : -EOPNOTSUPP;
2010 EXPORT_SYMBOL(rdma_set_cq_moderation);
2012 int ib_destroy_cq_user(struct ib_cq *cq, struct ib_udata *udata)
2014 if (WARN_ON_ONCE(cq->shared))
2017 if (atomic_read(&cq->usecnt))
2020 rdma_restrack_del(&cq->res);
2021 cq->device->ops.destroy_cq(cq, udata);
2025 EXPORT_SYMBOL(ib_destroy_cq_user);
2027 int ib_resize_cq(struct ib_cq *cq, int cqe)
2032 return cq->device->ops.resize_cq ?
2033 cq->device->ops.resize_cq(cq, cqe, NULL) : -EOPNOTSUPP;
2035 EXPORT_SYMBOL(ib_resize_cq);
2037 /* Memory regions */
2039 struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
2040 u64 virt_addr, int access_flags)
2044 if (access_flags & IB_ACCESS_ON_DEMAND) {
2045 if (!(pd->device->attrs.device_cap_flags &
2046 IB_DEVICE_ON_DEMAND_PAGING)) {
2047 pr_debug("ODP support not available\n");
2048 return ERR_PTR(-EINVAL);
2052 mr = pd->device->ops.reg_user_mr(pd, start, length, virt_addr,
2053 access_flags, NULL);
2058 mr->device = pd->device;
2061 atomic_inc(&pd->usecnt);
2062 mr->res.type = RDMA_RESTRACK_MR;
2063 rdma_restrack_kadd(&mr->res);
2067 EXPORT_SYMBOL(ib_reg_user_mr);
2069 int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
2070 u32 flags, struct ib_sge *sg_list, u32 num_sge)
2072 if (!pd->device->ops.advise_mr)
2078 return pd->device->ops.advise_mr(pd, advice, flags, sg_list, num_sge,
2081 EXPORT_SYMBOL(ib_advise_mr);
2083 int ib_dereg_mr_user(struct ib_mr *mr, struct ib_udata *udata)
2085 struct ib_pd *pd = mr->pd;
2086 struct ib_dm *dm = mr->dm;
2087 struct ib_sig_attrs *sig_attrs = mr->sig_attrs;
2091 rdma_restrack_del(&mr->res);
2092 ret = mr->device->ops.dereg_mr(mr, udata);
2094 atomic_dec(&pd->usecnt);
2096 atomic_dec(&dm->usecnt);
2102 EXPORT_SYMBOL(ib_dereg_mr_user);
2105 * ib_alloc_mr() - Allocates a memory region
2106 * @pd: protection domain associated with the region
2107 * @mr_type: memory region type
2108 * @max_num_sg: maximum sg entries available for registration.
2111 * Memory registration page/sg lists must not exceed max_num_sg.
2112 * For mr_type IB_MR_TYPE_MEM_REG, the total length cannot exceed
2113 * max_num_sg * used_page_size.
2116 struct ib_mr *ib_alloc_mr(struct ib_pd *pd, enum ib_mr_type mr_type,
2121 if (!pd->device->ops.alloc_mr) {
2122 mr = ERR_PTR(-EOPNOTSUPP);
2126 if (mr_type == IB_MR_TYPE_INTEGRITY) {
2128 mr = ERR_PTR(-EINVAL);
2132 mr = pd->device->ops.alloc_mr(pd, mr_type, max_num_sg);
2136 mr->device = pd->device;
2140 atomic_inc(&pd->usecnt);
2141 mr->need_inval = false;
2142 mr->res.type = RDMA_RESTRACK_MR;
2143 rdma_restrack_kadd(&mr->res);
2145 mr->sig_attrs = NULL;
2148 trace_mr_alloc(pd, mr_type, max_num_sg, mr);
2151 EXPORT_SYMBOL(ib_alloc_mr);
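/*
 * Usage sketch (illustrative; allocating a fast-registration MR and mapping a
 * caller-prepared scatterlist into it, error handling trimmed):
 *
 *	struct ib_mr *mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 32);
 *	int n;
 *
 *	if (IS_ERR(mr))
 *		return PTR_ERR(mr);
 *	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
 *	...
 *	ib_dereg_mr(mr);
 */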
2154 * ib_alloc_mr_integrity() - Allocates an integrity memory region
2155 * @pd: protection domain associated with the region
2156 * @max_num_data_sg: maximum data sg entries available for registration
2157 * @max_num_meta_sg: maximum metadata sg entries available for
2161 * Memory registration page/sg lists must not exceed max_num_sg,
2162 * also the integrity page/sg lists must not exceed max_num_meta_sg.
2165 struct ib_mr *ib_alloc_mr_integrity(struct ib_pd *pd,
2166 u32 max_num_data_sg,
2167 u32 max_num_meta_sg)
2170 struct ib_sig_attrs *sig_attrs;
2172 if (!pd->device->ops.alloc_mr_integrity ||
2173 !pd->device->ops.map_mr_sg_pi) {
2174 mr = ERR_PTR(-EOPNOTSUPP);
2178 if (!max_num_meta_sg) {
2179 mr = ERR_PTR(-EINVAL);
2183 sig_attrs = kzalloc(sizeof(struct ib_sig_attrs), GFP_KERNEL);
2185 mr = ERR_PTR(-ENOMEM);
2189 mr = pd->device->ops.alloc_mr_integrity(pd, max_num_data_sg,
2196 mr->device = pd->device;
2200 atomic_inc(&pd->usecnt);
2201 mr->need_inval = false;
2202 mr->res.type = RDMA_RESTRACK_MR;
2203 rdma_restrack_kadd(&mr->res);
2204 mr->type = IB_MR_TYPE_INTEGRITY;
2205 mr->sig_attrs = sig_attrs;
2208 trace_mr_integ_alloc(pd, max_num_data_sg, max_num_meta_sg, mr);
2211 EXPORT_SYMBOL(ib_alloc_mr_integrity);
2213 /* Multicast groups */
2215 static bool is_valid_mcast_lid(struct ib_qp *qp, u16 lid)
2217 struct ib_qp_init_attr init_attr = {};
2218 struct ib_qp_attr attr = {};
2219 int num_eth_ports = 0;
2222 /* If QP state >= init, it is assigned to a port and we can check this
2225 if (!ib_query_qp(qp, &attr, IB_QP_STATE | IB_QP_PORT, &init_attr)) {
2226 if (attr.qp_state >= IB_QPS_INIT) {
2227 if (rdma_port_get_link_layer(qp->device, attr.port_num) !=
2228 IB_LINK_LAYER_INFINIBAND)
2234 /* Can't get a quick answer, iterate over all ports */
2235 for (port = 0; port < qp->device->phys_port_cnt; port++)
2236 if (rdma_port_get_link_layer(qp->device, port) !=
2237 IB_LINK_LAYER_INFINIBAND)
2240 /* If we have at least one Ethernet port, the RoCE annex declares that
2241 * multicast LID should be ignored. We can't tell at this step if the
2242 * QP belongs to an IB or Ethernet port.
2247 /* If all the ports are IB, we can check according to IB spec. */
2249 return !(lid < be16_to_cpu(IB_MULTICAST_LID_BASE) ||
2250 lid == be16_to_cpu(IB_LID_PERMISSIVE));
2253 int ib_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2257 if (!qp->device->ops.attach_mcast)
2260 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2261 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2264 ret = qp->device->ops.attach_mcast(qp, gid, lid);
2266 atomic_inc(&qp->usecnt);
2269 EXPORT_SYMBOL(ib_attach_mcast);
2271 int ib_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid)
2275 if (!qp->device->ops.detach_mcast)
2278 if (!rdma_is_multicast_addr((struct in6_addr *)gid->raw) ||
2279 qp->qp_type != IB_QPT_UD || !is_valid_mcast_lid(qp, lid))
2282 ret = qp->device->ops.detach_mcast(qp, gid, lid);
2284 atomic_dec(&qp->usecnt);
2287 EXPORT_SYMBOL(ib_detach_mcast);
2290 * ib_alloc_xrcd_user - Allocates an XRC domain.
2291 * @device: The device on which to allocate the XRC domain.
2292 * @inode: inode to connect XRCD
2293 * @udata: Valid user data or NULL for kernel object
2295 struct ib_xrcd *ib_alloc_xrcd_user(struct ib_device *device,
2296 struct inode *inode, struct ib_udata *udata)
2298 struct ib_xrcd *xrcd;
2301 if (!device->ops.alloc_xrcd)
2302 return ERR_PTR(-EOPNOTSUPP);
2304 xrcd = rdma_zalloc_drv_obj(device, ib_xrcd);
2306 return ERR_PTR(-ENOMEM);
2308 xrcd->device = device;
2309 xrcd->inode = inode;
2310 atomic_set(&xrcd->usecnt, 0);
2311 init_rwsem(&xrcd->tgt_qps_rwsem);
2312 xa_init(&xrcd->tgt_qps);
2314 ret = device->ops.alloc_xrcd(xrcd, udata);
2320 return ERR_PTR(ret);
2322 EXPORT_SYMBOL(ib_alloc_xrcd_user);
2325 * ib_dealloc_xrcd_user - Deallocates an XRC domain.
2326 * @xrcd: The XRC domain to deallocate.
2327 * @udata: Valid user data or NULL for kernel object
2329 int ib_dealloc_xrcd_user(struct ib_xrcd *xrcd, struct ib_udata *udata)
2331 if (atomic_read(&xrcd->usecnt))
2334 WARN_ON(!xa_empty(&xrcd->tgt_qps));
2335 xrcd->device->ops.dealloc_xrcd(xrcd, udata);
2339 EXPORT_SYMBOL(ib_dealloc_xrcd_user);
2342 * ib_create_wq - Creates a WQ associated with the specified protection
2344 * @pd: The protection domain associated with the WQ.
2345 * @wq_attr: A list of initial attributes required to create the
2346 * WQ. If WQ creation succeeds, then the attributes are updated to
2347 * the actual capabilities of the created WQ.
2349 * wq_attr->max_wr and wq_attr->max_sge determine
2350 * the requested size of the WQ, and are set to the actual values allocated
2352 * If ib_create_wq() succeeds, then max_wr and max_sge will always be
2353 * at least as large as the requested values.
2355 struct ib_wq *ib_create_wq(struct ib_pd *pd,
2356 struct ib_wq_init_attr *wq_attr)
2360 if (!pd->device->ops.create_wq)
2361 return ERR_PTR(-EOPNOTSUPP);
2363 wq = pd->device->ops.create_wq(pd, wq_attr, NULL);
2365 wq->event_handler = wq_attr->event_handler;
2366 wq->wq_context = wq_attr->wq_context;
2367 wq->wq_type = wq_attr->wq_type;
2368 wq->cq = wq_attr->cq;
2369 wq->device = pd->device;
2372 atomic_inc(&pd->usecnt);
2373 atomic_inc(&wq_attr->cq->usecnt);
2374 atomic_set(&wq->usecnt, 0);
2378 EXPORT_SYMBOL(ib_create_wq);
/**
 * ib_destroy_wq - Destroys the specified user WQ.
 * @wq: The WQ to destroy.
 * @udata: Valid user data
 */
int ib_destroy_wq(struct ib_wq *wq, struct ib_udata *udata)
{
	struct ib_cq *cq = wq->cq;
	struct ib_pd *pd = wq->pd;

	if (atomic_read(&wq->usecnt))
		return -EBUSY;

	wq->device->ops.destroy_wq(wq, udata);
	atomic_dec(&pd->usecnt);
	atomic_dec(&cq->usecnt);

	return 0;
}
EXPORT_SYMBOL(ib_destroy_wq);
/**
 * ib_modify_wq - Modifies the specified WQ.
 * @wq: The WQ to modify.
 * @wq_attr: On input, specifies the WQ attributes to modify.
 * @wq_attr_mask: A bit-mask used to specify which attributes of the WQ
 *   are being modified.
 * On output, the current values of the selected WQ attributes are returned.
 */
int ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
		 u32 wq_attr_mask)
{
	int err;

	if (!wq->device->ops.modify_wq)
		return -EOPNOTSUPP;

	err = wq->device->ops.modify_wq(wq, wq_attr, wq_attr_mask, NULL);
	return err;
}
EXPORT_SYMBOL(ib_modify_wq);
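
/*
 * Illustrative sketch (not part of the original file): creating a receive
 * WQ on an existing PD and CQ and moving it from RESET to RDY with
 * ib_modify_wq(). "pd" and "cq" are assumed to have been created by the
 * caller, the queue sizes are arbitrary example values, the NULL udata
 * assumes a kernel-side object, and the example_* name is hypothetical.
 */
static struct ib_wq *__maybe_unused example_create_rq_wq(struct ib_pd *pd,
							 struct ib_cq *cq)
{
	struct ib_wq_init_attr init_attr = {
		.wq_type = IB_WQT_RQ,
		.max_wr = 256,
		.max_sge = 1,
		.cq = cq,
	};
	struct ib_wq_attr attr = { .wq_state = IB_WQS_RDY };
	struct ib_wq *wq;
	int err;

	wq = ib_create_wq(pd, &init_attr);
	if (IS_ERR(wq))
		return wq;

	/* A newly created WQ starts in RESET; bring it to RDY before use. */
	err = ib_modify_wq(wq, &attr, IB_WQ_STATE);
	if (err) {
		ib_destroy_wq(wq, NULL);
		return ERR_PTR(err);
	}
	return wq;
}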
/**
 * ib_destroy_rwq_ind_table - Destroys the specified Indirection Table.
 * @rwq_ind_table: The Indirection Table to destroy.
 */
int ib_destroy_rwq_ind_table(struct ib_rwq_ind_table *rwq_ind_table)
{
	int i, err;
	u32 table_size = (1 << rwq_ind_table->log_ind_tbl_size);
	struct ib_wq **ind_tbl = rwq_ind_table->ind_tbl;

	if (atomic_read(&rwq_ind_table->usecnt))
		return -EBUSY;

	err = rwq_ind_table->device->ops.destroy_rwq_ind_table(rwq_ind_table);
	if (!err) {
		for (i = 0; i < table_size; i++)
			atomic_dec(&ind_tbl[i]->usecnt);
	}

	return err;
}
EXPORT_SYMBOL(ib_destroy_rwq_ind_table);
int ib_check_mr_status(struct ib_mr *mr, u32 check_mask,
		       struct ib_mr_status *mr_status)
{
	if (!mr->device->ops.check_mr_status)
		return -EOPNOTSUPP;

	return mr->device->ops.check_mr_status(mr, check_mask, mr_status);
}
EXPORT_SYMBOL(ib_check_mr_status);
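
/*
 * Illustrative sketch (not part of the original file): after an I/O that
 * used a signature/integrity MR completes, the caller can ask the device
 * whether a protection-information error was detected. "mr" is assumed to
 * be the IB_MR_TYPE_INTEGRITY MR used by the completed operation; the
 * example_* name is hypothetical.
 */
static int __maybe_unused example_check_sig_status(struct ib_mr *mr)
{
	struct ib_mr_status mr_status;
	int ret;

	ret = ib_check_mr_status(mr, IB_MR_CHECK_SIG_STATUS, &mr_status);
	if (ret)
		return ret;

	if (mr_status.fail_status & IB_MR_CHECK_SIG_STATUS)
		pr_warn("PI error: type %d at offset %llu\n",
			mr_status.sig_err.err_type,
			mr_status.sig_err.sig_err_offset);
	return 0;
}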
int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
			 int state)
{
	if (!device->ops.set_vf_link_state)
		return -EOPNOTSUPP;

	return device->ops.set_vf_link_state(device, vf, port, state);
}
EXPORT_SYMBOL(ib_set_vf_link_state);

int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
		     struct ifla_vf_info *info)
{
	if (!device->ops.get_vf_config)
		return -EOPNOTSUPP;

	return device->ops.get_vf_config(device, vf, port, info);
}
EXPORT_SYMBOL(ib_get_vf_config);

int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
		    struct ifla_vf_stats *stats)
{
	if (!device->ops.get_vf_stats)
		return -EOPNOTSUPP;

	return device->ops.get_vf_stats(device, vf, port, stats);
}
EXPORT_SYMBOL(ib_get_vf_stats);

int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
		   int type)
{
	if (!device->ops.set_vf_guid)
		return -EOPNOTSUPP;

	return device->ops.set_vf_guid(device, vf, port, guid, type);
}
EXPORT_SYMBOL(ib_set_vf_guid);

int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
		   struct ifla_vf_guid *node_guid,
		   struct ifla_vf_guid *port_guid)
{
	if (!device->ops.get_vf_guid)
		return -EOPNOTSUPP;

	return device->ops.get_vf_guid(device, vf, port, node_guid, port_guid);
}
EXPORT_SYMBOL(ib_get_vf_guid);
/**
 * ib_map_mr_sg_pi() - Map the dma mapped SG lists for PI (protection
 *     information) and set an appropriate memory region for registration.
 * @mr:             memory region
 * @data_sg:        dma mapped scatterlist for data
 * @data_sg_nents:  number of entries in data_sg
 * @data_sg_offset: offset in bytes into data_sg
 * @meta_sg:        dma mapped scatterlist for metadata
 * @meta_sg_nents:  number of entries in meta_sg
 * @meta_sg_offset: offset in bytes into meta_sg
 * @page_size:      page vector desired page size
 *
 * Constraints:
 * - The MR must be allocated with type IB_MR_TYPE_INTEGRITY.
 *
 * Return: 0 on success.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg_pi(struct ib_mr *mr, struct scatterlist *data_sg,
		    int data_sg_nents, unsigned int *data_sg_offset,
		    struct scatterlist *meta_sg, int meta_sg_nents,
		    unsigned int *meta_sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->ops.map_mr_sg_pi ||
		     WARN_ON_ONCE(mr->type != IB_MR_TYPE_INTEGRITY)))
		return -EOPNOTSUPP;

	mr->page_size = page_size;

	return mr->device->ops.map_mr_sg_pi(mr, data_sg, data_sg_nents,
					    data_sg_offset, meta_sg,
					    meta_sg_nents, meta_sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg_pi);
/**
 * ib_map_mr_sg() - Map the largest prefix of a dma mapped SG list
 *     and set it in the memory region.
 * @mr:         memory region
 * @sg:         dma mapped scatterlist
 * @sg_nents:   number of entries in sg
 * @sg_offset:  offset in bytes into sg
 * @page_size:  page vector desired page size
 *
 * Constraints:
 *
 * - The first sg element is allowed to have an offset.
 * - Each sg element must either be aligned to page_size or virtually
 *   contiguous to the previous element. In case an sg element has a
 *   non-contiguous offset, the mapping prefix will not include it.
 * - The last sg element is allowed to have length less than page_size.
 * - If the sg_nents total byte length exceeds the mr max_num_sg * page_size
 *   then only max_num_sg entries will be mapped.
 * - If the MR was allocated with type IB_MR_TYPE_SG_GAPS, none of these
 *   constraints hold and the page_size argument is ignored.
 *
 * Returns the number of sg elements that were mapped to the memory region.
 *
 * After this completes successfully, the memory region
 * is ready for registration.
 */
int ib_map_mr_sg(struct ib_mr *mr, struct scatterlist *sg, int sg_nents,
		 unsigned int *sg_offset, unsigned int page_size)
{
	if (unlikely(!mr->device->ops.map_mr_sg))
		return -EOPNOTSUPP;

	mr->page_size = page_size;

	return mr->device->ops.map_mr_sg(mr, sg, sg_nents, sg_offset);
}
EXPORT_SYMBOL(ib_map_mr_sg);
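
/*
 * Illustrative sketch (not part of the original file): the usual fast
 * registration flow maps a DMA-mapped sg list into an IB_MR_TYPE_MEM_REG
 * MR and then posts an IB_WR_REG_MR work request built from the result.
 * "qp", "mr", "sg", "sg_nents" and "reg_cqe" are assumed to be set up by
 * the caller; PAGE_SIZE is used as the page-vector page size and the
 * example_* name is hypothetical.
 */
static int __maybe_unused example_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
					   struct scatterlist *sg, int sg_nents,
					   struct ib_cqe *reg_cqe)
{
	struct ib_reg_wr reg_wr = {};
	int n;

	n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
	if (n < 0)
		return n;
	if (n < sg_nents)
		return -EINVAL;	/* a real caller would handle a partial map */

	/* Register the mapped range via the send queue. */
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.wr_cqe = reg_cqe;
	reg_wr.mr = mr;
	reg_wr.key = mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ;

	return ib_post_send(qp, &reg_wr.wr, NULL);
}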
/**
 * ib_sg_to_pages() - Convert the largest prefix of a sg list
 *     to a page vector
 * @mr:            memory region
 * @sgl:           dma mapped scatterlist
 * @sg_nents:      number of entries in sg
 * @sg_offset_p:   ==== =======================================================
 *                 IN   start offset in bytes into sg
 *                 OUT  offset in bytes for element n of the sg of the first
 *                      byte that has not been processed where n is the return
 *                      value of this function.
 *                 ==== =======================================================
 * @set_page:      driver page assignment function pointer
 *
 * Core service helper for drivers to convert the largest
 * prefix of given sg list to a page vector. The sg list
 * prefix converted is the prefix that meets the requirements
 * of ib_map_mr_sg.
 *
 * Returns the number of sg elements that were assigned to
 * a page vector.
 */
int ib_sg_to_pages(struct ib_mr *mr, struct scatterlist *sgl, int sg_nents,
		unsigned int *sg_offset_p, int (*set_page)(struct ib_mr *, u64))
{
	struct scatterlist *sg;
	u64 last_end_dma_addr = 0;
	unsigned int sg_offset = sg_offset_p ? *sg_offset_p : 0;
	unsigned int last_page_off = 0;
	u64 page_mask = ~((u64)mr->page_size - 1);
	int i, ret;

	if (unlikely(sg_nents <= 0 || sg_offset > sg_dma_len(&sgl[0])))
		return -EINVAL;

	mr->iova = sg_dma_address(&sgl[0]) + sg_offset;
	mr->length = 0;

	for_each_sg(sgl, sg, sg_nents, i) {
		u64 dma_addr = sg_dma_address(sg) + sg_offset;
		u64 prev_addr = dma_addr;
		unsigned int dma_len = sg_dma_len(sg) - sg_offset;
		u64 end_dma_addr = dma_addr + dma_len;
		u64 page_addr = dma_addr & page_mask;

		/*
		 * For the second and later elements, check whether either the
		 * end of element i-1 or the start of element i is not aligned
		 * on a page boundary.
		 */
		if (i && (last_page_off != 0 || page_addr != dma_addr)) {
			/* Stop mapping if there is a gap. */
			if (last_end_dma_addr != dma_addr)
				break;

			/*
			 * Coalesce this element with the last. If it is small
			 * enough just update mr->length. Otherwise start
			 * mapping from the next page.
			 */
			goto next_page;
		}

		do {
			ret = set_page(mr, page_addr);
			if (unlikely(ret < 0)) {
				sg_offset = prev_addr - sg_dma_address(sg);
				mr->length += prev_addr - dma_addr;
				if (sg_offset_p)
					*sg_offset_p = sg_offset;
				return i || sg_offset ? i : ret;
			}
			prev_addr = page_addr;
next_page:
			page_addr += mr->page_size;
		} while (page_addr < end_dma_addr);

		mr->length += dma_len;
		last_end_dma_addr = end_dma_addr;
		last_page_off = end_dma_addr & ~page_mask;

		sg_offset = 0;
	}

	if (sg_offset_p)
		*sg_offset_p = 0;
	return i;
}
EXPORT_SYMBOL(ib_sg_to_pages);
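
/*
 * Illustrative sketch (not part of the original file): how a driver's
 * map_mr_sg handler typically uses ib_sg_to_pages(). The example_mr
 * structure, its page array and the example_* names are hypothetical and
 * stand in for a driver's private MR representation.
 */
struct example_mr {
	struct ib_mr ibmr;
	u64 *pages;
	int npages;
	int max_pages;
};

static int example_set_page(struct ib_mr *ibmr, u64 addr)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);

	/* Reject the page if the device page list is already full. */
	if (mr->npages == mr->max_pages)
		return -ENOMEM;

	mr->pages[mr->npages++] = addr;
	return 0;
}

static int __maybe_unused example_map_mr_sg(struct ib_mr *ibmr,
					    struct scatterlist *sg, int sg_nents,
					    unsigned int *sg_offset)
{
	struct example_mr *mr = container_of(ibmr, struct example_mr, ibmr);

	mr->npages = 0;
	return ib_sg_to_pages(ibmr, sg, sg_nents, sg_offset, example_set_page);
}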
struct ib_drain_cqe {
	struct ib_cqe cqe;
	struct completion done;
};

static void ib_drain_qp_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_drain_cqe *cqe = container_of(wc->wr_cqe, struct ib_drain_cqe,
						cqe);

	complete(&cqe->done);
}

/*
 * Post a WR and block until its completion is reaped for the SQ.
 */
static void __ib_drain_sq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->send_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe sdrain;
	struct ib_rdma_wr swr = {
		.wr = {
			.next = NULL,
			{ .wr_cqe	= &sdrain.cqe, },
			.opcode	= IB_WR_RDMA_WRITE,
		},
	};
	int ret;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	sdrain.cqe.done = ib_drain_qp_done;
	init_completion(&sdrain.done);

	ret = ib_post_send(qp, &swr.wr, NULL);
	if (ret) {
		WARN_ONCE(ret, "failed to drain send queue: %d\n", ret);
		return;
	}

	if (cq->poll_ctx == IB_POLL_DIRECT)
		while (wait_for_completion_timeout(&sdrain.done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
	else
		wait_for_completion(&sdrain.done);
}
/*
 * Post a WR and block until its completion is reaped for the RQ.
 */
static void __ib_drain_rq(struct ib_qp *qp)
{
	struct ib_cq *cq = qp->recv_cq;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_ERR };
	struct ib_drain_cqe rdrain;
	struct ib_recv_wr rwr = {};
	int ret;

	ret = ib_modify_qp(qp, &attr, IB_QP_STATE);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	rwr.wr_cqe = &rdrain.cqe;
	rdrain.cqe.done = ib_drain_qp_done;
	init_completion(&rdrain.done);

	ret = ib_post_recv(qp, &rwr, NULL);
	if (ret) {
		WARN_ONCE(ret, "failed to drain recv queue: %d\n", ret);
		return;
	}

	if (cq->poll_ctx == IB_POLL_DIRECT)
		while (wait_for_completion_timeout(&rdrain.done, HZ / 10) <= 0)
			ib_process_cq_direct(cq, -1);
	else
		wait_for_completion(&rdrain.done);
}
/**
 * ib_drain_sq() - Block until all SQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_sq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and SQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_sq(struct ib_qp *qp)
{
	if (qp->device->ops.drain_sq)
		qp->device->ops.drain_sq(qp);
	else
		__ib_drain_sq(qp);
	trace_cq_drain_complete(qp->send_cq);
}
EXPORT_SYMBOL(ib_drain_sq);
/**
 * ib_drain_rq() - Block until all RQ CQEs have been consumed by the
 *		   application.
 * @qp:            queue pair to drain
 *
 * If the device has a provider-specific drain function, then
 * call that.  Otherwise call the generic drain function
 * __ib_drain_rq().
 *
 * The caller must:
 *
 * ensure there is room in the CQ and RQ for the drain work request and
 * completion.
 *
 * allocate the CQ using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_rq(struct ib_qp *qp)
{
	if (qp->device->ops.drain_rq)
		qp->device->ops.drain_rq(qp);
	else
		__ib_drain_rq(qp);
	trace_cq_drain_complete(qp->recv_cq);
}
EXPORT_SYMBOL(ib_drain_rq);
/**
 * ib_drain_qp() - Block until all CQEs have been consumed by the
 *		   application on both the RQ and SQ.
 * @qp:            queue pair to drain
 *
 * The caller must:
 *
 * ensure there is room in the CQ(s), SQ, and RQ for drain work requests
 * and completions.
 *
 * allocate the CQs using ib_alloc_cq().
 *
 * ensure that there are no other contexts that are posting WRs concurrently.
 * Otherwise the drain is not guaranteed.
 */
void ib_drain_qp(struct ib_qp *qp)
{
	ib_drain_sq(qp);
	if (!qp->srq)
		ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
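
/*
 * Illustrative sketch (not part of the original file): a ULP tearing down
 * a connection typically stops posting new work, drains the QP so that all
 * outstanding WRs have completed, and only then frees resources that those
 * WRs reference. "qp" is assumed to use CQs allocated with ib_alloc_cq();
 * the example_* name is hypothetical.
 */
static void __maybe_unused example_teardown_qp(struct ib_qp *qp)
{
	/* No new WRs may be posted from this point on. */
	ib_drain_qp(qp);

	/* All SQ/RQ completions have been reaped; safe to destroy. */
	ib_destroy_qp(qp);
}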
struct net_device *rdma_alloc_netdev(struct ib_device *device, u8 port_num,
				     enum rdma_netdev_t type, const char *name,
				     unsigned char name_assign_type,
				     void (*setup)(struct net_device *))
{
	struct rdma_netdev_alloc_params params;
	struct net_device *netdev;
	int rc;

	if (!device->ops.rdma_netdev_get_params)
		return ERR_PTR(-EOPNOTSUPP);

	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
						&params);
	if (rc)
		return ERR_PTR(rc);

	netdev = alloc_netdev_mqs(params.sizeof_priv, name, name_assign_type,
				  setup, params.txqs, params.rxqs);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	return netdev;
}
EXPORT_SYMBOL(rdma_alloc_netdev);
int rdma_init_netdev(struct ib_device *device, u8 port_num,
		     enum rdma_netdev_t type, const char *name,
		     unsigned char name_assign_type,
		     void (*setup)(struct net_device *),
		     struct net_device *netdev)
{
	struct rdma_netdev_alloc_params params;
	int rc;

	if (!device->ops.rdma_netdev_get_params)
		return -EOPNOTSUPP;

	rc = device->ops.rdma_netdev_get_params(device, port_num, type,
						&params);
	if (rc)
		return rc;

	return params.initialize_rdma_netdev(device, port_num,
					     netdev, params.param);
}
EXPORT_SYMBOL(rdma_init_netdev);
void __rdma_block_iter_start(struct ib_block_iter *biter,
			     struct scatterlist *sglist, unsigned int nents,
			     unsigned long pgsz)
{
	memset(biter, 0, sizeof(struct ib_block_iter));
	biter->__sg = sglist;
	biter->__sg_nents = nents;

	/* Driver provides best block size to use */
	biter->__pg_bit = __fls(pgsz);
}
EXPORT_SYMBOL(__rdma_block_iter_start);
bool __rdma_block_iter_next(struct ib_block_iter *biter)
{
	unsigned int block_offset;

	if (!biter->__sg_nents || !biter->__sg)
		return false;

	biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
	block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
	biter->__sg_advance += BIT_ULL(biter->__pg_bit) - block_offset;

	if (biter->__sg_advance >= sg_dma_len(biter->__sg)) {
		biter->__sg_advance = 0;
		biter->__sg = sg_next(biter->__sg);
		biter->__sg_nents--;
	}

	return true;
}
EXPORT_SYMBOL(__rdma_block_iter_next);
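
/*
 * Illustrative sketch (not part of the original file): drivers normally
 * consume the block iterator through the rdma_for_each_block() helper to
 * walk a DMA-mapped sg list in aligned blocks of the device-supported page
 * size. "sg", "nents" and "pgsz" are assumed to come from the caller; the
 * example_* name is hypothetical.
 */
static void __maybe_unused example_walk_blocks(struct scatterlist *sg,
					       unsigned int nents,
					       unsigned long pgsz)
{
	struct ib_block_iter biter;

	rdma_for_each_block(sg, &biter, nents, pgsz) {
		dma_addr_t addr = rdma_block_iter_dma_address(&biter);

		pr_debug("block at %pad\n", &addr);
	}
}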