/*
 * Copyright (c) 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>
#include <rdma/ib_sa.h>
#include <rdma/ib_cache.h>

#include <linux/random.h>
#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>
#include <rdma/ib_pma.h>
#include <linux/ip.h>
#include <net/ipv6.h>

#include <linux/mlx4/driver.h>
#include "mlx4_ib.h"

enum {
        MLX4_IB_VENDOR_CLASS1 = 0x9,
        MLX4_IB_VENDOR_CLASS2 = 0xa
};

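/*
 * Tunnel WRID layout (as encoded by the macros below): the buffer index
 * lives in the low 32 bits, the tunnel QP number (0 or 1) in bits 32-33,
 * and bit 34 flags a receive completion.
 */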
#define MLX4_TUN_SEND_WRID_SHIFT 34
#define MLX4_TUN_QPN_SHIFT 32
#define MLX4_TUN_WRID_RECV (((u64) 1) << MLX4_TUN_SEND_WRID_SHIFT)
#define MLX4_TUN_SET_WRID_QPN(a) (((u64) ((a) & 0x3)) << MLX4_TUN_QPN_SHIFT)

#define MLX4_TUN_IS_RECV(a)  (((a) >> MLX4_TUN_SEND_WRID_SHIFT) & 0x1)
#define MLX4_TUN_WRID_QPN(a) (((a) >> MLX4_TUN_QPN_SHIFT) & 0x3)

/* Port mgmt change event handling */

#define GET_BLK_PTR_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.block_ptr)
#define GET_MASK_FROM_EQE(eqe) be32_to_cpu(eqe->event.port_mgmt_change.params.tbl_change_info.tbl_entries_mask)
#define NUM_IDX_IN_PKEY_TBL_BLK 32
#define GUID_TBL_ENTRY_SIZE 8      /* size in bytes */
#define GUID_TBL_BLK_NUM_ENTRIES 8
#define GUID_TBL_BLK_SIZE (GUID_TBL_ENTRY_SIZE * GUID_TBL_BLK_NUM_ENTRIES)

struct mlx4_mad_rcv_buf {
        struct ib_grh grh;
        u8 payload[256];
} __packed;

struct mlx4_mad_snd_buf {
        u8 payload[256];
} __packed;

struct mlx4_tunnel_mad {
        struct ib_grh grh;
        struct mlx4_ib_tunnel_header hdr;
        struct ib_mad mad;
} __packed;

struct mlx4_rcv_tunnel_mad {
        struct mlx4_rcv_tunnel_hdr hdr;
        struct ib_grh grh;
        struct ib_mad mad;
} __packed;

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num);
static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num);
static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
                                int block, u32 change_bitmap);

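/*
 * Generate a node GUID with the OpenIB OUI in the high-order bytes and a
 * random value in the low 32 bits.
 */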
__be64 mlx4_ib_gen_node_guid(void)
{
#define NODE_GUID_HI    ((u64) (((u64)IB_OPENIB_OUI) << 40))
        return cpu_to_be64(NODE_GUID_HI | prandom_u32());
}

__be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx)
{
        return cpu_to_be64(atomic_inc_return(&ctx->tid)) |
                cpu_to_be64(0xff00000000000000LL);
}

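/*
 * Issue a MAD_IFC firmware command.  The op_modifier bits set below select
 * M_Key/B_Key checking (0x1/0x2), whether work-completion info is passed in
 * the extended inbox (0x4), and network vs. host view (0x8).
 */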
int mlx4_MAD_IFC(struct mlx4_ib_dev *dev, int mad_ifc_flags,
                 int port, const struct ib_wc *in_wc,
                 const struct ib_grh *in_grh,
                 const void *in_mad, void *response_mad)
{
        struct mlx4_cmd_mailbox *inmailbox, *outmailbox;
        void *inbox;
        int err;
        u32 in_modifier = port;
        u8 op_modifier = 0;

        inmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
        if (IS_ERR(inmailbox))
                return PTR_ERR(inmailbox);
        inbox = inmailbox->buf;

        outmailbox = mlx4_alloc_cmd_mailbox(dev->dev);
        if (IS_ERR(outmailbox)) {
                mlx4_free_cmd_mailbox(dev->dev, inmailbox);
                return PTR_ERR(outmailbox);
        }

        memcpy(inbox, in_mad, 256);

        /*
         * Key check traps can't be generated unless we have in_wc to
         * tell us where to send the trap.
         */
        if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_MKEY) || !in_wc)
                op_modifier |= 0x1;
        if ((mad_ifc_flags & MLX4_MAD_IFC_IGNORE_BKEY) || !in_wc)
                op_modifier |= 0x2;
        if (mlx4_is_mfunc(dev->dev) &&
            (mad_ifc_flags & MLX4_MAD_IFC_NET_VIEW || in_wc))
                op_modifier |= 0x8;

        if (in_wc) {
                struct {
                        __be32          my_qpn;
                        u32             reserved1;
                        __be32          rqpn;
                        u8              sl;
                        u8              g_path;
                        u16             reserved2[2];
                        __be16          pkey;
                        u32             reserved3[11];
                        u8              grh[40];
                } *ext_info;

                memset(inbox + 256, 0, 256);
                ext_info = inbox + 256;

                ext_info->my_qpn = cpu_to_be32(in_wc->qp->qp_num);
                ext_info->rqpn   = cpu_to_be32(in_wc->src_qp);
                ext_info->sl     = in_wc->sl << 4;
                ext_info->g_path = in_wc->dlid_path_bits |
                        (in_wc->wc_flags & IB_WC_GRH ? 0x80 : 0);
                ext_info->pkey   = cpu_to_be16(in_wc->pkey_index);

                if (in_grh)
                        memcpy(ext_info->grh, in_grh, 40);

                op_modifier |= 0x4;

                in_modifier |= in_wc->slid << 16;
        }

        err = mlx4_cmd_box(dev->dev, inmailbox->dma, outmailbox->dma, in_modifier,
                           mlx4_is_master(dev->dev) ? (op_modifier & ~0x8) : op_modifier,
                           MLX4_CMD_MAD_IFC, MLX4_CMD_TIME_CLASS_C,
                           (op_modifier & 0x8) ? MLX4_CMD_NATIVE : MLX4_CMD_WRAPPED);

        if (!err)
                memcpy(response_mad, outmailbox->buf, 256);

        mlx4_free_cmd_mailbox(dev->dev, inmailbox);
        mlx4_free_cmd_mailbox(dev->dev, outmailbox);

        return err;
}

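/* Cache an address handle for the SM so that traps can be forwarded to it. */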
static void update_sm_ah(struct mlx4_ib_dev *dev, u8 port_num, u16 lid, u8 sl)
{
        struct ib_ah *new_ah;
        struct rdma_ah_attr ah_attr;
        unsigned long flags;

        if (!dev->send_agent[port_num - 1][0])
                return;

        memset(&ah_attr, 0, sizeof ah_attr);
        ah_attr.type = rdma_ah_find_type(&dev->ib_dev, port_num);
        rdma_ah_set_dlid(&ah_attr, lid);
        rdma_ah_set_sl(&ah_attr, sl);
        rdma_ah_set_port_num(&ah_attr, port_num);

        new_ah = rdma_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
                                &ah_attr);
        if (IS_ERR(new_ah))
                return;

        spin_lock_irqsave(&dev->sm_lock, flags);
        if (dev->sm_ah[port_num - 1])
                rdma_destroy_ah(dev->sm_ah[port_num - 1]);
        dev->sm_ah[port_num - 1] = new_ah;
        spin_unlock_irqrestore(&dev->sm_lock, flags);
}

/*
 * Snoop SM MADs for port info, GUID info, and P_Key table sets, so we can
 * synthesize LID change, Client-Rereg, GID change, and P_Key change events.
 */
static void smp_snoop(struct ib_device *ibdev, u8 port_num, const struct ib_mad *mad,
                      u16 prev_lid)
{
        struct ib_port_info *pinfo;
        u16 lid;
        __be16 *base;
        u32 bn, pkey_change_bitmap;
        int i;

        struct mlx4_ib_dev *dev = to_mdev(ibdev);

        if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
             mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
            mad->mad_hdr.method == IB_MGMT_METHOD_SET)
                switch (mad->mad_hdr.attr_id) {
                case IB_SMP_ATTR_PORT_INFO:
                        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
                                return;
                        pinfo = (struct ib_port_info *) ((struct ib_smp *) mad)->data;
                        lid = be16_to_cpu(pinfo->lid);

                        update_sm_ah(dev, port_num,
                                     be16_to_cpu(pinfo->sm_lid),
                                     pinfo->neighbormtu_mastersmsl & 0xf);

                        if (pinfo->clientrereg_resv_subnetto & 0x80)
                                handle_client_rereg_event(dev, port_num);

                        if (prev_lid != lid)
                                handle_lid_change_event(dev, port_num);
                        break;

                case IB_SMP_ATTR_PKEY_TABLE:
                        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
                                return;
                        if (!mlx4_is_mfunc(dev->dev)) {
                                mlx4_ib_dispatch_event(dev, port_num,
                                                       IB_EVENT_PKEY_CHANGE);
                                break;
                        }

                        /* at this point, we are running in the master.
                         * Slaves do not receive SMPs.
                         */
                        bn  = be32_to_cpu(((struct ib_smp *)mad)->attr_mod) & 0xFFFF;
                        base = (__be16 *) &(((struct ib_smp *)mad)->data[0]);
                        pkey_change_bitmap = 0;
                        for (i = 0; i < 32; i++) {
                                pr_debug("PKEY[%d] = x%x\n",
                                         i + bn*32, be16_to_cpu(base[i]));
                                if (be16_to_cpu(base[i]) !=
                                    dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32]) {
                                        pkey_change_bitmap |= (1 << i);
                                        dev->pkeys.phys_pkey_cache[port_num - 1][i + bn*32] =
                                                be16_to_cpu(base[i]);
                                }
                        }
                        pr_debug("PKEY Change event: port=%d, block=0x%x, change_bitmap=0x%x\n",
                                 port_num, bn, pkey_change_bitmap);

                        if (pkey_change_bitmap) {
                                mlx4_ib_dispatch_event(dev, port_num,
                                                       IB_EVENT_PKEY_CHANGE);
                                if (!dev->sriov.is_going_down)
                                        __propagate_pkey_ev(dev, port_num, bn,
                                                            pkey_change_bitmap);
                        }
                        break;

                case IB_SMP_ATTR_GUID_INFO:
                        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV)
                                return;
                        /* paravirtualized master's guid is guid 0 -- does not change */
                        if (!mlx4_is_master(dev->dev))
                                mlx4_ib_dispatch_event(dev, port_num,
                                                       IB_EVENT_GID_CHANGE);
                        /* if master, notify relevant slaves */
                        if (mlx4_is_master(dev->dev) &&
                            !dev->sriov.is_going_down) {
                                bn = be32_to_cpu(((struct ib_smp *)mad)->attr_mod);
                                mlx4_ib_update_cache_on_guid_change(dev, bn, port_num,
                                                                    (u8 *)(&((struct ib_smp *)mad)->data));
                                mlx4_ib_notify_slaves_on_guid_change(dev, bn, port_num,
                                                                     (u8 *)(&((struct ib_smp *)mad)->data));
                        }
                        break;

                case IB_SMP_ATTR_SL_TO_VL_TABLE:
                        /* cache sl to vl mapping changes for use in
                         * filling QP1 LRH VL field when sending packets
                         */
                        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_PORT_MNG_CHG_EV &&
                            dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)
                                return;
                        if (!mlx4_is_slave(dev->dev)) {
                                union sl2vl_tbl_to_u64 sl2vl64;
                                int jj;

                                for (jj = 0; jj < 8; jj++) {
                                        sl2vl64.sl8[jj] = ((struct ib_smp *)mad)->data[jj];
                                        pr_debug("port %u, sl2vl[%d] = %02x\n",
                                                 port_num, jj, sl2vl64.sl8[jj]);
                                }
                                atomic64_set(&dev->sl2vl[port_num - 1], sl2vl64.sl64);
                        }
                        break;

                default:
                        break;
                }
}

static void __propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
                                int block, u32 change_bitmap)
{
        int i, ix, slave, err;
        int have_event = 0;

        for (slave = 0; slave < dev->dev->caps.sqp_demux; slave++) {
                if (slave == mlx4_master_func_num(dev->dev))
                        continue;
                if (!mlx4_is_slave_active(dev->dev, slave))
                        continue;

                have_event = 0;
                for (i = 0; i < 32; i++) {
                        if (!(change_bitmap & (1 << i)))
                                continue;
                        for (ix = 0;
                             ix < dev->dev->caps.pkey_table_len[port_num]; ix++) {
                                if (dev->pkeys.virt2phys_pkey[slave][port_num - 1]
                                    [ix] == i + 32 * block) {
                                        err = mlx4_gen_pkey_eqe(dev->dev, slave, port_num);
                                        pr_debug("propagate_pkey_ev: slave %d, port %d, ix %d (%d)\n",
                                                 slave, port_num, ix, err);
                                        have_event = 1;
                                        break;
                                }
                        }
                        if (have_event)
                                break;
                }
        }
}

static void node_desc_override(struct ib_device *dev,
                               struct ib_mad *mad)
{
        unsigned long flags;

        if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
             mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
            mad->mad_hdr.method == IB_MGMT_METHOD_GET_RESP &&
            mad->mad_hdr.attr_id == IB_SMP_ATTR_NODE_DESC) {
                spin_lock_irqsave(&to_mdev(dev)->sm_lock, flags);
                memcpy(((struct ib_smp *) mad)->data, dev->node_desc,
                       IB_DEVICE_NODE_DESC_MAX);
                spin_unlock_irqrestore(&to_mdev(dev)->sm_lock, flags);
        }
}

static void forward_trap(struct mlx4_ib_dev *dev, u8 port_num, const struct ib_mad *mad)
{
        int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED;
        struct ib_mad_send_buf *send_buf;
        struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn];
        int ret;
        unsigned long flags;

        if (agent) {
                send_buf = ib_create_send_mad(agent, qpn, 0, 0, IB_MGMT_MAD_HDR,
                                              IB_MGMT_MAD_DATA, GFP_ATOMIC,
                                              IB_MGMT_BASE_VERSION);
                if (IS_ERR(send_buf))
                        return;
                /*
                 * We rely here on the fact that MLX QPs don't use the
                 * address handle after the send is posted (this is
                 * wrong following the IB spec strictly, but we know
                 * it's OK for our devices).
                 */
                spin_lock_irqsave(&dev->sm_lock, flags);
                memcpy(send_buf->mad, mad, sizeof *mad);
                if ((send_buf->ah = dev->sm_ah[port_num - 1]))
                        ret = ib_post_send_mad(send_buf, NULL);
                else
                        ret = -EINVAL;
                spin_unlock_irqrestore(&dev->sm_lock, flags);

                if (ret)
                        ib_free_send_mad(send_buf);
        }
}

static int mlx4_ib_demux_sa_handler(struct ib_device *ibdev, int port, int slave,
                                    struct ib_sa_mad *sa_mad)
{
        int ret = 0;

        /* dispatch to different sa handlers */
        switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
        case IB_SA_ATTR_MC_MEMBER_REC:
                ret = mlx4_ib_mcg_demux_handler(ibdev, port, slave, sa_mad);
                break;
        default:
                break;
        }
        return ret;
}

int mlx4_ib_find_real_gid(struct ib_device *ibdev, u8 port, __be64 guid)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        int i;

        for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
                if (dev->sriov.demux[port - 1].guid_cache[i] == guid)
                        return i;
        }
        return -1;
}

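/*
 * Find the slave's pkey index for the given pkey value, preferring a
 * full-membership entry (bit 15 set) over the first partial match.
 */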
static int find_slave_port_pkey_ix(struct mlx4_ib_dev *dev, int slave,
                                   u8 port, u16 pkey, u16 *ix)
{
        int i, ret;
        u8 unassigned_pkey_ix, pkey_ix, partial_ix = 0xFF;
        u16 slot_pkey;

        if (slave == mlx4_master_func_num(dev->dev))
                return ib_find_cached_pkey(&dev->ib_dev, port, pkey, ix);

        unassigned_pkey_ix = dev->dev->phys_caps.pkey_phys_table_len[port] - 1;

        for (i = 0; i < dev->dev->caps.pkey_table_len[port]; i++) {
                if (dev->pkeys.virt2phys_pkey[slave][port - 1][i] == unassigned_pkey_ix)
                        continue;

                pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][i];

                ret = ib_get_cached_pkey(&dev->ib_dev, port, pkey_ix, &slot_pkey);
                if (ret)
                        continue;
                if ((slot_pkey & 0x7FFF) == (pkey & 0x7FFF)) {
                        if (slot_pkey & 0x8000) {
                                *ix = (u16) pkey_ix;
                                return 0;
                        } else {
                                /* take first partial pkey index found */
                                if (partial_ix == 0xFF)
                                        partial_ix = pkey_ix;
                        }
                }
        }

        if (partial_ix < 0xFF) {
                *ix = (u16) partial_ix;
                return 0;
        }

        return -EINVAL;
}

static int get_gids_from_l3_hdr(struct ib_grh *grh, union ib_gid *sgid,
                                union ib_gid *dgid)
{
        int version = ib_get_rdma_header_version((const union rdma_network_hdr *)grh);
        enum rdma_network_type net_type;

        if (version == 4)
                net_type = RDMA_NETWORK_IPV4;
        else if (version == 6)
                net_type = RDMA_NETWORK_IPV6;
        else
                return -EINVAL;

        return ib_get_gids_from_rdma_hdr((union rdma_network_hdr *)grh, net_type,
                                         sgid, dgid);
}

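/*
 * Tunnel a MAD received on the wire to the owning slave: wrap it in a
 * mlx4_rcv_tunnel_mad and post it to the slave's proxy (tunnel) QP.
 */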
int mlx4_ib_send_to_slave(struct mlx4_ib_dev *dev, int slave, u8 port,
                          enum ib_qp_type dest_qpt, struct ib_wc *wc,
                          struct ib_grh *grh, struct ib_mad *mad)
{
        struct ib_sge list;
        struct ib_ud_wr wr;
        struct ib_send_wr *bad_wr;
        struct mlx4_ib_demux_pv_ctx *tun_ctx;
        struct mlx4_ib_demux_pv_qp *tun_qp;
        struct mlx4_rcv_tunnel_mad *tun_mad;
        struct rdma_ah_attr attr;
        struct ib_ah *ah;
        struct ib_qp *src_qp = NULL;
        unsigned tun_tx_ix = 0;
        int dqpn;
        int ret = 0;
        u16 tun_pkey_ix;
        u16 cached_pkey;
        u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

        if (dest_qpt > IB_QPT_GSI)
                return -EINVAL;

        tun_ctx = dev->sriov.demux[port-1].tun[slave];

        /* check if proxy qp created */
        if (!tun_ctx || tun_ctx->state != DEMUX_PV_STATE_ACTIVE)
                return -EAGAIN;

        if (!dest_qpt)
                tun_qp = &tun_ctx->qp[0];
        else
                tun_qp = &tun_ctx->qp[1];

        /* compute P_Key index to put in tunnel header for slave */
        if (dest_qpt) {
                u16 pkey_ix;

                ret = ib_get_cached_pkey(&dev->ib_dev, port, wc->pkey_index, &cached_pkey);
                if (ret)
                        return -EINVAL;

                ret = find_slave_port_pkey_ix(dev, slave, port, cached_pkey, &pkey_ix);
                if (ret)
                        return -EINVAL;
                tun_pkey_ix = pkey_ix;
        } else
                tun_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];

        dqpn = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave + port + (dest_qpt * 2) - 1;

        /* get tunnel tx data buf for slave */
        src_qp = tun_qp->qp;

        /* create ah. Just need an empty one with the port num for the post send.
         * The driver will set the force loopback bit in post_send */
        memset(&attr, 0, sizeof attr);
        attr.type = rdma_ah_find_type(&dev->ib_dev, port);

        rdma_ah_set_port_num(&attr, port);
        if (is_eth) {
                union ib_gid sgid;
                union ib_gid dgid;

                if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
                        return -EINVAL;
                rdma_ah_set_grh(&attr, &dgid, 0, 0, 0, 0);
        }
        ah = rdma_create_ah(tun_ctx->pd, &attr);
        if (IS_ERR(ah))
                return -ENOMEM;

        /* allocate tunnel tx buf once the failure-return checks above have passed */
        spin_lock(&tun_qp->tx_lock);
        if (tun_qp->tx_ix_head - tun_qp->tx_ix_tail >=
            (MLX4_NUM_TUNNEL_BUFS - 1))
                ret = -EAGAIN;
        else
                tun_tx_ix = (++tun_qp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
        spin_unlock(&tun_qp->tx_lock);
        if (ret)
                goto end;

        tun_mad = (struct mlx4_rcv_tunnel_mad *) (tun_qp->tx_ring[tun_tx_ix].buf.addr);
        if (tun_qp->tx_ring[tun_tx_ix].ah)
                rdma_destroy_ah(tun_qp->tx_ring[tun_tx_ix].ah);
        tun_qp->tx_ring[tun_tx_ix].ah = ah;
        ib_dma_sync_single_for_cpu(&dev->ib_dev,
                                   tun_qp->tx_ring[tun_tx_ix].buf.map,
                                   sizeof (struct mlx4_rcv_tunnel_mad),
                                   DMA_TO_DEVICE);

        /* copy over to tunnel buffer */
        if (grh)
                memcpy(&tun_mad->grh, grh, sizeof *grh);
        memcpy(&tun_mad->mad, mad, sizeof *mad);

        /* adjust tunnel data */
        tun_mad->hdr.pkey_index = cpu_to_be16(tun_pkey_ix);
        tun_mad->hdr.flags_src_qp = cpu_to_be32(wc->src_qp & 0xFFFFFF);
        tun_mad->hdr.g_ml_path = (grh && (wc->wc_flags & IB_WC_GRH)) ? 0x80 : 0;

        if (is_eth) {
                u16 vlan = 0;

                if (mlx4_get_slave_default_vlan(dev->dev, port, slave, &vlan,
                                                NULL)) {
                        /* VST mode */
                        if (vlan != wc->vlan_id)
                                /* Packet vlan is not the VST-assigned vlan.
                                 * Drop the packet.
                                 */
                                goto out;
                        else
                                /* Remove the vlan tag before forwarding
                                 * the packet to the VF.
                                 */
                                vlan = 0xffff;
                } else {
                        vlan = wc->vlan_id;
                }

                tun_mad->hdr.sl_vid = cpu_to_be16(vlan);
                memcpy((char *)&tun_mad->hdr.mac_31_0, &(wc->smac[0]), 4);
                memcpy((char *)&tun_mad->hdr.slid_mac_47_32, &(wc->smac[4]), 2);
        } else {
                tun_mad->hdr.sl_vid = cpu_to_be16(((u16)(wc->sl)) << 12);
                tun_mad->hdr.slid_mac_47_32 = cpu_to_be16(wc->slid);
        }

        ib_dma_sync_single_for_device(&dev->ib_dev,
                                      tun_qp->tx_ring[tun_tx_ix].buf.map,
                                      sizeof (struct mlx4_rcv_tunnel_mad),
                                      DMA_TO_DEVICE);

        list.addr = tun_qp->tx_ring[tun_tx_ix].buf.map;
        list.length = sizeof (struct mlx4_rcv_tunnel_mad);
        list.lkey = tun_ctx->pd->local_dma_lkey;

        wr.ah = ah;
        wr.port_num = port;
        wr.remote_qkey = IB_QP_SET_QKEY;
        wr.remote_qpn = dqpn;
        wr.wr.next = NULL;
        wr.wr.wr_id = ((u64) tun_tx_ix) | MLX4_TUN_SET_WRID_QPN(dest_qpt);
        wr.wr.sg_list = &list;
        wr.wr.num_sge = 1;
        wr.wr.opcode = IB_WR_SEND;
        wr.wr.send_flags = IB_SEND_SIGNALED;

        ret = ib_post_send(src_qp, &wr.wr, &bad_wr);
        if (!ret)
                return 0;
out:
        spin_lock(&tun_qp->tx_lock);
        tun_qp->tx_ix_tail++;
        spin_unlock(&tun_qp->tx_lock);
        tun_qp->tx_ring[tun_tx_ix].ah = NULL;
end:
        rdma_destroy_ah(ah);
        return ret;
}

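/*
 * Demultiplex a MAD arriving on a wire port to the function (master or
 * slave) that should handle it, based on GRH, TID and management class.
 */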
static int mlx4_ib_demux_mad(struct ib_device *ibdev, u8 port,
                             struct ib_wc *wc, struct ib_grh *grh,
                             struct ib_mad *mad)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        int err, other_port;
        int slave = -1;
        u8 *slave_id;
        int is_eth = 0;

        if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
                is_eth = 0;
        else
                is_eth = 1;

        if (is_eth) {
                union ib_gid dgid;
                union ib_gid sgid;

                if (get_gids_from_l3_hdr(grh, &sgid, &dgid))
                        return -EINVAL;
                if (!(wc->wc_flags & IB_WC_GRH)) {
                        mlx4_ib_warn(ibdev, "RoCE grh not present.\n");
                        return -EINVAL;
                }
                if (mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_CM) {
                        mlx4_ib_warn(ibdev, "RoCE mgmt class is not CM\n");
                        return -EINVAL;
                }
                err = mlx4_get_slave_from_roce_gid(dev->dev, port, dgid.raw, &slave);
                if (err && mlx4_is_mf_bonded(dev->dev)) {
                        other_port = (port == 1) ? 2 : 1;
                        err = mlx4_get_slave_from_roce_gid(dev->dev, other_port, dgid.raw, &slave);
                        if (!err) {
                                port = other_port;
                                pr_debug("resolved slave %d from gid %pI6 wire port %d other %d\n",
                                         slave, grh->dgid.raw, port, other_port);
                        }
                }
                if (err) {
                        mlx4_ib_warn(ibdev, "failed matching grh\n");
                        return -ENOENT;
                }
                if (slave >= dev->dev->caps.sqp_demux) {
                        mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
                                     slave, dev->dev->caps.sqp_demux);
                        return -ENOENT;
                }

                if (mlx4_ib_demux_cm_handler(ibdev, port, NULL, mad))
                        return 0;

                err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
                if (err)
                        pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
                                 slave, err);
                return 0;
        }

        /* Initially assume that this mad is for us */
        slave = mlx4_master_func_num(dev->dev);

        /* See if the slave id is encoded in a response mad */
        if (mad->mad_hdr.method & 0x80) {
                slave_id = (u8 *) &mad->mad_hdr.tid;
                slave = *slave_id;
                if (slave != 255) /* 255 indicates the dom0 */
                        *slave_id = 0; /* remap tid */
        }

        /* If a grh is present, we demux according to it */
        if (wc->wc_flags & IB_WC_GRH) {
                if (grh->dgid.global.interface_id ==
                        cpu_to_be64(IB_SA_WELL_KNOWN_GUID) &&
                    grh->dgid.global.subnet_prefix == cpu_to_be64(
                        atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix))) {
                        slave = 0;
                } else {
                        slave = mlx4_ib_find_real_gid(ibdev, port,
                                                      grh->dgid.global.interface_id);
                        if (slave < 0) {
                                mlx4_ib_warn(ibdev, "failed matching grh\n");
                                return -ENOENT;
                        }
                }
        }

        /* Class-specific handling */
        switch (mad->mad_hdr.mgmt_class) {
        case IB_MGMT_CLASS_SUBN_LID_ROUTED:
        case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
                /* 255 indicates the dom0 */
                if (slave != 255 && slave != mlx4_master_func_num(dev->dev)) {
                        if (!mlx4_vf_smi_enabled(dev->dev, slave, port))
                                return -EPERM;
                        /* for a VF, drop unsolicited MADs */
                        if (!(mad->mad_hdr.method & IB_MGMT_METHOD_RESP)) {
                                mlx4_ib_warn(ibdev, "demux QP0. rejecting unsolicited mad for slave %d class 0x%x, method 0x%x\n",
                                             slave, mad->mad_hdr.mgmt_class,
                                             mad->mad_hdr.method);
                                return -EINVAL;
                        }
                }
                break;
        case IB_MGMT_CLASS_SUBN_ADM:
                if (mlx4_ib_demux_sa_handler(ibdev, port, slave,
                                             (struct ib_sa_mad *) mad))
                        return 0;
                break;
        case IB_MGMT_CLASS_CM:
                if (mlx4_ib_demux_cm_handler(ibdev, port, &slave, mad))
                        return 0;
                break;
        case IB_MGMT_CLASS_DEVICE_MGMT:
                if (mad->mad_hdr.method != IB_MGMT_METHOD_GET_RESP)
                        return 0;
                break;
        default:
                /* Drop unsupported classes for slaves in tunnel mode */
                if (slave != mlx4_master_func_num(dev->dev)) {
                        pr_debug("dropping unsupported ingress mad from class:%d for slave:%d\n",
                                 mad->mad_hdr.mgmt_class, slave);
                        return 0;
                }
        }

        /* reject any slave index (including the 255 dom0 marker) that is out of range */
        if (slave >= dev->dev->caps.sqp_demux) {
                mlx4_ib_warn(ibdev, "slave id: %d is bigger than allowed:%d\n",
                             slave, dev->dev->caps.sqp_demux);
                return -ENOENT;
        }

        err = mlx4_ib_send_to_slave(dev, slave, port, wc->qp->qp_type, wc, grh, mad);
        if (err)
                pr_debug("failed sending to slave %d via tunnel qp (%d)\n",
                         slave, err);
        return 0;
}

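/*
 * Process a MAD on an IB link: hand it to firmware via MAD_IFC, then
 * snoop successful SMPs so the driver can synthesize port events.
 */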
static int ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                          const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                          const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
        u16 slid, prev_lid = 0;
        int err;
        struct ib_port_attr pattr;

        if (in_wc && in_wc->qp->qp_num) {
                pr_debug("received MAD: slid:%d sqpn:%d dlid_bits:%d dqpn:%d wc_flags:0x%x, cls %x, mtd %x, atr %x\n",
                         in_wc->slid, in_wc->src_qp,
                         in_wc->dlid_path_bits,
                         in_wc->qp->qp_num,
                         in_wc->wc_flags,
                         in_mad->mad_hdr.mgmt_class, in_mad->mad_hdr.method,
                         be16_to_cpu(in_mad->mad_hdr.attr_id));
                if (in_wc->wc_flags & IB_WC_GRH) {
                        pr_debug("sgid_hi:0x%016llx sgid_lo:0x%016llx\n",
                                 be64_to_cpu(in_grh->sgid.global.subnet_prefix),
                                 be64_to_cpu(in_grh->sgid.global.interface_id));
                        pr_debug("dgid_hi:0x%016llx dgid_lo:0x%016llx\n",
                                 be64_to_cpu(in_grh->dgid.global.subnet_prefix),
                                 be64_to_cpu(in_grh->dgid.global.interface_id));
                }
        }

        slid = in_wc ? in_wc->slid : be16_to_cpu(IB_LID_PERMISSIVE);

        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && slid == 0) {
                forward_trap(to_mdev(ibdev), port_num, in_mad);
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;
        }

        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
            in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) {
                if (in_mad->mad_hdr.method   != IB_MGMT_METHOD_GET &&
                    in_mad->mad_hdr.method   != IB_MGMT_METHOD_SET &&
                    in_mad->mad_hdr.method   != IB_MGMT_METHOD_TRAP_REPRESS)
                        return IB_MAD_RESULT_SUCCESS;

                /*
                 * Don't process SMInfo queries -- the SMA can't handle them.
                 */
                if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO)
                        return IB_MAD_RESULT_SUCCESS;
        } else if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT ||
                   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS1   ||
                   in_mad->mad_hdr.mgmt_class == MLX4_IB_VENDOR_CLASS2   ||
                   in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_CONG_MGMT) {
                if (in_mad->mad_hdr.method  != IB_MGMT_METHOD_GET &&
                    in_mad->mad_hdr.method  != IB_MGMT_METHOD_SET)
                        return IB_MAD_RESULT_SUCCESS;
        } else
                return IB_MAD_RESULT_SUCCESS;

        if ((in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED ||
             in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) &&
            in_mad->mad_hdr.method == IB_MGMT_METHOD_SET &&
            in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO &&
            !ib_query_port(ibdev, port_num, &pattr))
                prev_lid = pattr.lid;

        err = mlx4_MAD_IFC(to_mdev(ibdev),
                           (mad_flags & IB_MAD_IGNORE_MKEY ? MLX4_MAD_IFC_IGNORE_MKEY : 0) |
                           (mad_flags & IB_MAD_IGNORE_BKEY ? MLX4_MAD_IFC_IGNORE_BKEY : 0) |
                           MLX4_MAD_IFC_NET_VIEW,
                           port_num, in_wc, in_grh, in_mad, out_mad);
        if (err)
                return IB_MAD_RESULT_FAILURE;

        if (!out_mad->mad_hdr.status) {
                smp_snoop(ibdev, port_num, in_mad, prev_lid);
                /* slaves get node desc from FW */
                if (!mlx4_is_slave(to_mdev(ibdev)->dev))
                        node_desc_override(ibdev, out_mad);
        }

        /* set return bit in status of directed route responses */
        if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
                out_mad->mad_hdr.status |= cpu_to_be16(1 << 15);

        if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS)
                /* no response for trap repress */
                return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED;

        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

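/*
 * Translate mlx4 flow-counter values into the IB PMA counter layout.
 * PortXmitData/PortRcvData are reported in units of 32-bit words, hence
 * the byte counts are shifted right by 2.
 */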
static void edit_counter(struct mlx4_counter *cnt, void *counters,
                         __be16 attr_id)
{
        switch (attr_id) {
        case IB_PMA_PORT_COUNTERS:
        {
                struct ib_pma_portcounters *pma_cnt =
                        (struct ib_pma_portcounters *)counters;

                ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
                                     (be64_to_cpu(cnt->tx_bytes) >> 2));
                ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
                                     (be64_to_cpu(cnt->rx_bytes) >> 2));
                ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
                                     be64_to_cpu(cnt->tx_frames));
                ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
                                     be64_to_cpu(cnt->rx_frames));
                break;
        }
        case IB_PMA_PORT_COUNTERS_EXT:
        {
                struct ib_pma_portcounters_ext *pma_cnt_ext =
                        (struct ib_pma_portcounters_ext *)counters;

                pma_cnt_ext->port_xmit_data =
                        cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
                pma_cnt_ext->port_rcv_data =
                        cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
                pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
                pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
                break;
        }
        }
}

static int iboe_process_mad_port_info(void *out_mad)
{
        struct ib_class_port_info cpi = {};

        cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
        memcpy(out_mad, &cpi, sizeof(cpi));
        return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}

static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                            const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                            const struct ib_mad *in_mad, struct ib_mad *out_mad)
{
        struct mlx4_counter counter_stats;
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct counter_index *tmp_counter;
        int err = IB_MAD_RESULT_FAILURE, stats_avail = 0;

        if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
                return -EINVAL;

        if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
                return iboe_process_mad_port_info((void *)(out_mad->data + 40));

        memset(&counter_stats, 0, sizeof(counter_stats));
        mutex_lock(&dev->counters_table[port_num - 1].mutex);
        list_for_each_entry(tmp_counter,
                            &dev->counters_table[port_num - 1].counters_list,
                            list) {
                err = mlx4_get_counter_stats(dev->dev,
                                             tmp_counter->index,
                                             &counter_stats, 0);
                if (err) {
                        err = IB_MAD_RESULT_FAILURE;
                        stats_avail = 0;
                        break;
                }
                stats_avail = 1;
        }
        mutex_unlock(&dev->counters_table[port_num - 1].mutex);
        if (stats_avail) {
                memset(out_mad->data, 0, sizeof out_mad->data);
                switch (counter_stats.counter_mode & 0xf) {
                case 0:
                        edit_counter(&counter_stats,
                                     (void *)(out_mad->data + 40),
                                     in_mad->mad_hdr.attr_id);
                        err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
                        break;
                default:
                        err = IB_MAD_RESULT_FAILURE;
                }
        }

        return err;
}

int mlx4_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                        const struct ib_wc *in_wc, const struct ib_grh *in_grh,
                        const struct ib_mad_hdr *in, size_t in_mad_size,
                        struct ib_mad_hdr *out, size_t *out_mad_size,
                        u16 *out_mad_pkey_index)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        const struct ib_mad *in_mad = (const struct ib_mad *)in;
        struct ib_mad *out_mad = (struct ib_mad *)out;
        enum rdma_link_layer link = rdma_port_get_link_layer(ibdev, port_num);

        if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) ||
                         *out_mad_size != sizeof(*out_mad)))
                return IB_MAD_RESULT_FAILURE;

        /* iboe_process_mad(), which uses the HCA flow counters to implement
         * IB PMA queries, should be called only by VFs and only for that
         * specific purpose.
         */
        if (link == IB_LINK_LAYER_INFINIBAND) {
                if (mlx4_is_slave(dev->dev) &&
                    (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
                     (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
                      in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
                      in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
                        return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
                                                in_grh, in_mad, out_mad);

                return ib_process_mad(ibdev, mad_flags, port_num, in_wc,
                                      in_grh, in_mad, out_mad);
        }

        if (link == IB_LINK_LAYER_ETHERNET)
                return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
                                        in_grh, in_mad, out_mad);

        return -EINVAL;
}

static void send_handler(struct ib_mad_agent *agent,
                         struct ib_mad_send_wc *mad_send_wc)
{
        if (mad_send_wc->send_buf->context[0])
                rdma_destroy_ah(mad_send_wc->send_buf->context[0]);
        ib_free_send_mad(mad_send_wc->send_buf);
}

int mlx4_ib_mad_init(struct mlx4_ib_dev *dev)
{
        struct ib_mad_agent *agent;
        int p, q;
        int ret;
        enum rdma_link_layer ll;

        for (p = 0; p < dev->num_ports; ++p) {
                ll = rdma_port_get_link_layer(&dev->ib_dev, p + 1);
                for (q = 0; q <= 1; ++q) {
                        if (ll == IB_LINK_LAYER_INFINIBAND) {
                                agent = ib_register_mad_agent(&dev->ib_dev, p + 1,
                                                              q ? IB_QPT_GSI : IB_QPT_SMI,
                                                              NULL, 0, send_handler,
                                                              NULL, NULL, 0);
                                if (IS_ERR(agent)) {
                                        ret = PTR_ERR(agent);
                                        goto err;
                                }
                                dev->send_agent[p][q] = agent;
                        } else
                                dev->send_agent[p][q] = NULL;
                }
        }

        return 0;

err:
        for (p = 0; p < dev->num_ports; ++p)
                for (q = 0; q <= 1; ++q)
                        if (dev->send_agent[p][q])
                                ib_unregister_mad_agent(dev->send_agent[p][q]);

        return ret;
}

void mlx4_ib_mad_cleanup(struct mlx4_ib_dev *dev)
{
        struct ib_mad_agent *agent;
        int p, q;

        for (p = 0; p < dev->num_ports; ++p) {
                for (q = 0; q <= 1; ++q) {
                        agent = dev->send_agent[p][q];
                        if (agent) {
                                dev->send_agent[p][q] = NULL;
                                ib_unregister_mad_agent(agent);
                        }
                }

                if (dev->sm_ah[p])
                        rdma_destroy_ah(dev->sm_ah[p]);
        }
}

static void handle_lid_change_event(struct mlx4_ib_dev *dev, u8 port_num)
{
        mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_LID_CHANGE);

        if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
                mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
                                            MLX4_EQ_PORT_INFO_LID_CHANGE_MASK);
}

static void handle_client_rereg_event(struct mlx4_ib_dev *dev, u8 port_num)
{
        /* re-configure the alias-guid and mcg's */
        if (mlx4_is_master(dev->dev)) {
                mlx4_ib_invalidate_all_guid_record(dev, port_num);

                if (!dev->sriov.is_going_down) {
                        mlx4_ib_mcg_port_cleanup(&dev->sriov.demux[port_num - 1], 0);
                        mlx4_gen_slaves_port_mgt_ev(dev->dev, port_num,
                                                    MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK);
                }
        }

        /* Update the sl to vl table from inside client rereg
         * only if in secure-host mode (snooping is not possible)
         * and the sl-to-vl change event is not generated by FW.
         */
        if (!mlx4_is_slave(dev->dev) &&
            dev->dev->flags & MLX4_FLAG_SECURE_HOST &&
            !(dev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_SL_TO_VL_CHANGE_EVENT)) {
                if (mlx4_is_master(dev->dev))
                        /* already in work queue from mlx4_ib_event queueing
                         * mlx4_handle_port_mgmt_change_event, which calls
                         * this procedure. Therefore, call sl2vl_update directly.
                         */
                        mlx4_ib_sl2vl_update(dev, port_num);
                else
                        mlx4_sched_ib_sl2vl_update_work(dev, port_num);
        }
        mlx4_ib_dispatch_event(dev, port_num, IB_EVENT_CLIENT_REREGISTER);
}

static void propagate_pkey_ev(struct mlx4_ib_dev *dev, int port_num,
                              struct mlx4_eqe *eqe)
{
        __propagate_pkey_ev(dev, port_num, GET_BLK_PTR_FROM_EQE(eqe),
                            GET_MASK_FROM_EQE(eqe));
}

static void handle_slaves_guid_change(struct mlx4_ib_dev *dev, u8 port_num,
                                      u32 guid_tbl_blk_num, u32 change_bitmap)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        u16 i;

        if (!mlx4_is_mfunc(dev->dev) || !mlx4_is_master(dev->dev))
                return;

        in_mad  = kmalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        guid_tbl_blk_num *= 4;

        for (i = 0; i < 4; i++) {
                if (change_bitmap && (!((change_bitmap >> (8 * i)) & 0xff)))
                        continue;
                memset(in_mad, 0, sizeof *in_mad);
                memset(out_mad, 0, sizeof *out_mad);

                in_mad->base_version  = 1;
                in_mad->mgmt_class    = IB_MGMT_CLASS_SUBN_LID_ROUTED;
                in_mad->class_version = 1;
                in_mad->method        = IB_MGMT_METHOD_GET;
                in_mad->attr_id       = IB_SMP_ATTR_GUID_INFO;
                in_mad->attr_mod      = cpu_to_be32(guid_tbl_blk_num + i);

                if (mlx4_MAD_IFC(dev,
                                 MLX4_MAD_IFC_IGNORE_KEYS | MLX4_MAD_IFC_NET_VIEW,
                                 port_num, NULL, NULL, in_mad, out_mad)) {
                        mlx4_ib_warn(&dev->ib_dev, "Failed in get GUID INFO MAD_IFC\n");
                        goto out;
                }

                mlx4_ib_update_cache_on_guid_change(dev, guid_tbl_blk_num + i,
                                                    port_num,
                                                    (u8 *)(&((struct ib_smp *)out_mad)->data));
                mlx4_ib_notify_slaves_on_guid_change(dev, guid_tbl_blk_num + i,
                                                     port_num,
                                                     (u8 *)(&((struct ib_smp *)out_mad)->data));
        }

out:
        kfree(in_mad);
        kfree(out_mad);
}

1183 void handle_port_mgmt_change_event(struct work_struct *work)
1184 {
1185         struct ib_event_work *ew = container_of(work, struct ib_event_work, work);
1186         struct mlx4_ib_dev *dev = ew->ib_dev;
1187         struct mlx4_eqe *eqe = &(ew->ib_eqe);
1188         u8 port = eqe->event.port_mgmt_change.port;
1189         u32 changed_attr;
1190         u32 tbl_block;
1191         u32 change_bitmap;
1192
1193         switch (eqe->subtype) {
1194         case MLX4_DEV_PMC_SUBTYPE_PORT_INFO:
1195                 changed_attr = be32_to_cpu(eqe->event.port_mgmt_change.params.port_info.changed_attr);
1196
1197                 /* Update the SM ah - This should be done before handling
1198                    the other changed attributes so that MADs can be sent to the SM */
1199                 if (changed_attr & MSTR_SM_CHANGE_MASK) {
1200                         u16 lid = be16_to_cpu(eqe->event.port_mgmt_change.params.port_info.mstr_sm_lid);
1201                         u8 sl = eqe->event.port_mgmt_change.params.port_info.mstr_sm_sl & 0xf;
1202                         update_sm_ah(dev, port, lid, sl);
1203                 }
1204
1205                 /* Check if it is a lid change event */
1206                 if (changed_attr & MLX4_EQ_PORT_INFO_LID_CHANGE_MASK)
1207                         handle_lid_change_event(dev, port);
1208
1209                 /* Generate GUID changed event */
1210                 if (changed_attr & MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK) {
1211                         if (mlx4_is_master(dev->dev)) {
1212                                 union ib_gid gid;
1213                                 int err = 0;
1214
1215                                 if (!eqe->event.port_mgmt_change.params.port_info.gid_prefix)
1216                                         err = __mlx4_ib_query_gid(&dev->ib_dev, port, 0, &gid, 1);
1217                                 else
1218                                         gid.global.subnet_prefix =
1219                                                 eqe->event.port_mgmt_change.params.port_info.gid_prefix;
1220                                 if (err) {
1221                                         pr_warn("Could not change QP1 subnet prefix for port %d: query_gid error (%d)\n",
1222                                                 port, err);
1223                                 } else {
1224                                         pr_debug("Changing QP1 subnet prefix for port %d. old=0x%llx. new=0x%llx\n",
1225                                                  port,
1226                                                  (u64)atomic64_read(&dev->sriov.demux[port - 1].subnet_prefix),
1227                                                  be64_to_cpu(gid.global.subnet_prefix));
1228                                         atomic64_set(&dev->sriov.demux[port - 1].subnet_prefix,
1229                                                      be64_to_cpu(gid.global.subnet_prefix));
1230                                 }
1231                         }
1232                         mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1233                         /*if master, notify all slaves*/
1234                         if (mlx4_is_master(dev->dev))
1235                                 mlx4_gen_slaves_port_mgt_ev(dev->dev, port,
1236                                                             MLX4_EQ_PORT_INFO_GID_PFX_CHANGE_MASK);
1237                 }
1238
1239                 if (changed_attr & MLX4_EQ_PORT_INFO_CLIENT_REREG_MASK)
1240                         handle_client_rereg_event(dev, port);
1241                 break;
1242
1243         case MLX4_DEV_PMC_SUBTYPE_PKEY_TABLE:
1244                 mlx4_ib_dispatch_event(dev, port, IB_EVENT_PKEY_CHANGE);
1245                 if (mlx4_is_master(dev->dev) && !dev->sriov.is_going_down)
1246                         propagate_pkey_ev(dev, port, eqe);
1247                 break;
1248         case MLX4_DEV_PMC_SUBTYPE_GUID_INFO:
1249                 /* paravirtualized master's guid is guid 0 -- does not change */
1250                 if (!mlx4_is_master(dev->dev))
1251                         mlx4_ib_dispatch_event(dev, port, IB_EVENT_GID_CHANGE);
1252                 /* if master, notify relevant slaves */
1253                 else if (!dev->sriov.is_going_down) {
1254                         tbl_block = GET_BLK_PTR_FROM_EQE(eqe);
1255                         change_bitmap = GET_MASK_FROM_EQE(eqe);
1256                         handle_slaves_guid_change(dev, port, tbl_block, change_bitmap);
1257                 }
1258                 break;
1259
1260         case MLX4_DEV_PMC_SUBTYPE_SL_TO_VL_MAP:
1261                 /* cache sl to vl mapping changes for use in
1262                  * filling QP1 LRH VL field when sending packets
1263                  */
1264                 if (!mlx4_is_slave(dev->dev)) {
1265                         union sl2vl_tbl_to_u64 sl2vl64;
1266                         int jj;
1267
1268                         for (jj = 0; jj < 8; jj++) {
1269                                 sl2vl64.sl8[jj] =
1270                                         eqe->event.port_mgmt_change.params.sl2vl_tbl_change_info.sl2vl_table[jj];
1271                                 pr_debug("port %u, sl2vl[%d] = %02x\n",
1272                                          port, jj, sl2vl64.sl8[jj]);
1273                         }
1274                         atomic64_set(&dev->sl2vl[port - 1], sl2vl64.sl64);
1275                 }
1276                 break;
1277         default:
1278                 pr_warn("Unsupported subtype 0x%x for Port Management Change event\n",
1279                         eqe->subtype);
1280         }
1281
1282         kfree(ew);
1283 }
1284
1285 void mlx4_ib_dispatch_event(struct mlx4_ib_dev *dev, u8 port_num,
1286                             enum ib_event_type type)
1287 {
1288         struct ib_event event;
1289
1290         event.device            = &dev->ib_dev;
1291         event.element.port_num  = port_num;
1292         event.event             = type;
1293
1294         ib_dispatch_event(&event);
1295 }
1296
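     /*
      * Completion callback for a tunnel/special QP CQ: kick the demux
      * worker, unless the device or this context is already going down.
      */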
1297 static void mlx4_ib_tunnel_comp_handler(struct ib_cq *cq, void *arg)
1298 {
1299         unsigned long flags;
1300         struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context;
1301         struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1302         spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
1303         if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE)
1304                 queue_work(ctx->wq, &ctx->work);
1305         spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
1306 }
1307
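     /*
      * Repost receive buffer 'index' on a tunnel or proxy QP.  The buffer
      * index, the RECV marker and the proxy QP type are all encoded in the
      * wr_id so that the completion handlers can recover them.
      */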
1308 static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx,
1309                                   struct mlx4_ib_demux_pv_qp *tun_qp,
1310                                   int index)
1311 {
1312         struct ib_sge sg_list;
1313         struct ib_recv_wr recv_wr, *bad_recv_wr;
1314         int size;
1315
1316         size = (tun_qp->qp->qp_type == IB_QPT_UD) ?
1317                 sizeof (struct mlx4_tunnel_mad) : sizeof (struct mlx4_mad_rcv_buf);
1318
1319         sg_list.addr = tun_qp->ring[index].map;
1320         sg_list.length = size;
1321         sg_list.lkey = ctx->pd->local_dma_lkey;
1322
1323         recv_wr.next = NULL;
1324         recv_wr.sg_list = &sg_list;
1325         recv_wr.num_sge = 1;
1326         recv_wr.wr_id = (u64) index | MLX4_TUN_WRID_RECV |
1327                 MLX4_TUN_SET_WRID_QPN(tun_qp->proxy_qpt);
1328         ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map,
1329                                       size, DMA_FROM_DEVICE);
1330         return ib_post_recv(tun_qp->qp, &recv_wr, &bad_recv_wr);
1331 }
1332
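     /*
      * Multiplex an SA MAD arriving from a slave.  Only multicast member
      * records need real paravirtualization (via the mcg module); all other
      * SA attributes are passed through unchanged.
      */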
1333 static int mlx4_ib_multiplex_sa_handler(struct ib_device *ibdev, int port,
1334                 int slave, struct ib_sa_mad *sa_mad)
1335 {
1336         int ret = 0;
1337
1338         /* dispatch to different sa handlers */
1339         switch (be16_to_cpu(sa_mad->mad_hdr.attr_id)) {
1340         case IB_SA_ATTR_MC_MEMBER_REC:
1341                 ret = mlx4_ib_mcg_multiplex_handler(ibdev, port, slave, sa_mad);
1342                 break;
1343         default:
1344                 break;
1345         }
1346         return ret;
1347 }
1348
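     /*
      * Each slave owns a block of eight proxy QPNs starting at
      * base_proxy_sqpn + 8 * slave; the first two entries of a block are
      * the QP0 proxies.
      */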
1349 static int is_proxy_qp0(struct mlx4_ib_dev *dev, int qpn, int slave)
1350 {
1351         int proxy_start = dev->dev->phys_caps.base_proxy_sqpn + 8 * slave;
1352
1353         return (qpn >= proxy_start && qpn <= proxy_start + 1);
1354 }
1355
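     /*
      * Send a MAD to the wire on behalf of a slave, using the real special
      * QP (QP0 or QP1) owned by the master.  The address handle is created
      * with sgid_index forced to 0 and the slave's real GID index is then
      * patched directly into the mlx4 address vector.
      */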
1357 int mlx4_ib_send_to_wire(struct mlx4_ib_dev *dev, int slave, u8 port,
1358                          enum ib_qp_type dest_qpt, u16 pkey_index,
1359                          u32 remote_qpn, u32 qkey, struct rdma_ah_attr *attr,
1360                          u8 *s_mac, u16 vlan_id, struct ib_mad *mad)
1361 {
1362         struct ib_sge list;
1363         struct ib_ud_wr wr;
1364         struct ib_send_wr *bad_wr;
1365         struct mlx4_ib_demux_pv_ctx *sqp_ctx;
1366         struct mlx4_ib_demux_pv_qp *sqp;
1367         struct mlx4_mad_snd_buf *sqp_mad;
1368         struct ib_ah *ah;
1369         struct ib_qp *send_qp = NULL;
1370         struct ib_global_route *grh;
1371         unsigned wire_tx_ix = 0;
1372         int ret = 0;
1373         u16 wire_pkey_ix;
1374         int src_qpnum;
1375         u8 sgid_index;
1376
1378         sqp_ctx = dev->sriov.sqps[port-1];
1379
1380         /* check if proxy qp created */
1381         if (!sqp_ctx || sqp_ctx->state != DEMUX_PV_STATE_ACTIVE)
1382                 return -EAGAIN;
1383
1384         if (dest_qpt == IB_QPT_SMI) {
1385                 src_qpnum = 0;
1386                 sqp = &sqp_ctx->qp[0];
1387                 wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][0];
1388         } else {
1389                 src_qpnum = 1;
1390                 sqp = &sqp_ctx->qp[1];
1391                 wire_pkey_ix = dev->pkeys.virt2phys_pkey[slave][port - 1][pkey_index];
1392         }
1393
1394         send_qp = sqp->qp;
1395
1396         /* create ah */
1397         grh = rdma_ah_retrieve_grh(attr);
1398         sgid_index = grh->sgid_index;
1399         grh->sgid_index = 0;
1400         ah = rdma_create_ah(sqp_ctx->pd, attr);
1401         if (IS_ERR(ah))
1402                 return PTR_ERR(ah);
1403         grh->sgid_index = sgid_index;
1404         to_mah(ah)->av.ib.gid_index = sgid_index;
1405         /* get rid of force-loopback bit */
1406         to_mah(ah)->av.ib.port_pd &= cpu_to_be32(0x7FFFFFFF);
1407         spin_lock(&sqp->tx_lock);
1408         if (sqp->tx_ix_head - sqp->tx_ix_tail >=
1409             (MLX4_NUM_TUNNEL_BUFS - 1))
1410                 ret = -EAGAIN;
1411         else
1412                 wire_tx_ix = (++sqp->tx_ix_head) & (MLX4_NUM_TUNNEL_BUFS - 1);
1413         spin_unlock(&sqp->tx_lock);
1414         if (ret)
1415                 goto out;
1416
1417         sqp_mad = (struct mlx4_mad_snd_buf *) (sqp->tx_ring[wire_tx_ix].buf.addr);
1418         if (sqp->tx_ring[wire_tx_ix].ah)
1419                 rdma_destroy_ah(sqp->tx_ring[wire_tx_ix].ah);
1420         sqp->tx_ring[wire_tx_ix].ah = ah;
1421         ib_dma_sync_single_for_cpu(&dev->ib_dev,
1422                                    sqp->tx_ring[wire_tx_ix].buf.map,
1423                                    sizeof (struct mlx4_mad_snd_buf),
1424                                    DMA_TO_DEVICE);
1425
1426         memcpy(&sqp_mad->payload, mad, sizeof *mad);
1427
1428         ib_dma_sync_single_for_device(&dev->ib_dev,
1429                                       sqp->tx_ring[wire_tx_ix].buf.map,
1430                                       sizeof (struct mlx4_mad_snd_buf),
1431                                       DMA_TO_DEVICE);
1432
1433         list.addr = sqp->tx_ring[wire_tx_ix].buf.map;
1434         list.length = sizeof (struct mlx4_mad_snd_buf);
1435         list.lkey = sqp_ctx->pd->local_dma_lkey;
1436
1437         wr.ah = ah;
1438         wr.port_num = port;
1439         wr.pkey_index = wire_pkey_ix;
1440         wr.remote_qkey = qkey;
1441         wr.remote_qpn = remote_qpn;
1442         wr.wr.next = NULL;
1443         wr.wr.wr_id = ((u64) wire_tx_ix) | MLX4_TUN_SET_WRID_QPN(src_qpnum);
1444         wr.wr.sg_list = &list;
1445         wr.wr.num_sge = 1;
1446         wr.wr.opcode = IB_WR_SEND;
1447         wr.wr.send_flags = IB_SEND_SIGNALED;
1448         if (s_mac)
1449                 memcpy(to_mah(ah)->av.eth.s_mac, s_mac, 6);
1450         if (vlan_id < 0x1000)
1451                 vlan_id |= (rdma_ah_get_sl(attr) & 7) << 13;
1452         to_mah(ah)->av.eth.vlan = cpu_to_be16(vlan_id);
1453
1455         ret = ib_post_send(send_qp, &wr.wr, &bad_wr);
1456         if (!ret)
1457                 return 0;
1458
1459         spin_lock(&sqp->tx_lock);
1460         sqp->tx_ix_tail++;
1461         spin_unlock(&sqp->tx_lock);
1462         sqp->tx_ring[wire_tx_ix].ah = NULL;
1463 out:
1464         rdma_destroy_ah(ah);
1465         return ret;
1466 }
1467
1468 static int get_slave_base_gid_ix(struct mlx4_ib_dev *dev, int slave, int port)
1469 {
1470         if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
1471                 return slave;
1472         return mlx4_get_base_gid_ix(dev->dev, slave, port);
1473 }
1474
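     /*
      * Rewrite the slave-relative sgid index in ah_attr to the real index
      * in the physical GID table: the slave number itself on IB, or the
      * index offset by the slave's base GID index on RoCE.
      */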
1475 static void fill_in_real_sgid_index(struct mlx4_ib_dev *dev, int slave, int port,
1476                                     struct rdma_ah_attr *ah_attr)
1477 {
1478         struct ib_global_route *grh = rdma_ah_retrieve_grh(ah_attr);
1479         if (rdma_port_get_link_layer(&dev->ib_dev, port) == IB_LINK_LAYER_INFINIBAND)
1480                 grh->sgid_index = slave;
1481         else
1482                 grh->sgid_index += get_slave_base_gid_ix(dev, slave, port);
1483 }
1484
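     /*
      * Demultiplex a MAD tunnelled from a slave: validate that the source
      * QP really is one of that slave's proxy QPs, stamp the slave number
      * into the TID's most significant byte for request methods, apply
      * class-specific filtering (SMI privileges, SA/CM paravirtualization),
      * then rebuild an address handle from the tunnel header and forward
      * the MAD to the wire.
      */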
1485 static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc)
1486 {
1487         struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
1488         struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)];
1489         int wr_ix = wc->wr_id & (MLX4_NUM_TUNNEL_BUFS - 1);
1490         struct mlx4_tunnel_mad *tunnel = tun_qp->ring[wr_ix].addr;
1491         struct mlx4_ib_ah ah;
1492         struct rdma_ah_attr ah_attr;
1493         u8 *slave_id;
1494         int slave;
1495         int port;
1496         u16 vlan_id;
1497         u8 qos;
1498         u8 *dmac;
1499
1500         /* Get slave that sent this packet */
1501         if (wc->src_qp < dev->dev->phys_caps.base_proxy_sqpn ||
1502             wc->src_qp >= dev->dev->phys_caps.base_proxy_sqpn + 8 * MLX4_MFUNC_MAX ||
1503             (wc->src_qp & 0x1) != ctx->port - 1 ||
1504             wc->src_qp & 0x4) {
1505                 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp);
1506                 return;
1507         }
1508         slave = ((wc->src_qp & ~0x7) - dev->dev->phys_caps.base_proxy_sqpn) / 8;
1509         if (slave != ctx->slave) {
1510                 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: belongs to another slave\n",
1511                              wc->src_qp);
1512                 return;
1513         }
1514
1515         /* Map transaction ID */
1516         ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map,
1517                                    sizeof (struct mlx4_tunnel_mad),
1518                                    DMA_FROM_DEVICE);
1519         switch (tunnel->mad.mad_hdr.method) {
1520         case IB_MGMT_METHOD_SET:
1521         case IB_MGMT_METHOD_GET:
1522         case IB_MGMT_METHOD_REPORT:
1523         case IB_SA_METHOD_GET_TABLE:
1524         case IB_SA_METHOD_DELETE:
1525         case IB_SA_METHOD_GET_MULTI:
1526         case IB_SA_METHOD_GET_TRACE_TBL:
1527                 slave_id = (u8 *) &tunnel->mad.mad_hdr.tid;
1528                 if (*slave_id) {
1529                         mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d class:%d slave:%d\n",
1530                                      *slave_id, tunnel->mad.mad_hdr.mgmt_class,
1531                                      slave);
1532                         return;
1533                 } else
1534                         *slave_id = slave;
1535         default:
1536                 /* nothing */;
1537         }
1538
1539         /* Class-specific handling */
1540         switch (tunnel->mad.mad_hdr.mgmt_class) {
1541         case IB_MGMT_CLASS_SUBN_LID_ROUTED:
1542         case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE:
1543                 if (slave != mlx4_master_func_num(dev->dev) &&
1544                     !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port))
1545                         return;
1546                 break;
1547         case IB_MGMT_CLASS_SUBN_ADM:
1548                 if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave,
1549                               (struct ib_sa_mad *) &tunnel->mad))
1550                         return;
1551                 break;
1552         case IB_MGMT_CLASS_CM:
1553                 if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave,
1554                               (struct ib_mad *) &tunnel->mad))
1555                         return;
1556                 break;
1557         case IB_MGMT_CLASS_DEVICE_MGMT:
1558                 if (tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_GET &&
1559                     tunnel->mad.mad_hdr.method != IB_MGMT_METHOD_SET)
1560                         return;
1561                 break;
1562         default:
1563                 /* Drop unsupported classes for slaves in tunnel mode */
1564                 if (slave != mlx4_master_func_num(dev->dev)) {
1565                         mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d for slave:%d\n",
1566                                      tunnel->mad.mad_hdr.mgmt_class, slave);
1567                         return;
1568                 }
1569         }
1570
1571         /* We are using standard ib_core services to send the mad, so generate a
1572          * standard address handle by decoding the tunnelled mlx4_ah fields */
1573         memcpy(&ah.av, &tunnel->hdr.av, sizeof (struct mlx4_av));
1574         ah.ibah.device = ctx->ib_dev;
1575
1576         port = be32_to_cpu(ah.av.ib.port_pd) >> 24;
1577         port = mlx4_slave_convert_port(dev->dev, slave, port);
1578         if (port < 0)
1579                 return;
1580         ah.av.ib.port_pd = cpu_to_be32(port << 24 | (be32_to_cpu(ah.av.ib.port_pd) & 0xffffff));
1581         ah.ibah.type = rdma_ah_find_type(&dev->ib_dev, port);
1582
1583         mlx4_ib_query_ah(&ah.ibah, &ah_attr);
1584         if (rdma_ah_get_ah_flags(&ah_attr) & IB_AH_GRH)
1585                 fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr);
1586         dmac = rdma_ah_retrieve_dmac(&ah_attr);
1587         if (dmac)
1588                 memcpy(dmac, tunnel->hdr.mac, ETH_ALEN);
1589         vlan_id = be16_to_cpu(tunnel->hdr.vlan);
1590         /* if the slave has a default vlan, use it */
1591         if (mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave,
1592                                         &vlan_id, &qos))
1593                 rdma_ah_set_sl(&ah_attr, qos);
1594
1595         mlx4_ib_send_to_wire(dev, slave, ctx->port,
1596                              is_proxy_qp0(dev, wc->src_qp, slave) ?
1597                              IB_QPT_SMI : IB_QPT_GSI,
1598                              be16_to_cpu(tunnel->hdr.pkey_index),
1599                              be32_to_cpu(tunnel->hdr.remote_qpn),
1600                              be32_to_cpu(tunnel->hdr.qkey),
1601                              &ah_attr, wc->smac, vlan_id, &tunnel->mad);
1602 }
1603
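     /*
      * Allocate and DMA-map the receive and send rings for one tunnel or
      * proxy QP.  Tunnel QPs carry the extra tunnel headers; proxy (wire)
      * QPs use plain MAD buffers.
      */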
1604 static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1605                                  enum ib_qp_type qp_type, int is_tun)
1606 {
1607         int i;
1608         struct mlx4_ib_demux_pv_qp *tun_qp;
1609         int rx_buf_size, tx_buf_size;
1610
1611         if (qp_type > IB_QPT_GSI)
1612                 return -EINVAL;
1613
1614         tun_qp = &ctx->qp[qp_type];
1615
1616         tun_qp->ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
1617                                sizeof (struct mlx4_ib_buf), GFP_KERNEL);
1618         if (!tun_qp->ring)
1619                 return -ENOMEM;
1620
1621         tun_qp->tx_ring = kcalloc(MLX4_NUM_TUNNEL_BUFS,
1622                                   sizeof (struct mlx4_ib_tun_tx_buf),
1623                                   GFP_KERNEL);
1624         if (!tun_qp->tx_ring) {
1625                 kfree(tun_qp->ring);
1626                 tun_qp->ring = NULL;
1627                 return -ENOMEM;
1628         }
1629
1630         if (is_tun) {
1631                 rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1632                 tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1633         } else {
1634                 rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1635                 tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1636         }
1637
1638         for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1639                 tun_qp->ring[i].addr = kmalloc(rx_buf_size, GFP_KERNEL);
1640                 if (!tun_qp->ring[i].addr)
1641                         goto err;
1642                 tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev,
1643                                                         tun_qp->ring[i].addr,
1644                                                         rx_buf_size,
1645                                                         DMA_FROM_DEVICE);
1646                 if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) {
1647                         kfree(tun_qp->ring[i].addr);
1648                         goto err;
1649                 }
1650         }
1651
1652         for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1653                 tun_qp->tx_ring[i].buf.addr =
1654                         kmalloc(tx_buf_size, GFP_KERNEL);
1655                 if (!tun_qp->tx_ring[i].buf.addr)
1656                         goto tx_err;
1657                 tun_qp->tx_ring[i].buf.map =
1658                         ib_dma_map_single(ctx->ib_dev,
1659                                           tun_qp->tx_ring[i].buf.addr,
1660                                           tx_buf_size,
1661                                           DMA_TO_DEVICE);
1662                 if (ib_dma_mapping_error(ctx->ib_dev,
1663                                          tun_qp->tx_ring[i].buf.map)) {
1664                         kfree(tun_qp->tx_ring[i].buf.addr);
1665                         goto tx_err;
1666                 }
1667                 tun_qp->tx_ring[i].ah = NULL;
1668         }
1669         spin_lock_init(&tun_qp->tx_lock);
1670         tun_qp->tx_ix_head = 0;
1671         tun_qp->tx_ix_tail = 0;
1672         tun_qp->proxy_qpt = qp_type;
1673
1674         return 0;
1675
1676 tx_err:
1677         while (i > 0) {
1678                 --i;
1679                 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1680                                     tx_buf_size, DMA_TO_DEVICE);
1681                 kfree(tun_qp->tx_ring[i].buf.addr);
1682         }
1683         kfree(tun_qp->tx_ring);
1684         tun_qp->tx_ring = NULL;
1685         i = MLX4_NUM_TUNNEL_BUFS;
1686 err:
1687         while (i > 0) {
1688                 --i;
1689                 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1690                                     rx_buf_size, DMA_FROM_DEVICE);
1691                 kfree(tun_qp->ring[i].addr);
1692         }
1693         kfree(tun_qp->ring);
1694         tun_qp->ring = NULL;
1695         return -ENOMEM;
1696 }
1697
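     /*
      * Unmap and free the rings allocated by mlx4_ib_alloc_pv_bufs(),
      * destroying any address handles still attached to tx ring entries.
      */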
1698 static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx,
1699                                      enum ib_qp_type qp_type, int is_tun)
1700 {
1701         int i;
1702         struct mlx4_ib_demux_pv_qp *tun_qp;
1703         int rx_buf_size, tx_buf_size;
1704
1705         if (qp_type > IB_QPT_GSI)
1706                 return;
1707
1708         tun_qp = &ctx->qp[qp_type];
1709         if (is_tun) {
1710                 rx_buf_size = sizeof (struct mlx4_tunnel_mad);
1711                 tx_buf_size = sizeof (struct mlx4_rcv_tunnel_mad);
1712         } else {
1713                 rx_buf_size = sizeof (struct mlx4_mad_rcv_buf);
1714                 tx_buf_size = sizeof (struct mlx4_mad_snd_buf);
1715         }
1716
1718         for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1719                 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map,
1720                                     rx_buf_size, DMA_FROM_DEVICE);
1721                 kfree(tun_qp->ring[i].addr);
1722         }
1723
1724         for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1725                 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map,
1726                                     tx_buf_size, DMA_TO_DEVICE);
1727                 kfree(tun_qp->tx_ring[i].buf.addr);
1728                 if (tun_qp->tx_ring[i].ah)
1729                         rdma_destroy_ah(tun_qp->tx_ring[i].ah);
1730         }
1731         kfree(tun_qp->tx_ring);
1732         kfree(tun_qp->ring);
1733 }
1734
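     /*
      * Work handler for tunnel QP completions on the master: received MADs
      * are multiplexed out to the wire and their buffers reposted; send
      * completions (successful or not) release the AH and free the tx slot.
      */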
1735 static void mlx4_ib_tunnel_comp_worker(struct work_struct *work)
1736 {
1737         struct mlx4_ib_demux_pv_ctx *ctx;
1738         struct mlx4_ib_demux_pv_qp *tun_qp;
1739         struct ib_wc wc;
1740         int ret;
1741         ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1742         ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1743
1744         while (ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1745                 tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1746                 if (wc.status == IB_WC_SUCCESS) {
1747                         switch (wc.opcode) {
1748                         case IB_WC_RECV:
1749                                 mlx4_ib_multiplex_mad(ctx, &wc);
1750                                 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp,
1751                                                              wc.wr_id &
1752                                                              (MLX4_NUM_TUNNEL_BUFS - 1));
1753                                 if (ret)
1754                                         pr_err("Failed reposting tunnel buf:%lld\n",
1755                                                wc.wr_id);
1756                                 break;
1757                         case IB_WC_SEND:
1758                                 pr_debug("received tunnel send completion: wrid=0x%llx, status=0x%x\n",
1759                                          wc.wr_id, wc.status);
1761                                 rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1762                                               (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1763                                 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1764                                         = NULL;
1765                                 spin_lock(&tun_qp->tx_lock);
1766                                 tun_qp->tx_ix_tail++;
1767                                 spin_unlock(&tun_qp->tx_lock);
1768
1769                                 break;
1770                         default:
1771                                 break;
1772                         }
1773                 } else {
1774                         pr_debug("mlx4_ib: completion error in tunnel: %d. status = %d, wrid = 0x%llx\n",
1775                                  ctx->slave, wc.status, wc.wr_id);
1777                         if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1778                                 rdma_destroy_ah(tun_qp->tx_ring[wc.wr_id &
1779                                               (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1780                                 tun_qp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1781                                         = NULL;
1782                                 spin_lock(&tun_qp->tx_lock);
1783                                 tun_qp->tx_ix_tail++;
1784                                 spin_unlock(&tun_qp->tx_lock);
1785                         }
1786                 }
1787         }
1788 }
1789
1790 static void pv_qp_event_handler(struct ib_event *event, void *qp_context)
1791 {
1792         struct mlx4_ib_demux_pv_ctx *sqp = qp_context;
1793
1794         /* It's worse than that! He's dead, Jim! */
1795         pr_err("Fatal error (%d) on a MAD QP on port %d\n",
1796                event->event, sqp->port);
1797 }
1798
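     /*
      * Create either a UD tunnel QP or a real special QP for a paravirt
      * context and walk it through the INIT->RTR->RTS transitions,
      * pre-posting a full ring of receive buffers.
      */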
1799 static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx,
1800                             enum ib_qp_type qp_type, int create_tun)
1801 {
1802         int i, ret;
1803         struct mlx4_ib_demux_pv_qp *tun_qp;
1804         struct mlx4_ib_qp_tunnel_init_attr qp_init_attr;
1805         struct ib_qp_attr attr;
1806         int qp_attr_mask_INIT;
1807
1808         if (qp_type > IB_QPT_GSI)
1809                 return -EINVAL;
1810
1811         tun_qp = &ctx->qp[qp_type];
1812
1813         memset(&qp_init_attr, 0, sizeof qp_init_attr);
1814         qp_init_attr.init_attr.send_cq = ctx->cq;
1815         qp_init_attr.init_attr.recv_cq = ctx->cq;
1816         qp_init_attr.init_attr.sq_sig_type = IB_SIGNAL_ALL_WR;
1817         qp_init_attr.init_attr.cap.max_send_wr = MLX4_NUM_TUNNEL_BUFS;
1818         qp_init_attr.init_attr.cap.max_recv_wr = MLX4_NUM_TUNNEL_BUFS;
1819         qp_init_attr.init_attr.cap.max_send_sge = 1;
1820         qp_init_attr.init_attr.cap.max_recv_sge = 1;
1821         if (create_tun) {
1822                 qp_init_attr.init_attr.qp_type = IB_QPT_UD;
1823                 qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_TUNNEL_QP;
1824                 qp_init_attr.port = ctx->port;
1825                 qp_init_attr.slave = ctx->slave;
1826                 qp_init_attr.proxy_qp_type = qp_type;
1827                 qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX |
1828                            IB_QP_QKEY | IB_QP_PORT;
1829         } else {
1830                 qp_init_attr.init_attr.qp_type = qp_type;
1831                 qp_init_attr.init_attr.create_flags = MLX4_IB_SRIOV_SQP;
1832                 qp_attr_mask_INIT = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_QKEY;
1833         }
1834         qp_init_attr.init_attr.port_num = ctx->port;
1835         qp_init_attr.init_attr.qp_context = ctx;
1836         qp_init_attr.init_attr.event_handler = pv_qp_event_handler;
1837         tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr);
1838         if (IS_ERR(tun_qp->qp)) {
1839                 ret = PTR_ERR(tun_qp->qp);
1840                 tun_qp->qp = NULL;
1841                 pr_err("Couldn't create %s QP (%d)\n",
1842                        create_tun ? "tunnel" : "special", ret);
1843                 return ret;
1844         }
1845
1846         memset(&attr, 0, sizeof attr);
1847         attr.qp_state = IB_QPS_INIT;
1848         ret = 0;
1849         if (create_tun)
1850                 ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave,
1851                                               ctx->port, IB_DEFAULT_PKEY_FULL,
1852                                               &attr.pkey_index);
1853         if (ret || !create_tun)
1854                 attr.pkey_index =
1855                         to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0];
1856         attr.qkey = IB_QP1_QKEY;
1857         attr.port_num = ctx->port;
1858         ret = ib_modify_qp(tun_qp->qp, &attr, qp_attr_mask_INIT);
1859         if (ret) {
1860                 pr_err("Couldn't change %s qp state to INIT (%d)\n",
1861                        create_tun ? "tunnel" : "special", ret);
1862                 goto err_qp;
1863         }
1864         attr.qp_state = IB_QPS_RTR;
1865         ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE);
1866         if (ret) {
1867                 pr_err("Couldn't change %s qp state to RTR (%d)\n",
1868                        create_tun ? "tunnel" : "special", ret);
1869                 goto err_qp;
1870         }
1871         attr.qp_state = IB_QPS_RTS;
1872         attr.sq_psn = 0;
1873         ret = ib_modify_qp(tun_qp->qp, &attr, IB_QP_STATE | IB_QP_SQ_PSN);
1874         if (ret) {
1875                 pr_err("Couldn't change %s qp state to RTS (%d)\n",
1876                        create_tun ? "tunnel" : "special", ret);
1877                 goto err_qp;
1878         }
1879
1880         for (i = 0; i < MLX4_NUM_TUNNEL_BUFS; i++) {
1881                 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i);
1882                 if (ret) {
1883                         pr_err("mlx4_ib_post_pv_qp_buf error (err = %d, i = %d)\n",
1884                                ret, i);
1885                         goto err_qp;
1886                 }
1887         }
1888         return 0;
1889
1890 err_qp:
1891         ib_destroy_qp(tun_qp->qp);
1892         tun_qp->qp = NULL;
1893         return ret;
1894 }
1895
1896 /*
1897  * IB MAD completion callback for real SQPs
1898  */
1899 static void mlx4_ib_sqp_comp_worker(struct work_struct *work)
1900 {
1901         struct mlx4_ib_demux_pv_ctx *ctx;
1902         struct mlx4_ib_demux_pv_qp *sqp;
1903         struct ib_wc wc;
1904         struct ib_grh *grh;
1905         struct ib_mad *mad;
1906
1907         ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work);
1908         ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
1909
1910         while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) {
1911                 sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)];
1912                 if (wc.status == IB_WC_SUCCESS) {
1913                         switch (wc.opcode) {
1914                         case IB_WC_SEND:
1915                                 rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
1916                                               (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1917                                 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1918                                         = NULL;
1919                                 spin_lock(&sqp->tx_lock);
1920                                 sqp->tx_ix_tail++;
1921                                 spin_unlock(&sqp->tx_lock);
1922                                 break;
1923                         case IB_WC_RECV:
1924                                 mad = (struct ib_mad *) &(((struct mlx4_mad_rcv_buf *)
1925                                                 (sqp->ring[wc.wr_id &
1926                                                 (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->payload);
1927                                 grh = &(((struct mlx4_mad_rcv_buf *)
1928                                                 (sqp->ring[wc.wr_id &
1929                                                 (MLX4_NUM_TUNNEL_BUFS - 1)].addr))->grh);
1930                                 mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad);
1931                                 if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id &
1932                                                            (MLX4_NUM_TUNNEL_BUFS - 1)))
1933                                         pr_err("Failed reposting SQP "
1934                                                "buf:%lld\n", wc.wr_id);
1935                                 break;
1936                         default:
1937                                 BUG();
1938                                 break;
1939                         }
1940                 } else {
1941                         pr_debug("mlx4_ib: completion error on real SQP: %d. status = %d, wrid = 0x%llx\n",
1942                                  ctx->slave, wc.status, wc.wr_id);
1944                         if (!MLX4_TUN_IS_RECV(wc.wr_id)) {
1945                                 rdma_destroy_ah(sqp->tx_ring[wc.wr_id &
1946                                               (MLX4_NUM_TUNNEL_BUFS - 1)].ah);
1947                                 sqp->tx_ring[wc.wr_id & (MLX4_NUM_TUNNEL_BUFS - 1)].ah
1948                                         = NULL;
1949                                 spin_lock(&sqp->tx_lock);
1950                                 sqp->tx_ix_tail++;
1951                                 spin_unlock(&sqp->tx_lock);
1952                         }
1953                 }
1954         }
1955 }
1956
1957 static int alloc_pv_object(struct mlx4_ib_dev *dev, int slave, int port,
1958                                struct mlx4_ib_demux_pv_ctx **ret_ctx)
1959 {
1960         struct mlx4_ib_demux_pv_ctx *ctx;
1961
1962         *ret_ctx = NULL;
1963         ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL);
1964         if (!ctx)
1965                 return -ENOMEM;
1966
1967         ctx->ib_dev = &dev->ib_dev;
1968         ctx->port = port;
1969         ctx->slave = slave;
1970         *ret_ctx = ctx;
1971         return 0;
1972 }
1973
1974 static void free_pv_object(struct mlx4_ib_dev *dev, int slave, int port)
1975 {
1976         if (dev->sriov.demux[port - 1].tun[slave]) {
1977                 kfree(dev->sriov.demux[port - 1].tun[slave]);
1978                 dev->sriov.demux[port - 1].tun[slave] = NULL;
1979         }
1980 }
1981
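     /*
      * Bring up the paravirt resources for one slave/port: rx/tx rings, a
      * shared CQ and PD, QP0 (only when the link layer is IB) and QP1, then
      * arm the CQ and mark the context active.
      */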
1982 static int create_pv_resources(struct ib_device *ibdev, int slave, int port,
1983                                int create_tun, struct mlx4_ib_demux_pv_ctx *ctx)
1984 {
1985         int ret, cq_size;
1986         struct ib_cq_init_attr cq_attr = {};
1987
1988         if (ctx->state != DEMUX_PV_STATE_DOWN)
1989                 return -EEXIST;
1990
1991         ctx->state = DEMUX_PV_STATE_STARTING;
1992         /* have QP0 only if link layer is IB */
1993         if (rdma_port_get_link_layer(ibdev, ctx->port) ==
1994             IB_LINK_LAYER_INFINIBAND)
1995                 ctx->has_smi = 1;
1996
1997         if (ctx->has_smi) {
1998                 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun);
1999                 if (ret) {
2000                         pr_err("Failed allocating qp0 tunnel bufs (%d)\n", ret);
2001                         goto err_out;
2002                 }
2003         }
2004
2005         ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun);
2006         if (ret) {
2007                 pr_err("Failed allocating qp1 tunnel bufs (%d)\n", ret);
2008                 goto err_out_qp0;
2009         }
2010
2011         cq_size = 2 * MLX4_NUM_TUNNEL_BUFS;
2012         if (ctx->has_smi)
2013                 cq_size *= 2;
2014
2015         cq_attr.cqe = cq_size;
2016         ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler,
2017                                NULL, ctx, &cq_attr);
2018         if (IS_ERR(ctx->cq)) {
2019                 ret = PTR_ERR(ctx->cq);
2020                 pr_err("Couldn't create tunnel CQ (%d)\n", ret);
2021                 goto err_buf;
2022         }
2023
2024         ctx->pd = ib_alloc_pd(ctx->ib_dev, 0);
2025         if (IS_ERR(ctx->pd)) {
2026                 ret = PTR_ERR(ctx->pd);
2027                 pr_err("Couldn't create tunnel PD (%d)\n", ret);
2028                 goto err_cq;
2029         }
2030
2031         if (ctx->has_smi) {
2032                 ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun);
2033                 if (ret) {
2034                         pr_err("Couldn't create %s QP0 (%d)\n",
2035                                create_tun ? "tunnel for" : "",  ret);
2036                         goto err_pd;
2037                 }
2038         }
2039
2040         ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun);
2041         if (ret) {
2042                 pr_err("Couldn't create %s QP1 (%d)\n",
2043                        create_tun ? "tunnel for" : "",  ret);
2044                 goto err_qp0;
2045         }
2046
2047         if (create_tun)
2048                 INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker);
2049         else
2050                 INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker);
2051
2052         ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq;
2053
2054         ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP);
2055         if (ret) {
2056                 pr_err("Couldn't arm tunnel cq (%d)\n", ret);
2057                 goto err_wq;
2058         }
2059         ctx->state = DEMUX_PV_STATE_ACTIVE;
2060         return 0;
2061
2062 err_wq:
2063         ctx->wq = NULL;
2064         ib_destroy_qp(ctx->qp[1].qp);
2065         ctx->qp[1].qp = NULL;
2066
2068 err_qp0:
2069         if (ctx->has_smi)
2070                 ib_destroy_qp(ctx->qp[0].qp);
2071         ctx->qp[0].qp = NULL;
2072
2073 err_pd:
2074         ib_dealloc_pd(ctx->pd);
2075         ctx->pd = NULL;
2076
2077 err_cq:
2078         ib_destroy_cq(ctx->cq);
2079         ctx->cq = NULL;
2080
2081 err_buf:
2082         mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun);
2083
2084 err_out_qp0:
2085         if (ctx->has_smi)
2086                 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun);
2087 err_out:
2088         ctx->state = DEMUX_PV_STATE_DOWN;
2089         return ret;
2090 }
2091
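     /*
      * Tear down everything created by create_pv_resources(); 'flush'
      * drains the context's workqueue first.
      */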
2092 static void destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port,
2093                                  struct mlx4_ib_demux_pv_ctx *ctx, int flush)
2094 {
2095         if (!ctx)
2096                 return;
2097         if (ctx->state > DEMUX_PV_STATE_DOWN) {
2098                 ctx->state = DEMUX_PV_STATE_DOWNING;
2099                 if (flush)
2100                         flush_workqueue(ctx->wq);
2101                 if (ctx->has_smi) {
2102                         ib_destroy_qp(ctx->qp[0].qp);
2103                         ctx->qp[0].qp = NULL;
2104                         mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1);
2105                 }
2106                 ib_destroy_qp(ctx->qp[1].qp);
2107                 ctx->qp[1].qp = NULL;
2108                 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1);
2109                 ib_dealloc_pd(ctx->pd);
2110                 ctx->pd = NULL;
2111                 ib_destroy_cq(ctx->cq);
2112                 ctx->cq = NULL;
2113                 ctx->state = DEMUX_PV_STATE_DOWN;
2114         }
2115 }
2116
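     /*
      * Create or destroy the tunnel QP resources for one slave/port.  For
      * the master function this also covers the real special QP resources.
      */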
2117 static int mlx4_ib_tunnels_update(struct mlx4_ib_dev *dev, int slave,
2118                                   int port, int do_init)
2119 {
2120         int ret = 0;
2121
2122         if (!do_init) {
2123                 clean_vf_mcast(&dev->sriov.demux[port - 1], slave);
2124                 /* for master, destroy real sqp resources */
2125                 if (slave == mlx4_master_func_num(dev->dev))
2126                         destroy_pv_resources(dev, slave, port,
2127                                              dev->sriov.sqps[port - 1], 1);
2128                 /* destroy the tunnel qp resources */
2129                 destroy_pv_resources(dev, slave, port,
2130                                      dev->sriov.demux[port - 1].tun[slave], 1);
2131                 return 0;
2132         }
2133
2134         /* create the tunnel qp resources */
2135         ret = create_pv_resources(&dev->ib_dev, slave, port, 1,
2136                                   dev->sriov.demux[port - 1].tun[slave]);
2137
2138         /* for master, create the real sqp resources */
2139         if (!ret && slave == mlx4_master_func_num(dev->dev))
2140                 ret = create_pv_resources(&dev->ib_dev, slave, port, 0,
2141                                           dev->sriov.sqps[port - 1]);
2142         return ret;
2143 }
2144
2145 void mlx4_ib_tunnels_update_work(struct work_struct *work)
2146 {
2147         struct mlx4_ib_demux_work *dmxw;
2148
2149         dmxw = container_of(work, struct mlx4_ib_demux_work, work);
2150         mlx4_ib_tunnels_update(dmxw->dev, dmxw->slave, (int) dmxw->port,
2151                                dmxw->do_init);
2152         kfree(dmxw);
2154 }
2155
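     /*
      * Allocate the per-port demux context: one PV context slot per
      * possible slave, the mcg paravirt state, and the ordered tunnelling
      * and up/down workqueues.
      */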
2156 static int mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev,
2157                                        struct mlx4_ib_demux_ctx *ctx,
2158                                        int port)
2159 {
2160         char name[12];
2161         int ret = 0;
2162         int i;
2163
2164         ctx->tun = kcalloc(dev->dev->caps.sqp_demux,
2165                            sizeof (struct mlx4_ib_demux_pv_ctx *), GFP_KERNEL);
2166         if (!ctx->tun)
2167                 return -ENOMEM;
2168
2169         ctx->dev = dev;
2170         ctx->port = port;
2171         ctx->ib_dev = &dev->ib_dev;
2172
2173         for (i = 0;
2174              i < min(dev->dev->caps.sqp_demux,
2175              (u16)(dev->dev->persist->num_vfs + 1));
2176              i++) {
2177                 struct mlx4_active_ports actv_ports =
2178                         mlx4_get_active_ports(dev->dev, i);
2179
2180                 if (!test_bit(port - 1, actv_ports.ports))
2181                         continue;
2182
2183                 ret = alloc_pv_object(dev, i, port, &ctx->tun[i]);
2184                 if (ret) {
2185                         ret = -ENOMEM;
2186                         goto err_mcg;
2187                 }
2188         }
2189
2190         ret = mlx4_ib_mcg_port_init(ctx);
2191         if (ret) {
2192                 pr_err("Failed initializing mcg para-virt (%d)\n", ret);
2193                 goto err_mcg;
2194         }
2195
2196         snprintf(name, sizeof name, "mlx4_ibt%d", port);
2197         ctx->wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
2198         if (!ctx->wq) {
2199                 pr_err("Failed to create tunnelling WQ for port %d\n", port);
2200                 ret = -ENOMEM;
2201                 goto err_wq;
2202         }
2203
2204         snprintf(name, sizeof name, "mlx4_ibud%d", port);
2205         ctx->ud_wq = alloc_ordered_workqueue(name, WQ_MEM_RECLAIM);
2206         if (!ctx->ud_wq) {
2207                 pr_err("Failed to create up/down WQ for port %d\n", port);
2208                 ret = -ENOMEM;
2209                 goto err_udwq;
2210         }
2211
2212         return 0;
2213
2214 err_udwq:
2215         destroy_workqueue(ctx->wq);
2216         ctx->wq = NULL;
2217
2218 err_wq:
2219         mlx4_ib_mcg_port_cleanup(ctx, 1);
2220 err_mcg:
2221         for (i = 0; i < dev->dev->caps.sqp_demux; i++)
2222                 free_pv_object(dev, i, port);
2223         kfree(ctx->tun);
2224         ctx->tun = NULL;
2225         return ret;
2226 }
2227
2228 static void mlx4_ib_free_sqp_ctx(struct mlx4_ib_demux_pv_ctx *sqp_ctx)
2229 {
2230         if (sqp_ctx->state > DEMUX_PV_STATE_DOWN) {
2231                 sqp_ctx->state = DEMUX_PV_STATE_DOWNING;
2232                 flush_workqueue(sqp_ctx->wq);
2233                 if (sqp_ctx->has_smi) {
2234                         ib_destroy_qp(sqp_ctx->qp[0].qp);
2235                         sqp_ctx->qp[0].qp = NULL;
2236                         mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_SMI, 0);
2237                 }
2238                 ib_destroy_qp(sqp_ctx->qp[1].qp);
2239                 sqp_ctx->qp[1].qp = NULL;
2240                 mlx4_ib_free_pv_qp_bufs(sqp_ctx, IB_QPT_GSI, 0);
2241                 ib_dealloc_pd(sqp_ctx->pd);
2242                 sqp_ctx->pd = NULL;
2243                 ib_destroy_cq(sqp_ctx->cq);
2244                 sqp_ctx->cq = NULL;
2245                 sqp_ctx->state = DEMUX_PV_STATE_DOWN;
2246         }
2247 }
2248
2249 static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx)
2250 {
2251         int i;
2252         if (ctx) {
2253                 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev);
2254                 mlx4_ib_mcg_port_cleanup(ctx, 1);
2255                 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2256                         if (!ctx->tun[i])
2257                                 continue;
2258                         if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN)
2259                                 ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING;
2260                 }
2261                 flush_workqueue(ctx->wq);
2262                 for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2263                         destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0);
2264                         free_pv_object(dev, i, ctx->port);
2265                 }
2266                 kfree(ctx->tun);
2267                 destroy_workqueue(ctx->ud_wq);
2268                 destroy_workqueue(ctx->wq);
2269         }
2270 }
2271
2272 static void mlx4_ib_master_tunnels(struct mlx4_ib_dev *dev, int do_init)
2273 {
2274         int i;
2275
2276         if (!mlx4_is_master(dev->dev))
2277                 return;
2278         /* initialize or tear down tunnel QPs for the master */
2279         for (i = 0; i < dev->dev->caps.num_ports; i++)
2280                 mlx4_ib_tunnels_update(dev, mlx4_master_func_num(dev->dev), i + 1, do_init);
2282 }
2283
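     /*
      * Initialize SR-IOV support.  Every multi-function device gets CM
      * paravirtualization; the master additionally assigns node GUIDs to
      * the slaves and sets up the alias GUID service, sysfs entries and a
      * demux context (with special QPs) per port.
      */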
2284 int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)
2285 {
2286         int i = 0;
2287         int err;
2288
2289         if (!mlx4_is_mfunc(dev->dev))
2290                 return 0;
2291
2292         dev->sriov.is_going_down = 0;
2293         spin_lock_init(&dev->sriov.going_down_lock);
2294         mlx4_ib_cm_paravirt_init(dev);
2295
2296         mlx4_ib_warn(&dev->ib_dev, "multi-function enabled\n");
2297
2298         if (mlx4_is_slave(dev->dev)) {
2299                 mlx4_ib_warn(&dev->ib_dev, "operating in qp1 tunnel mode\n");
2300                 return 0;
2301         }
2302
2303         for (i = 0; i < dev->dev->caps.sqp_demux; i++) {
2304                 if (i == mlx4_master_func_num(dev->dev))
2305                         mlx4_put_slave_node_guid(dev->dev, i, dev->ib_dev.node_guid);
2306                 else
2307                         mlx4_put_slave_node_guid(dev->dev, i, mlx4_ib_gen_node_guid());
2308         }
2309
2310         err = mlx4_ib_init_alias_guid_service(dev);
2311         if (err) {
2312                 mlx4_ib_warn(&dev->ib_dev, "Failed init alias guid process.\n");
2313                 goto paravirt_err;
2314         }
2315         err = mlx4_ib_device_register_sysfs(dev);
2316         if (err) {
2317                 mlx4_ib_warn(&dev->ib_dev, "Failed to register sysfs\n");
2318                 goto sysfs_err;
2319         }
2320
2321         mlx4_ib_warn(&dev->ib_dev, "initializing demux service for %d qp1 clients\n",
2322                      dev->dev->caps.sqp_demux);
2323         for (i = 0; i < dev->num_ports; i++) {
2324                 union ib_gid gid;
2325                 err = __mlx4_ib_query_gid(&dev->ib_dev, i + 1, 0, &gid, 1);
2326                 if (err)
2327                         goto demux_err;
2328                 dev->sriov.demux[i].guid_cache[0] = gid.global.interface_id;
2329                 atomic64_set(&dev->sriov.demux[i].subnet_prefix,
2330                              be64_to_cpu(gid.global.subnet_prefix));
2331                 err = alloc_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1,
2332                                       &dev->sriov.sqps[i]);
2333                 if (err)
2334                         goto demux_err;
2335                 err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);
2336                 if (err)
2337                         goto free_pv;
2338         }
2339         mlx4_ib_master_tunnels(dev, 1);
2340         return 0;
2341
2342 free_pv:
2343         free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
2344 demux_err:
2345         while (--i >= 0) {
2346                 free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);
2347                 mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2348         }
2349         mlx4_ib_device_unregister_sysfs(dev);
2350
2351 sysfs_err:
2352         mlx4_ib_destroy_alias_guid_service(dev);
2353
2354 paravirt_err:
2355         mlx4_ib_cm_paravirt_clean(dev, -1);
2356
2357         return err;
2358 }
2359
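     /*
      * Mark the device as going down and, on the master, tear down all
      * paravirt resources in roughly the reverse order of
      * mlx4_ib_init_sriov().
      */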
2360 void mlx4_ib_close_sriov(struct mlx4_ib_dev *dev)
2361 {
2362         int i;
2363         unsigned long flags;
2364
2365         if (!mlx4_is_mfunc(dev->dev))
2366                 return;
2367
2368         spin_lock_irqsave(&dev->sriov.going_down_lock, flags);
2369         dev->sriov.is_going_down = 1;
2370         spin_unlock_irqrestore(&dev->sriov.going_down_lock, flags);
2371         if (mlx4_is_master(dev->dev)) {
2372                 for (i = 0; i < dev->num_ports; i++) {
2373                         flush_workqueue(dev->sriov.demux[i].ud_wq);
2374                         mlx4_ib_free_sqp_ctx(dev->sriov.sqps[i]);
2375                         kfree(dev->sriov.sqps[i]);
2376                         dev->sriov.sqps[i] = NULL;
2377                         mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]);
2378                 }
2379
2380                 mlx4_ib_cm_paravirt_clean(dev, -1);
2381                 mlx4_ib_destroy_alias_guid_service(dev);
2382                 mlx4_ib_device_unregister_sysfs(dev);
2383         }
2384 }