6fe9f76ae656b6b11056154eafcc34e83e844666
[sfrench/cifs-2.6.git] / drivers / net / ethernet / mellanox / mlx4 / resource_tracker.c
1 /*
2  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
3  * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
4  * All rights reserved.
5  * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
6  *
7  * This software is available to you under a choice of one of two
8  * licenses.  You may choose to be licensed under the terms of the GNU
9  * General Public License (GPL) Version 2, available from the file
10  * COPYING in the main directory of this source tree, or the
11  * OpenIB.org BSD license below:
12  *
13  *     Redistribution and use in source and binary forms, with or
14  *     without modification, are permitted provided that the following
15  *     conditions are met:
16  *
17  *      - Redistributions of source code must retain the above
18  *        copyright notice, this list of conditions and the following
19  *        disclaimer.
20  *
21  *      - Redistributions in binary form must reproduce the above
22  *        copyright notice, this list of conditions and the following
23  *        disclaimer in the documentation and/or other materials
24  *        provided with the distribution.
25  *
26  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
33  * SOFTWARE.
34  */
35
36 #include <linux/sched.h>
37 #include <linux/pci.h>
38 #include <linux/errno.h>
39 #include <linux/kernel.h>
40 #include <linux/io.h>
41 #include <linux/slab.h>
42 #include <linux/mlx4/cmd.h>
43 #include <linux/mlx4/qp.h>
44 #include <linux/if_ether.h>
45 #include <linux/etherdevice.h>
46
47 #include "mlx4.h"
48 #include "fw.h"
49 #include "mlx4_stats.h"
50
#define MLX4_MAC_VALID          (1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT       2
#define MLX4_VF_COUNTERS_PER_PORT       1

/* A MAC address registered on behalf of a function, kept on the slave's
 * per-resource list.  ref_count counts duplicate registrations so the
 * entry is only truly released on the last unregister.
 */
struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;	/* index in the port's source-MAC table */
	u8 port;
};

/* A VLAN registered on behalf of a function; same ref_count scheme as
 * struct mac_res.
 */
struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;	/* index in the port's VLAN table */
	u8 port;
};

/* Fields common to every tracked resource.  Each resource is linked both
 * on its owner's per-slave list and into the per-type red-black tree
 * keyed by res_id (see res_tracker_lookup()/res_tracker_insert()).
 */
struct res_common {
	struct list_head	list;		/* entry in owner's res_list */
	struct rb_node		node;		/* entry in per-type rb-tree */
	u64			res_id;		/* unique id, rb-tree key */
	int			owner;		/* owning function (slave) */
	int			state;		/* current state-machine state */
	int			from_state;	/* transition bookkeeping */
	int			to_state;
	int			removing;	/* NOTE(review): presumably set
						 * while teardown is in flight -
						 * confirm against users */
	const char		*func_name;	/* debug: last mover of state -
						 * set by code outside chunk */
};

enum {
	/* shared "busy" value for every per-type state enum below */
	RES_ANY_BUSY = 1
};

/* A multicast group attached to a QP; presumably kept on a QP's
 * mcg_list (see struct res_qp) - attach/detach code not in this chunk.
 */
struct res_gid {
	struct list_head	list;
	u8			gid[16];
	enum mlx4_protocol	prot;
	enum mlx4_steer_type	steer;
	u64			reg_id;	/* firmware registration id */
};
94
enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

/* Tracker state for one QP owned by a slave. */
struct res_qp {
	struct res_common	com;
	struct res_mtt         *mtt;	/* MTT backing the QP buffers */
	struct res_cq          *rcq;	/* receive CQ */
	struct res_cq          *scq;	/* send CQ */
	struct res_srq         *srq;	/* attached SRQ, if any */
	struct list_head	mcg_list;	/* attached multicast groups */
	spinlock_t		mcg_spl;	/* protects mcg_list */
	int			local_qpn;
	atomic_t		ref_count;
	u32			qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8			sched_queue;
	__be32			param3;
	u8			vlan_control;
	u8			fvl_rx;
	u8			pri_path_fl;
	u8			vlan_index;
	u8			feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};
134 static inline const char *mtt_states_str(enum res_mtt_states state)
135 {
136         switch (state) {
137         case RES_MTT_BUSY: return "RES_MTT_BUSY";
138         case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
139         default: return "Unknown";
140         }
141 }
142
/* Tracker state for an MTT (memory translation table) range. */
struct res_mtt {
	struct res_common	com;
	int			order;		/* log2 size of the range */
	atomic_t		ref_count;	/* users (QPs/CQs/SRQs/MPTs) */
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

/* Tracker state for a memory protection table entry (MR/MW). */
struct res_mpt {
	struct res_common	com;
	struct res_mtt         *mtt;	/* backing MTT once mapped */
	int			key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

/* Tracker state for an event queue. */
struct res_eq {
	struct res_common	com;
	struct res_mtt         *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

/* Tracker state for a completion queue. */
struct res_cq {
	struct res_common	com;
	struct res_mtt         *mtt;
	atomic_t		ref_count;	/* QPs/SRQs referencing the CQ */
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

/* Tracker state for a shared receive queue. */
struct res_srq {
	struct res_common	com;
	struct res_mtt         *mtt;
	struct res_cq          *cq;	/* XRC SRQs carry a CQ */
	atomic_t		ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

/* Tracker state for a flow/traffic counter. */
struct res_counter {
	struct res_common	com;
	int			port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

/* Tracker state for an XRC domain. */
struct res_xrcdn {
	struct res_common	com;
	int			port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

/* Tracker state for a flow-steering rule. */
struct res_fs_rule {
	struct res_common	com;
	int			qpn;
	/* VF DMFS mbox with port flipped */
	void			*mirr_mbox;
	/* > 0 --> apply mirror when getting into HA mode      */
	/* = 0 --> un-apply mirror when getting out of HA mode */
	u32			mirr_mbox_size;
	struct list_head	mirr_list;
	u64			mirr_rule_id;
};
234
235 static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
236 {
237         struct rb_node *node = root->rb_node;
238
239         while (node) {
240                 struct res_common *res = rb_entry(node, struct res_common,
241                                                   node);
242
243                 if (res_id < res->res_id)
244                         node = node->rb_left;
245                 else if (res_id > res->res_id)
246                         node = node->rb_right;
247                 else
248                         return res;
249         }
250         return NULL;
251 }
252
253 static int res_tracker_insert(struct rb_root *root, struct res_common *res)
254 {
255         struct rb_node **new = &(root->rb_node), *parent = NULL;
256
257         /* Figure out where to put new node */
258         while (*new) {
259                 struct res_common *this = rb_entry(*new, struct res_common,
260                                                    node);
261
262                 parent = *new;
263                 if (res->res_id < this->res_id)
264                         new = &((*new)->rb_left);
265                 else if (res->res_id > this->res_id)
266                         new = &((*new)->rb_right);
267                 else
268                         return -EEXIST;
269         }
270
271         /* Add new node and rebalance tree. */
272         rb_link_node(&res->node, parent, new);
273         rb_insert_color(&res->node, root);
274
275         return 0;
276 }
277
/* QP state transitions whose mailboxes the wrapper functions inspect. */
enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};
286
287 /* For Debug uses */
288 static const char *resource_str(enum mlx4_resource rt)
289 {
290         switch (rt) {
291         case RES_QP: return "RES_QP";
292         case RES_CQ: return "RES_CQ";
293         case RES_SRQ: return "RES_SRQ";
294         case RES_MPT: return "RES_MPT";
295         case RES_MTT: return "RES_MTT";
296         case RES_MAC: return  "RES_MAC";
297         case RES_VLAN: return  "RES_VLAN";
298         case RES_EQ: return "RES_EQ";
299         case RES_COUNTER: return "RES_COUNTER";
300         case RES_FS_RULE: return "RES_FS_RULE";
301         case RES_XRCD: return "RES_XRCD";
302         default: return "Unknown resource type !!!";
303         };
304 }
305
306 static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
/* Charge @count instances of @res_type to function @slave (per @port for
 * port-scoped resources, @port == 0 for global ones).
 *
 * Accounting model: each function has a "guaranteed" amount carved out of
 * the reserved pool and a "quota" hard cap.  Requests are satisfied from
 * the function's reservation while below its guarantee and from the shared
 * free pool beyond that, but never past the quota and never eating into
 * what is still reserved for other functions' guarantees.
 *
 * Returns 0 on success, -EINVAL if the grant cannot be made.
 */
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EINVAL;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	/* port-scoped counters live in a (port x function) matrix,
	 * global ones in a flat per-function array
	 */
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
		(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		/* whole request fits inside this function's reservation */
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		/* the free pool must keep enough to back what is still
		 * reserved for other functions
		 */
		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}
377
378 static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
379                                     enum mlx4_resource res_type, int count,
380                                     int port)
381 {
382         struct mlx4_priv *priv = mlx4_priv(dev);
383         struct resource_allocator *res_alloc =
384                 &priv->mfunc.master.res_tracker.res_alloc[res_type];
385         int allocated, guaranteed, from_rsvd;
386
387         if (slave > dev->persist->num_vfs)
388                 return;
389
390         spin_lock(&res_alloc->alloc_lock);
391
392         allocated = (port > 0) ?
393                 res_alloc->allocated[(port - 1) *
394                 (dev->persist->num_vfs + 1) + slave] :
395                 res_alloc->allocated[slave];
396         guaranteed = res_alloc->guaranteed[slave];
397
398         if (allocated - count >= guaranteed) {
399                 from_rsvd = 0;
400         } else {
401                 /* portion may need to be returned to reserved area */
402                 if (allocated - guaranteed > 0)
403                         from_rsvd = count - (allocated - guaranteed);
404                 else
405                         from_rsvd = count;
406         }
407
408         if (port > 0) {
409                 res_alloc->allocated[(port - 1) *
410                 (dev->persist->num_vfs + 1) + slave] -= count;
411                 res_alloc->res_port_free[port - 1] += count;
412                 res_alloc->res_port_rsvd[port - 1] += from_rsvd;
413         } else {
414                 res_alloc->allocated[slave] -= count;
415                 res_alloc->res_free += count;
416                 res_alloc->res_reserved += from_rsvd;
417         }
418
419         spin_unlock(&res_alloc->alloc_lock);
420         return;
421 }
422
423 static inline void initialize_res_quotas(struct mlx4_dev *dev,
424                                          struct resource_allocator *res_alloc,
425                                          enum mlx4_resource res_type,
426                                          int vf, int num_instances)
427 {
428         res_alloc->guaranteed[vf] = num_instances /
429                                     (2 * (dev->persist->num_vfs + 1));
430         res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
431         if (vf == mlx4_master_func_num(dev)) {
432                 res_alloc->res_free = num_instances;
433                 if (res_type == RES_MTT) {
434                         /* reserved mtts will be taken out of the PF allocation */
435                         res_alloc->res_free += dev->caps.reserved_mtts;
436                         res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
437                         res_alloc->quota[vf] += dev->caps.reserved_mtts;
438                 }
439         }
440 }
441
442 void mlx4_init_quotas(struct mlx4_dev *dev)
443 {
444         struct mlx4_priv *priv = mlx4_priv(dev);
445         int pf;
446
447         /* quotas for VFs are initialized in mlx4_slave_cap */
448         if (mlx4_is_slave(dev))
449                 return;
450
451         if (!mlx4_is_mfunc(dev)) {
452                 dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
453                         mlx4_num_reserved_sqps(dev);
454                 dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
455                 dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
456                 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
457                 dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
458                 return;
459         }
460
461         pf = mlx4_master_func_num(dev);
462         dev->quotas.qp =
463                 priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
464         dev->quotas.cq =
465                 priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
466         dev->quotas.srq =
467                 priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
468         dev->quotas.mtt =
469                 priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
470         dev->quotas.mpt =
471                 priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
472 }
473
474 static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
475 {
476         /* reduce the sink counter */
477         return (dev->caps.max_counters - 1 -
478                 (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
479                 / MLX4_MAX_PORTS;
480 }
481
482 int mlx4_init_resource_tracker(struct mlx4_dev *dev)
483 {
484         struct mlx4_priv *priv = mlx4_priv(dev);
485         int i, j;
486         int t;
487         int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);
488
489         priv->mfunc.master.res_tracker.slave_list =
490                 kzalloc(dev->num_slaves * sizeof(struct slave_list),
491                         GFP_KERNEL);
492         if (!priv->mfunc.master.res_tracker.slave_list)
493                 return -ENOMEM;
494
495         for (i = 0 ; i < dev->num_slaves; i++) {
496                 for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
497                         INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
498                                        slave_list[i].res_list[t]);
499                 mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
500         }
501
502         mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
503                  dev->num_slaves);
504         for (i = 0 ; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
505                 priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;
506
507         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
508                 struct resource_allocator *res_alloc =
509                         &priv->mfunc.master.res_tracker.res_alloc[i];
510                 res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
511                                            sizeof(int), GFP_KERNEL);
512                 res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
513                                                 sizeof(int), GFP_KERNEL);
514                 if (i == RES_MAC || i == RES_VLAN)
515                         res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
516                                                        (dev->persist->num_vfs
517                                                        + 1) *
518                                                        sizeof(int), GFP_KERNEL);
519                 else
520                         res_alloc->allocated = kzalloc((dev->persist->
521                                                         num_vfs + 1) *
522                                                        sizeof(int), GFP_KERNEL);
523                 /* Reduce the sink counter */
524                 if (i == RES_COUNTER)
525                         res_alloc->res_free = dev->caps.max_counters - 1;
526
527                 if (!res_alloc->quota || !res_alloc->guaranteed ||
528                     !res_alloc->allocated)
529                         goto no_mem_err;
530
531                 spin_lock_init(&res_alloc->alloc_lock);
532                 for (t = 0; t < dev->persist->num_vfs + 1; t++) {
533                         struct mlx4_active_ports actv_ports =
534                                 mlx4_get_active_ports(dev, t);
535                         switch (i) {
536                         case RES_QP:
537                                 initialize_res_quotas(dev, res_alloc, RES_QP,
538                                                       t, dev->caps.num_qps -
539                                                       dev->caps.reserved_qps -
540                                                       mlx4_num_reserved_sqps(dev));
541                                 break;
542                         case RES_CQ:
543                                 initialize_res_quotas(dev, res_alloc, RES_CQ,
544                                                       t, dev->caps.num_cqs -
545                                                       dev->caps.reserved_cqs);
546                                 break;
547                         case RES_SRQ:
548                                 initialize_res_quotas(dev, res_alloc, RES_SRQ,
549                                                       t, dev->caps.num_srqs -
550                                                       dev->caps.reserved_srqs);
551                                 break;
552                         case RES_MPT:
553                                 initialize_res_quotas(dev, res_alloc, RES_MPT,
554                                                       t, dev->caps.num_mpts -
555                                                       dev->caps.reserved_mrws);
556                                 break;
557                         case RES_MTT:
558                                 initialize_res_quotas(dev, res_alloc, RES_MTT,
559                                                       t, dev->caps.num_mtts -
560                                                       dev->caps.reserved_mtts);
561                                 break;
562                         case RES_MAC:
563                                 if (t == mlx4_master_func_num(dev)) {
564                                         int max_vfs_pport = 0;
565                                         /* Calculate the max vfs per port for */
566                                         /* both ports.                        */
567                                         for (j = 0; j < dev->caps.num_ports;
568                                              j++) {
569                                                 struct mlx4_slaves_pport slaves_pport =
570                                                         mlx4_phys_to_slaves_pport(dev, j + 1);
571                                                 unsigned current_slaves =
572                                                         bitmap_weight(slaves_pport.slaves,
573                                                                       dev->caps.num_ports) - 1;
574                                                 if (max_vfs_pport < current_slaves)
575                                                         max_vfs_pport =
576                                                                 current_slaves;
577                                         }
578                                         res_alloc->quota[t] =
579                                                 MLX4_MAX_MAC_NUM -
580                                                 2 * max_vfs_pport;
581                                         res_alloc->guaranteed[t] = 2;
582                                         for (j = 0; j < MLX4_MAX_PORTS; j++)
583                                                 res_alloc->res_port_free[j] =
584                                                         MLX4_MAX_MAC_NUM;
585                                 } else {
586                                         res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
587                                         res_alloc->guaranteed[t] = 2;
588                                 }
589                                 break;
590                         case RES_VLAN:
591                                 if (t == mlx4_master_func_num(dev)) {
592                                         res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
593                                         res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
594                                         for (j = 0; j < MLX4_MAX_PORTS; j++)
595                                                 res_alloc->res_port_free[j] =
596                                                         res_alloc->quota[t];
597                                 } else {
598                                         res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
599                                         res_alloc->guaranteed[t] = 0;
600                                 }
601                                 break;
602                         case RES_COUNTER:
603                                 res_alloc->quota[t] = dev->caps.max_counters;
604                                 if (t == mlx4_master_func_num(dev))
605                                         res_alloc->guaranteed[t] =
606                                                 MLX4_PF_COUNTERS_PER_PORT *
607                                                 MLX4_MAX_PORTS;
608                                 else if (t <= max_vfs_guarantee_counter)
609                                         res_alloc->guaranteed[t] =
610                                                 MLX4_VF_COUNTERS_PER_PORT *
611                                                 MLX4_MAX_PORTS;
612                                 else
613                                         res_alloc->guaranteed[t] = 0;
614                                 res_alloc->res_free -= res_alloc->guaranteed[t];
615                                 break;
616                         default:
617                                 break;
618                         }
619                         if (i == RES_MAC || i == RES_VLAN) {
620                                 for (j = 0; j < dev->caps.num_ports; j++)
621                                         if (test_bit(j, actv_ports.ports))
622                                                 res_alloc->res_port_rsvd[j] +=
623                                                         res_alloc->guaranteed[t];
624                         } else {
625                                 res_alloc->res_reserved += res_alloc->guaranteed[t];
626                         }
627                 }
628         }
629         spin_lock_init(&priv->mfunc.master.res_tracker.lock);
630         return 0;
631
632 no_mem_err:
633         for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
634                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
635                 priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
636                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
637                 priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
638                 kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
639                 priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
640         }
641         return -ENOMEM;
642 }
643
/* Tear down the resource tracker.  @type selects the scope:
 * - RES_TR_FREE_SLAVES_ONLY:  reclaim resources still held by slaves,
 *   keep the tracker structures;
 * - RES_TR_FREE_STRUCTS_ONLY: free only the accounting arrays and the
 *   slave list;
 * - RES_TR_FREE_ALL:          both of the above, including the master's
 *   own resources.
 */
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				/* the master's own slot is skipped unless
				 * freeing everything
				 */
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			/* free and NULL the accounting arrays so a repeat
			 * call is harmless
			 */
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}
679
680 static void update_pkey_index(struct mlx4_dev *dev, int slave,
681                               struct mlx4_cmd_mailbox *inbox)
682 {
683         u8 sched = *(u8 *)(inbox->buf + 64);
684         u8 orig_index = *(u8 *)(inbox->buf + 35);
685         u8 new_index;
686         struct mlx4_priv *priv = mlx4_priv(dev);
687         int port;
688
689         port = (sched >> 6 & 1) + 1;
690
691         new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
692         *(u8 *)(inbox->buf + 35) = new_index;
693 }
694
/* Rewrite the mgid_index fields of a modify-QP mailbox so a slave's
 * GID index is translated into the shared physical GID table.  On IB
 * ports the slave number itself is used as the index.
 */
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	/* QP context starts after the 8-byte command prefix; the optpar
	 * mask sits at the head of the mailbox
	 */
	struct mlx4_qp_context	*qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar	optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32			ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int			port;

	if (MLX4_QP_ST_UD == ts) {
		/* bit 6 of sched_queue selects the port (0-based) */
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			/* 0x80: top bit set alongside the base GID index -
			 * NOTE(review): presumably a "force GID" flag for FW,
			 * confirm against the firmware spec
			 */
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				/* offset into the slave's slice of the
				 * physical GID table, masked to 7 bits
				 */
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			/* same translation for the alternate path */
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}
734
735 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
736                           u8 slave, int port);
737
/*
 * update_vport_qp_param() - apply master-enforced per-VF (vport) policy
 * to a QP context wrapped on behalf of a slave: counter binding, VST
 * VLAN insertion/stripping, link-state traffic blocking and spoof-check
 * source-MAC forcing.
 *
 * @dev:   mlx4 device
 * @inbox: mailbox holding the optpar dword followed by the QP context
 *         at offset 8
 * @slave: slave (VF) issuing the command
 * @qpn:   QP number being modified
 *
 * Returns 0 on success or a negative errno from counter handling or
 * mlx4_update_qp().
 */
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context	*qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port, err = 0;

	/* bit 6 of sched_queue encodes the port (1 or 2) */
	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;

	err = handle_counter(dev, qpc, slave, port);
	if (err)
		goto out;

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
		if (qp_type == MLX4_QP_ST_UD ||
		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
				/* device can clear VSD during INIT2RTR:
				 * request it via the optpar mask
				 */
				*(__be32 *)inbox->buf =
					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
					MLX4_QP_OPTPAR_VLAN_STRIPPING);
				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
			} else {
				/* otherwise issue a separate UPDATE_QP
				 * command to clear VSD for this QP
				 */
				struct mlx4_update_qp_params params = {.flags = 0};

				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
				if (err)
					goto out;
			}
		}

		/* preserve IF_COUNTER flag */
		qpc->pri_path.vlan_control &=
			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			/* admin link-down: block tagged and untagged
			 * traffic in both directions
			 */
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
				/* vst QinQ should block untagged on TX,
				 * but cvlan is in payload and phv is set so
				 * hw see it as untagged. Block tagged instead.
				 */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			} else { /* vst 802.1Q */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			}
		} else { /* priority tagged */
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
		/* SV vs CV selects svlan (802.1ad) vs cvlan (802.1Q) */
		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
			qpc->pri_path.fl |= MLX4_FL_SV;
		else
			qpc->pri_path.fl |= MLX4_FL_CV;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		/* keep low sched_queue bits, overwrite the qos field */
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		qpc->qos_vport = vp_oper->state.qos_vport;
	}
	if (vp_oper->state.spoofchk) {
		/* force the source MAC to the index assigned to this VF */
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
out:
	return err;
}
835
836 static int mpt_mask(struct mlx4_dev *dev)
837 {
838         return dev->caps.num_mpts - 1;
839 }
840
841 static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
842 {
843         switch (t) {
844         case RES_QP:
845                 return "QP";
846         case RES_CQ:
847                 return "CQ";
848         case RES_SRQ:
849                 return "SRQ";
850         case RES_XRCD:
851                 return "XRCD";
852         case RES_MPT:
853                 return "MPT";
854         case RES_MTT:
855                 return "MTT";
856         case RES_MAC:
857                 return "MAC";
858         case RES_VLAN:
859                 return "VLAN";
860         case RES_COUNTER:
861                 return "COUNTER";
862         case RES_FS_RULE:
863                 return "FS_RULE";
864         case RES_EQ:
865                 return "EQ";
866         default:
867                 return "INVALID RESOURCE";
868         }
869 }
870
871 static void *find_res(struct mlx4_dev *dev, u64 res_id,
872                       enum mlx4_resource type)
873 {
874         struct mlx4_priv *priv = mlx4_priv(dev);
875
876         return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
877                                   res_id);
878 }
879
/*
 * _get_res() - look up a resource and mark it busy on behalf of @slave.
 *
 * On success the previous state is saved in ->from_state, the state is
 * set to RES_ANY_BUSY and @func_name (the caller, supplied by the
 * get_res() wrapper macro) is recorded for debugging.  The caller must
 * release the resource with put_res().
 *
 * Returns 0, -ENONET if the resource does not exist, -EBUSY if another
 * caller currently holds it, or -EPERM if @slave is not the owner.
 * NOTE(review): -ENONET (not -ENOENT) is the historical return value
 * here; callers may depend on it, so it is left unchanged.
 */
static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type,
		    void *res, const char *func_name)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		mlx4_warn(dev,
			  "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
			  func_name, slave, res_id, mlx4_resource_type_to_str(type),
			  r->func_name);
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	r->func_name = func_name;

	if (res)
		/* hand the busy resource back through the opaque out-pointer */
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}
919
920 #define get_res(dev, slave, res_id, type, res) \
921         _get_res((dev), (slave), (res_id), (type), (res), __func__)
922
/*
 * mlx4_get_slave_from_resource_id() - report which slave owns resource
 * @res_id of @type.  For QPs only the low 24 bits identify the entry.
 *
 * Returns 0 and sets *@slave on success, -ENOENT if not tracked.
 *
 * NOTE(review): this takes mlx4_tlock with plain spin_lock() while the
 * rest of this file uses spin_lock_irq(); presumably all callers run in
 * a context where that is safe - confirm against the call sites.
 */
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{

	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}
945
946 static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
947                     enum mlx4_resource type)
948 {
949         struct res_common *r;
950
951         spin_lock_irq(mlx4_tlock(dev));
952         r = find_res(dev, res_id, type);
953         if (r) {
954                 r->state = r->from_state;
955                 r->func_name = "";
956         }
957         spin_unlock_irq(mlx4_tlock(dev));
958 }
959
960 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
961                              u64 in_param, u64 *out_param, int port);
962
963 static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
964                                    int counter_index)
965 {
966         struct res_common *r;
967         struct res_counter *counter;
968         int ret = 0;
969
970         if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
971                 return ret;
972
973         spin_lock_irq(mlx4_tlock(dev));
974         r = find_res(dev, counter_index, RES_COUNTER);
975         if (!r || r->owner != slave) {
976                 ret = -EINVAL;
977         } else {
978                 counter = container_of(r, struct res_counter, com);
979                 if (!counter->port)
980                         counter->port = port;
981         }
982
983         spin_unlock_irq(mlx4_tlock(dev));
984         return ret;
985 }
986
/*
 * handle_unexisting_counter() - the QP context does not yet reference a
 * real counter.  Reuse a counter this slave already owns on @port if one
 * exists; otherwise reserve a new one and write its index into the
 * context.
 *
 * Failure handling is deliberately soft:
 *  - -ENOENT from the allocator leaves the sink counter in place and
 *    returns 0;
 *  - -ENOSPC (pool exhausted) falls through to the success branch, which
 *    assigns counter_idx - still the sink index - and returns 0;
 *  - any other error is logged and propagated.
 */
static int handle_unexisting_counter(struct mlx4_dev *dev,
				     struct mlx4_qp_context *qpc, u8 slave,
				     int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (port == counter->port) {
			/* reuse the first counter already bound to this port */
			qpc->pri_path.counter_index  = counter->com.res_id;
			spin_unlock_irq(mlx4_tlock(dev));
			return 0;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	/* No existing counter, need to allocate a new counter */
	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
				port);
	if (err == -ENOENT) {
		err = 0;
	} else if (err && err != -ENOSPC) {
		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
			 __func__, slave, err);
	} else {
		qpc->pri_path.counter_index = counter_idx;
		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
			 __func__, slave, qpc->pri_path.counter_index);
		err = 0;
	}

	return err;
}
1028
1029 static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
1030                           u8 slave, int port)
1031 {
1032         if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
1033                 return handle_existing_counter(dev, slave, port,
1034                                                qpc->pri_path.counter_index);
1035
1036         return handle_unexisting_counter(dev, qpc, slave, port);
1037 }
1038
1039 static struct res_common *alloc_qp_tr(int id)
1040 {
1041         struct res_qp *ret;
1042
1043         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1044         if (!ret)
1045                 return NULL;
1046
1047         ret->com.res_id = id;
1048         ret->com.state = RES_QP_RESERVED;
1049         ret->local_qpn = id;
1050         INIT_LIST_HEAD(&ret->mcg_list);
1051         spin_lock_init(&ret->mcg_spl);
1052         atomic_set(&ret->ref_count, 0);
1053
1054         return &ret->com;
1055 }
1056
1057 static struct res_common *alloc_mtt_tr(int id, int order)
1058 {
1059         struct res_mtt *ret;
1060
1061         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1062         if (!ret)
1063                 return NULL;
1064
1065         ret->com.res_id = id;
1066         ret->order = order;
1067         ret->com.state = RES_MTT_ALLOCATED;
1068         atomic_set(&ret->ref_count, 0);
1069
1070         return &ret->com;
1071 }
1072
1073 static struct res_common *alloc_mpt_tr(int id, int key)
1074 {
1075         struct res_mpt *ret;
1076
1077         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1078         if (!ret)
1079                 return NULL;
1080
1081         ret->com.res_id = id;
1082         ret->com.state = RES_MPT_RESERVED;
1083         ret->key = key;
1084
1085         return &ret->com;
1086 }
1087
1088 static struct res_common *alloc_eq_tr(int id)
1089 {
1090         struct res_eq *ret;
1091
1092         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1093         if (!ret)
1094                 return NULL;
1095
1096         ret->com.res_id = id;
1097         ret->com.state = RES_EQ_RESERVED;
1098
1099         return &ret->com;
1100 }
1101
1102 static struct res_common *alloc_cq_tr(int id)
1103 {
1104         struct res_cq *ret;
1105
1106         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1107         if (!ret)
1108                 return NULL;
1109
1110         ret->com.res_id = id;
1111         ret->com.state = RES_CQ_ALLOCATED;
1112         atomic_set(&ret->ref_count, 0);
1113
1114         return &ret->com;
1115 }
1116
1117 static struct res_common *alloc_srq_tr(int id)
1118 {
1119         struct res_srq *ret;
1120
1121         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1122         if (!ret)
1123                 return NULL;
1124
1125         ret->com.res_id = id;
1126         ret->com.state = RES_SRQ_ALLOCATED;
1127         atomic_set(&ret->ref_count, 0);
1128
1129         return &ret->com;
1130 }
1131
1132 static struct res_common *alloc_counter_tr(int id, int port)
1133 {
1134         struct res_counter *ret;
1135
1136         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1137         if (!ret)
1138                 return NULL;
1139
1140         ret->com.res_id = id;
1141         ret->com.state = RES_COUNTER_ALLOCATED;
1142         ret->port = port;
1143
1144         return &ret->com;
1145 }
1146
1147 static struct res_common *alloc_xrcdn_tr(int id)
1148 {
1149         struct res_xrcdn *ret;
1150
1151         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1152         if (!ret)
1153                 return NULL;
1154
1155         ret->com.res_id = id;
1156         ret->com.state = RES_XRCD_ALLOCATED;
1157
1158         return &ret->com;
1159 }
1160
1161 static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
1162 {
1163         struct res_fs_rule *ret;
1164
1165         ret = kzalloc(sizeof *ret, GFP_KERNEL);
1166         if (!ret)
1167                 return NULL;
1168
1169         ret->com.res_id = id;
1170         ret->com.state = RES_FS_RULE_ALLOCATED;
1171         ret->qpn = qpn;
1172         return &ret->com;
1173 }
1174
1175 static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
1176                                    int extra)
1177 {
1178         struct res_common *ret;
1179
1180         switch (type) {
1181         case RES_QP:
1182                 ret = alloc_qp_tr(id);
1183                 break;
1184         case RES_MPT:
1185                 ret = alloc_mpt_tr(id, extra);
1186                 break;
1187         case RES_MTT:
1188                 ret = alloc_mtt_tr(id, extra);
1189                 break;
1190         case RES_EQ:
1191                 ret = alloc_eq_tr(id);
1192                 break;
1193         case RES_CQ:
1194                 ret = alloc_cq_tr(id);
1195                 break;
1196         case RES_SRQ:
1197                 ret = alloc_srq_tr(id);
1198                 break;
1199         case RES_MAC:
1200                 pr_err("implementation missing\n");
1201                 return NULL;
1202         case RES_COUNTER:
1203                 ret = alloc_counter_tr(id, extra);
1204                 break;
1205         case RES_XRCD:
1206                 ret = alloc_xrcdn_tr(id);
1207                 break;
1208         case RES_FS_RULE:
1209                 ret = alloc_fs_rule_tr(id, extra);
1210                 break;
1211         default:
1212                 return NULL;
1213         }
1214         if (ret)
1215                 ret->owner = slave;
1216
1217         return ret;
1218 }
1219
1220 int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
1221                           struct mlx4_counter *data)
1222 {
1223         struct mlx4_priv *priv = mlx4_priv(dev);
1224         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1225         struct res_common *tmp;
1226         struct res_counter *counter;
1227         int *counters_arr;
1228         int i = 0, err = 0;
1229
1230         memset(data, 0, sizeof(*data));
1231
1232         counters_arr = kmalloc_array(dev->caps.max_counters,
1233                                      sizeof(*counters_arr), GFP_KERNEL);
1234         if (!counters_arr)
1235                 return -ENOMEM;
1236
1237         spin_lock_irq(mlx4_tlock(dev));
1238         list_for_each_entry(tmp,
1239                             &tracker->slave_list[slave].res_list[RES_COUNTER],
1240                             list) {
1241                 counter = container_of(tmp, struct res_counter, com);
1242                 if (counter->port == port) {
1243                         counters_arr[i] = (int)tmp->res_id;
1244                         i++;
1245                 }
1246         }
1247         spin_unlock_irq(mlx4_tlock(dev));
1248         counters_arr[i] = -1;
1249
1250         i = 0;
1251
1252         while (counters_arr[i] != -1) {
1253                 err = mlx4_get_counter_stats(dev, counters_arr[i], data,
1254                                              0);
1255                 if (err) {
1256                         memset(data, 0, sizeof(*data));
1257                         goto table_changed;
1258                 }
1259                 i++;
1260         }
1261
1262 table_changed:
1263         kfree(counters_arr);
1264         return 0;
1265 }
1266
1267 static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
1268                          enum mlx4_resource type, int extra)
1269 {
1270         int i;
1271         int err;
1272         struct mlx4_priv *priv = mlx4_priv(dev);
1273         struct res_common **res_arr;
1274         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1275         struct rb_root *root = &tracker->res_tree[type];
1276
1277         res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
1278         if (!res_arr)
1279                 return -ENOMEM;
1280
1281         for (i = 0; i < count; ++i) {
1282                 res_arr[i] = alloc_tr(base + i, type, slave, extra);
1283                 if (!res_arr[i]) {
1284                         for (--i; i >= 0; --i)
1285                                 kfree(res_arr[i]);
1286
1287                         kfree(res_arr);
1288                         return -ENOMEM;
1289                 }
1290         }
1291
1292         spin_lock_irq(mlx4_tlock(dev));
1293         for (i = 0; i < count; ++i) {
1294                 if (find_res(dev, base + i, type)) {
1295                         err = -EEXIST;
1296                         goto undo;
1297                 }
1298                 err = res_tracker_insert(root, res_arr[i]);
1299                 if (err)
1300                         goto undo;
1301                 list_add_tail(&res_arr[i]->list,
1302                               &tracker->slave_list[slave].res_list[type]);
1303         }
1304         spin_unlock_irq(mlx4_tlock(dev));
1305         kfree(res_arr);
1306
1307         return 0;
1308
1309 undo:
1310         for (--i; i >= 0; --i) {
1311                 rb_erase(&res_arr[i]->node, root);
1312                 list_del_init(&res_arr[i]->list);
1313         }
1314
1315         spin_unlock_irq(mlx4_tlock(dev));
1316
1317         for (i = 0; i < count; ++i)
1318                 kfree(res_arr[i]);
1319
1320         kfree(res_arr);
1321
1322         return err;
1323 }
1324
1325 static int remove_qp_ok(struct res_qp *res)
1326 {
1327         if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
1328             !list_empty(&res->mcg_list)) {
1329                 pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
1330                        res->com.state, atomic_read(&res->ref_count));
1331                 return -EBUSY;
1332         } else if (res->com.state != RES_QP_RESERVED) {
1333                 return -EPERM;
1334         }
1335
1336         return 0;
1337 }
1338
1339 static int remove_mtt_ok(struct res_mtt *res, int order)
1340 {
1341         if (res->com.state == RES_MTT_BUSY ||
1342             atomic_read(&res->ref_count)) {
1343                 pr_devel("%s-%d: state %s, ref_count %d\n",
1344                          __func__, __LINE__,
1345                          mtt_states_str(res->com.state),
1346                          atomic_read(&res->ref_count));
1347                 return -EBUSY;
1348         } else if (res->com.state != RES_MTT_ALLOCATED)
1349                 return -EPERM;
1350         else if (res->order != order)
1351                 return -EINVAL;
1352
1353         return 0;
1354 }
1355
1356 static int remove_mpt_ok(struct res_mpt *res)
1357 {
1358         if (res->com.state == RES_MPT_BUSY)
1359                 return -EBUSY;
1360         else if (res->com.state != RES_MPT_RESERVED)
1361                 return -EPERM;
1362
1363         return 0;
1364 }
1365
1366 static int remove_eq_ok(struct res_eq *res)
1367 {
1368         if (res->com.state == RES_MPT_BUSY)
1369                 return -EBUSY;
1370         else if (res->com.state != RES_MPT_RESERVED)
1371                 return -EPERM;
1372
1373         return 0;
1374 }
1375
1376 static int remove_counter_ok(struct res_counter *res)
1377 {
1378         if (res->com.state == RES_COUNTER_BUSY)
1379                 return -EBUSY;
1380         else if (res->com.state != RES_COUNTER_ALLOCATED)
1381                 return -EPERM;
1382
1383         return 0;
1384 }
1385
1386 static int remove_xrcdn_ok(struct res_xrcdn *res)
1387 {
1388         if (res->com.state == RES_XRCD_BUSY)
1389                 return -EBUSY;
1390         else if (res->com.state != RES_XRCD_ALLOCATED)
1391                 return -EPERM;
1392
1393         return 0;
1394 }
1395
1396 static int remove_fs_rule_ok(struct res_fs_rule *res)
1397 {
1398         if (res->com.state == RES_FS_RULE_BUSY)
1399                 return -EBUSY;
1400         else if (res->com.state != RES_FS_RULE_ALLOCATED)
1401                 return -EPERM;
1402
1403         return 0;
1404 }
1405
1406 static int remove_cq_ok(struct res_cq *res)
1407 {
1408         if (res->com.state == RES_CQ_BUSY)
1409                 return -EBUSY;
1410         else if (res->com.state != RES_CQ_ALLOCATED)
1411                 return -EPERM;
1412
1413         return 0;
1414 }
1415
1416 static int remove_srq_ok(struct res_srq *res)
1417 {
1418         if (res->com.state == RES_SRQ_BUSY)
1419                 return -EBUSY;
1420         else if (res->com.state != RES_SRQ_ALLOCATED)
1421                 return -EPERM;
1422
1423         return 0;
1424 }
1425
1426 static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
1427 {
1428         switch (type) {
1429         case RES_QP:
1430                 return remove_qp_ok((struct res_qp *)res);
1431         case RES_CQ:
1432                 return remove_cq_ok((struct res_cq *)res);
1433         case RES_SRQ:
1434                 return remove_srq_ok((struct res_srq *)res);
1435         case RES_MPT:
1436                 return remove_mpt_ok((struct res_mpt *)res);
1437         case RES_MTT:
1438                 return remove_mtt_ok((struct res_mtt *)res, extra);
1439         case RES_MAC:
1440                 return -EOPNOTSUPP;
1441         case RES_EQ:
1442                 return remove_eq_ok((struct res_eq *)res);
1443         case RES_COUNTER:
1444                 return remove_counter_ok((struct res_counter *)res);
1445         case RES_XRCD:
1446                 return remove_xrcdn_ok((struct res_xrcdn *)res);
1447         case RES_FS_RULE:
1448                 return remove_fs_rule_ok((struct res_fs_rule *)res);
1449         default:
1450                 return -EINVAL;
1451         }
1452 }
1453
/*
 * rem_res_range() - remove [base, base+count) resources of @type owned
 * by @slave from the tracker.
 *
 * Two passes under a single tlock hold: first validate every entry
 * (exists, owned by @slave, removable per remove_ok()), then erase and
 * free them all.  This keeps the operation all-or-nothing.
 */
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	/* pass 1: validate only; no side effects on failure */
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	/* pass 2: everything validated; unlink and free */
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
1492
/*
 * qp_res_start_move_to() - validate and begin a tracked-QP state change.
 *
 * If moving QP @qpn (owned by @slave) to @state is legal from its
 * current state, the QP is parked in RES_QP_BUSY with from_state and
 * to_state recorded; the caller later finalizes or aborts the move
 * (helpers not visible in this chunk).
 *
 * @alloc selects the direction of RESERVED<->MAPPED moves: freeing back
 * to RESERVED requires !alloc, allocating to MAPPED requires alloc.
 *
 * Returns 0 with *@qp set (if @qp is non-NULL), or -ENOENT/-EPERM/
 * -EBUSY/-EINVAL on lookup, ownership or transition failure.
 */
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			/* BUSY is a transient marker, never a valid target */
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					  r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			/* park the QP busy until the caller ends the move */
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
1557
/*
 * mr_res_start_move_to() - validate and begin a tracked-MPT state
 * change, parking the entry in RES_MPT_BUSY on success with from_state/
 * to_state recorded for the caller to finalize or abort.
 *
 * Legal moves: RESERVED <- MAPPED, MAPPED <- {RESERVED, HW},
 * HW <- MAPPED.  BUSY is never a valid target.
 *
 * Returns 0 with *@mpt set (if @mpt is non-NULL), or -ENOENT/-EPERM/
 * -EINVAL on lookup, ownership or transition failure.
 */
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			/* park the MPT busy until the caller ends the move */
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
1610
/*
 * eq_res_start_move_to() - validate and begin a tracked-EQ state change
 * (RESERVED <-> HW only), parking the EQ in RES_EQ_BUSY on success.
 *
 * Returns 0 (setting *@eq if non-NULL) or -ENOENT/-EPERM/-EINVAL.
 *
 * NOTE(review): unlike the QP/MPT variants, *@eq is assigned after the
 * lock is dropped; r should remain valid because it is now
 * RES_EQ_BUSY - confirm against the removal paths.
 */
static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			/* BUSY is a transient marker, never a valid target */
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	if (!err && eq)
		*eq = r;

	return err;
}
1659
/*
 * Begin moving the CQ @cqn, owned by @slave, to @state.
 *
 * Legal transitions are HW -> ALLOCATED (only when the entry's
 * ref_count has dropped to zero) and ALLOCATED -> HW.  On success the
 * entry is left in the transitional RES_CQ_BUSY state with
 * from_state/to_state recorded; the caller must later invoke
 * res_end_move() or res_abort_move().  If @cq is non-NULL the tracker
 * entry is returned through it.
 *
 * Returns 0 on success, -ENOENT (not tracked), -EPERM (wrong owner),
 * -EBUSY (still referenced) or -EINVAL (illegal transition).
 */
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			/* Something still holds a reference on this CQ. */
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		/* Record the move so it can be committed or aborted. */
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
1699
/*
 * Begin moving the SRQ at @index, owned by @slave, to @state.
 *
 * Legal transitions are HW -> ALLOCATED (only when the entry's
 * ref_count has dropped to zero) and ALLOCATED -> HW.  On success the
 * entry is left in the transitional RES_SRQ_BUSY state with
 * from_state/to_state recorded; the caller must later invoke
 * res_end_move() or res_abort_move().  If @srq is non-NULL the tracker
 * entry is returned through it.
 *
 * Returns 0 on success, -ENOENT (not tracked), -EPERM (wrong owner),
 * -EBUSY (still referenced) or -EINVAL (illegal transition).
 */
static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_SRQ_ALLOCATED) {
		if (r->com.state != RES_SRQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			/* Something still holds a reference on this SRQ. */
			err = -EBUSY;
	} else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
		err = -EINVAL;
	}

	if (!err) {
		/* Record the move so it can be committed or aborted. */
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_SRQ_BUSY;
		if (srq)
			*srq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
1735
1736 static void res_abort_move(struct mlx4_dev *dev, int slave,
1737                            enum mlx4_resource type, int id)
1738 {
1739         struct mlx4_priv *priv = mlx4_priv(dev);
1740         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1741         struct res_common *r;
1742
1743         spin_lock_irq(mlx4_tlock(dev));
1744         r = res_tracker_lookup(&tracker->res_tree[type], id);
1745         if (r && (r->owner == slave))
1746                 r->state = r->from_state;
1747         spin_unlock_irq(mlx4_tlock(dev));
1748 }
1749
1750 static void res_end_move(struct mlx4_dev *dev, int slave,
1751                          enum mlx4_resource type, int id)
1752 {
1753         struct mlx4_priv *priv = mlx4_priv(dev);
1754         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1755         struct res_common *r;
1756
1757         spin_lock_irq(mlx4_tlock(dev));
1758         r = res_tracker_lookup(&tracker->res_tree[type], id);
1759         if (r && (r->owner == slave))
1760                 r->state = r->to_state;
1761         spin_unlock_irq(mlx4_tlock(dev));
1762 }
1763
/*
 * A reserved QP number may only be operated on by the master, or by
 * the slave whose guest proxy range it falls in.
 */
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
	if (!mlx4_is_qp_reserved(dev, qpn))
		return 0;

	return mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn);
}
1769
1770 static int fw_reserved(struct mlx4_dev *dev, int qpn)
1771 {
1772         return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
1773 }
1774
/*
 * ALLOC_RES handler for QP resources requested by @slave.
 *
 * RES_OP_RESERVE: reserve a range of QPNs.  The low 24 bits of
 * in_param are the count, bits 24-31 carry allocation flags (masked by
 * dev->caps.alloc_res_qp_mask), and the high dword is the alignment.
 * The base QPN is returned through the low dword of @out_param.
 *
 * RES_OP_MAP_ICM: map the ICM backing a single QPN (low 23 bits of
 * in_param).  FW-reserved QPNs skip the ICM allocation (see
 * fw_reserved()); valid reserved proxy QPNs are added to the tracker
 * here since they were never RESERVEd by the slave.
 *
 * Each failure path undoes the steps already taken (quota grant, range
 * reservation, state move) before returning the error.
 */
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err;
	int count;
	int align;
	int base;
	int qpn;
	u8 flags;

	switch (op) {
	case RES_OP_RESERVE:
		count = get_param_l(&in_param) & 0xffffff;
		/* Turn off all unsupported QP allocation flags that the
		 * slave tries to set.
		 */
		flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
		align = get_param_h(&in_param);
		/* Charge the slave's QP quota before touching the bitmap. */
		err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
		if (err)
			return err;

		err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
		if (err) {
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			return err;
		}

		err = add_res_range(dev, slave, base, count, RES_QP, 0);
		if (err) {
			/* Roll back both the quota grant and the range. */
			mlx4_release_resource(dev, slave, RES_QP, count, 0);
			__mlx4_qp_release_range(dev, base, count);
			return err;
		}
		set_param_l(out_param, base);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		if (valid_reserved(dev, slave, qpn)) {
			err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
			if (err)
				return err;
		}

		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
					   NULL, 1);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn)) {
			err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
			if (err) {
				res_abort_move(dev, slave, RES_QP, qpn);
				return err;
			}
		}

		res_end_move(dev, slave, RES_QP, qpn);
		break;

	default:
		err = -EINVAL;
		break;
	}
	return err;
}
1841
1842 static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1843                          u64 in_param, u64 *out_param)
1844 {
1845         int err = -EINVAL;
1846         int base;
1847         int order;
1848
1849         if (op != RES_OP_RESERVE_AND_MAP)
1850                 return err;
1851
1852         order = get_param_l(&in_param);
1853
1854         err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1855         if (err)
1856                 return err;
1857
1858         base = __mlx4_alloc_mtt_range(dev, order);
1859         if (base == -1) {
1860                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1861                 return -ENOMEM;
1862         }
1863
1864         err = add_res_range(dev, slave, base, 1, RES_MTT, order);
1865         if (err) {
1866                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
1867                 __mlx4_free_mtt_range(dev, base, order);
1868         } else {
1869                 set_param_l(out_param, base);
1870         }
1871
1872         return err;
1873 }
1874
/*
 * ALLOC_RES handler for MPT (memory region/window) resources.
 *
 * RES_OP_RESERVE: reserve an MPT index for @slave and track it under
 * id = index & mpt_mask(dev); the full index is kept as the tracker
 * "extra" value and returned through the low dword of @out_param.
 *
 * RES_OP_MAP_ICM: allocate the ICM backing an already-reserved MPT by
 * moving it to RES_MPT_MAPPED (aborted on ICM failure).
 *
 * Any other op leaves err at -EINVAL (there is no default case).
 */
static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		/* Charge the slave's MPT quota before reserving. */
		err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
		if (err)
			break;

		index = __mlx4_mpt_reserve(dev);
		if (index == -1) {
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			break;
		}
		id = index & mpt_mask(dev);

		err = add_res_range(dev, slave, id, 1, RES_MPT, index);
		if (err) {
			/* Roll back the quota grant and the reservation. */
			mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
			__mlx4_mpt_release(dev, index);
			break;
		}
		set_param_l(out_param, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_MAPPED, &mpt);
		if (err)
			return err;

		err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
		if (err) {
			/* Undo the state move; the MPT stays reserved. */
			res_abort_move(dev, slave, RES_MPT, id);
			return err;
		}

		res_end_move(dev, slave, RES_MPT, id);
		break;
	}
	return err;
}
1923
1924 static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1925                         u64 in_param, u64 *out_param)
1926 {
1927         int cqn;
1928         int err;
1929
1930         switch (op) {
1931         case RES_OP_RESERVE_AND_MAP:
1932                 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
1933                 if (err)
1934                         break;
1935
1936                 err = __mlx4_cq_alloc_icm(dev, &cqn);
1937                 if (err) {
1938                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1939                         break;
1940                 }
1941
1942                 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1943                 if (err) {
1944                         mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1945                         __mlx4_cq_free_icm(dev, cqn);
1946                         break;
1947                 }
1948
1949                 set_param_l(out_param, cqn);
1950                 break;
1951
1952         default:
1953                 err = -EINVAL;
1954         }
1955
1956         return err;
1957 }
1958
1959 static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1960                          u64 in_param, u64 *out_param)
1961 {
1962         int srqn;
1963         int err;
1964
1965         switch (op) {
1966         case RES_OP_RESERVE_AND_MAP:
1967                 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
1968                 if (err)
1969                         break;
1970
1971                 err = __mlx4_srq_alloc_icm(dev, &srqn);
1972                 if (err) {
1973                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1974                         break;
1975                 }
1976
1977                 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1978                 if (err) {
1979                         mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1980                         __mlx4_srq_free_icm(dev, srqn);
1981                         break;
1982                 }
1983
1984                 set_param_l(out_param, srqn);
1985                 break;
1986
1987         default:
1988                 err = -EINVAL;
1989         }
1990
1991         return err;
1992 }
1993
1994 static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1995                                      u8 smac_index, u64 *mac)
1996 {
1997         struct mlx4_priv *priv = mlx4_priv(dev);
1998         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1999         struct list_head *mac_list =
2000                 &tracker->slave_list[slave].res_list[RES_MAC];
2001         struct mac_res *res, *tmp;
2002
2003         list_for_each_entry_safe(res, tmp, mac_list, list) {
2004                 if (res->smac_index == smac_index && res->port == (u8) port) {
2005                         *mac = res->mac;
2006                         return 0;
2007                 }
2008         }
2009         return -ENOENT;
2010 }
2011
2012 static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
2013 {
2014         struct mlx4_priv *priv = mlx4_priv(dev);
2015         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2016         struct list_head *mac_list =
2017                 &tracker->slave_list[slave].res_list[RES_MAC];
2018         struct mac_res *res, *tmp;
2019
2020         list_for_each_entry_safe(res, tmp, mac_list, list) {
2021                 if (res->mac == mac && res->port == (u8) port) {
2022                         /* mac found. update ref count */
2023                         ++res->ref_count;
2024                         return 0;
2025                 }
2026         }
2027
2028         if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
2029                 return -EINVAL;
2030         res = kzalloc(sizeof *res, GFP_KERNEL);
2031         if (!res) {
2032                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2033                 return -ENOMEM;
2034         }
2035         res->mac = mac;
2036         res->port = (u8) port;
2037         res->smac_index = smac_index;
2038         res->ref_count = 1;
2039         list_add_tail(&res->list,
2040                       &tracker->slave_list[slave].res_list[RES_MAC]);
2041         return 0;
2042 }
2043
2044 static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2045                                int port)
2046 {
2047         struct mlx4_priv *priv = mlx4_priv(dev);
2048         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2049         struct list_head *mac_list =
2050                 &tracker->slave_list[slave].res_list[RES_MAC];
2051         struct mac_res *res, *tmp;
2052
2053         list_for_each_entry_safe(res, tmp, mac_list, list) {
2054                 if (res->mac == mac && res->port == (u8) port) {
2055                         if (!--res->ref_count) {
2056                                 list_del(&res->list);
2057                                 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2058                                 kfree(res);
2059                         }
2060                         break;
2061                 }
2062         }
2063 }
2064
2065 static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2066 {
2067         struct mlx4_priv *priv = mlx4_priv(dev);
2068         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2069         struct list_head *mac_list =
2070                 &tracker->slave_list[slave].res_list[RES_MAC];
2071         struct mac_res *res, *tmp;
2072         int i;
2073
2074         list_for_each_entry_safe(res, tmp, mac_list, list) {
2075                 list_del(&res->list);
2076                 /* dereference the mac the num times the slave referenced it */
2077                 for (i = 0; i < res->ref_count; i++)
2078                         __mlx4_unregister_mac(dev, res->port, res->mac);
2079                 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
2080                 kfree(res);
2081         }
2082 }
2083
2084 static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2085                          u64 in_param, u64 *out_param, int in_port)
2086 {
2087         int err = -EINVAL;
2088         int port;
2089         u64 mac;
2090         u8 smac_index;
2091
2092         if (op != RES_OP_RESERVE_AND_MAP)
2093                 return err;
2094
2095         port = !in_port ? get_param_l(out_param) : in_port;
2096         port = mlx4_slave_convert_port(
2097                         dev, slave, port);
2098
2099         if (port < 0)
2100                 return -EINVAL;
2101         mac = in_param;
2102
2103         err = __mlx4_register_mac(dev, port, mac);
2104         if (err >= 0) {
2105                 smac_index = err;
2106                 set_param_l(out_param, err);
2107                 err = 0;
2108         }
2109
2110         if (!err) {
2111                 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
2112                 if (err)
2113                         __mlx4_unregister_mac(dev, port, mac);
2114         }
2115         return err;
2116 }
2117
2118 static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2119                              int port, int vlan_index)
2120 {
2121         struct mlx4_priv *priv = mlx4_priv(dev);
2122         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2123         struct list_head *vlan_list =
2124                 &tracker->slave_list[slave].res_list[RES_VLAN];
2125         struct vlan_res *res, *tmp;
2126
2127         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2128                 if (res->vlan == vlan && res->port == (u8) port) {
2129                         /* vlan found. update ref count */
2130                         ++res->ref_count;
2131                         return 0;
2132                 }
2133         }
2134
2135         if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2136                 return -EINVAL;
2137         res = kzalloc(sizeof(*res), GFP_KERNEL);
2138         if (!res) {
2139                 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
2140                 return -ENOMEM;
2141         }
2142         res->vlan = vlan;
2143         res->port = (u8) port;
2144         res->vlan_index = vlan_index;
2145         res->ref_count = 1;
2146         list_add_tail(&res->list,
2147                       &tracker->slave_list[slave].res_list[RES_VLAN]);
2148         return 0;
2149 }
2150
2151
2152 static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2153                                 int port)
2154 {
2155         struct mlx4_priv *priv = mlx4_priv(dev);
2156         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2157         struct list_head *vlan_list =
2158                 &tracker->slave_list[slave].res_list[RES_VLAN];
2159         struct vlan_res *res, *tmp;
2160
2161         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2162                 if (res->vlan == vlan && res->port == (u8) port) {
2163                         if (!--res->ref_count) {
2164                                 list_del(&res->list);
2165                                 mlx4_release_resource(dev, slave, RES_VLAN,
2166                                                       1, port);
2167                                 kfree(res);
2168                         }
2169                         break;
2170                 }
2171         }
2172 }
2173
2174 static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2175 {
2176         struct mlx4_priv *priv = mlx4_priv(dev);
2177         struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2178         struct list_head *vlan_list =
2179                 &tracker->slave_list[slave].res_list[RES_VLAN];
2180         struct vlan_res *res, *tmp;
2181         int i;
2182
2183         list_for_each_entry_safe(res, tmp, vlan_list, list) {
2184                 list_del(&res->list);
2185                 /* dereference the vlan the num times the slave referenced it */
2186                 for (i = 0; i < res->ref_count; i++)
2187                         __mlx4_unregister_vlan(dev, res->port, res->vlan);
2188                 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
2189                 kfree(res);
2190         }
2191 }
2192
/*
 * ALLOC_RES handler for VLAN resources: register the VLAN id from
 * in_param on the (slave-converted) port and take a per-slave
 * reference on it.  Only RES_OP_RESERVE_AND_MAP is supported; the
 * VLAN index is returned through the low dword of @out_param.
 *
 * When the port was not supplied in the in_modifier (!in_port), the
 * request came through the legacy interface and is deliberately
 * treated as a NOP while marking old_vlan_api for this slave.
 */
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			/* Tracking failed - undo the HW registration. */
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}
2230
2231 static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2232                              u64 in_param, u64 *out_param, int port)
2233 {
2234         u32 index;
2235         int err;
2236
2237         if (op != RES_OP_RESERVE)
2238                 return -EINVAL;
2239
2240         err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
2241         if (err)
2242                 return err;
2243
2244         err = __mlx4_counter_alloc(dev, &index);
2245         if (err) {
2246                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2247                 return err;
2248         }
2249
2250         err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
2251         if (err) {
2252                 __mlx4_counter_free(dev, index);
2253                 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2254         } else {
2255                 set_param_l(out_param, index);
2256         }
2257
2258         return err;
2259 }
2260
2261 static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2262                            u64 in_param, u64 *out_param)
2263 {
2264         u32 xrcdn;
2265         int err;
2266
2267         if (op != RES_OP_RESERVE)
2268                 return -EINVAL;
2269
2270         err = __mlx4_xrcd_alloc(dev, &xrcdn);
2271         if (err)
2272                 return err;
2273
2274         err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2275         if (err)
2276                 __mlx4_xrcd_free(dev, xrcdn);
2277         else
2278                 set_param_l(out_param, xrcdn);
2279
2280         return err;
2281 }
2282
/*
 * Command wrapper for ALLOC_RES: dispatch a resource-allocation
 * request from @slave to the per-type handler.
 *
 * The low byte of vhcr->in_modifier selects the resource type; for
 * MAC and VLAN the next byte carries the port.  vhcr->op_modifier is
 * the allocation sub-op (RES_OP_*), vhcr->in_param is the
 * type-specific input, and vhcr->out_param receives the result.
 */
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param, 0);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}
2347
/*
 * FREE_RES handler for QP resources.
 *
 * RES_OP_RESERVE: return a previously reserved QPN range (base in the
 * low 23 bits of in_param, count in the high dword), releasing both
 * the tracker entries and the slave's quota.
 *
 * RES_OP_MAP_ICM: unmap the ICM of a single QPN by moving it back to
 * RES_QP_RESERVED.  FW-reserved QPNs keep their firmware-owned ICM
 * (see fw_reserved()).  Reserved proxy QPNs were added to the tracker
 * in qp_alloc_res()'s MAP_ICM path, so their entry is removed here.
 */
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
2387
2388 static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2389                         u64 in_param, u64 *out_param)
2390 {
2391         int err = -EINVAL;
2392         int base;
2393         int order;
2394
2395         if (op != RES_OP_RESERVE_AND_MAP)
2396                 return err;
2397
2398         base = get_param_l(&in_param);
2399         order = get_param_h(&in_param);
2400         err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
2401         if (!err) {
2402                 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
2403                 __mlx4_free_mtt_range(dev, base, order);
2404         }
2405         return err;
2406 }
2407
/*
 * FREE_RES handler for MPT (memory region/window) resources.
 *
 * RES_OP_RESERVE: untrack the MPT (looked up by
 * id = index & mpt_mask(dev)), return the slave's quota and release
 * the reservation.  The full index is re-read from the tracker entry's
 * key so the correct value is handed back to __mlx4_mpt_release().
 *
 * RES_OP_MAP_ICM: free the MPT's ICM by moving it back to
 * RES_MPT_RESERVED.
 */
static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		/* Use the tracked key, not the caller-supplied index. */
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}
2449
2450 static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2451                        u64 in_param, u64 *out_param)
2452 {
2453         int cqn;
2454         int err;
2455
2456         switch (op) {
2457         case RES_OP_RESERVE_AND_MAP:
2458                 cqn = get_param_l(&in_param);
2459                 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2460                 if (err)
2461                         break;
2462
2463                 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
2464                 __mlx4_cq_free_icm(dev, cqn);
2465                 break;
2466
2467         default:
2468                 err = -EINVAL;
2469                 break;
2470         }
2471
2472         return err;
2473 }
2474
2475 static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2476                         u64 in_param, u64 *out_param)
2477 {
2478         int srqn;
2479         int err;
2480
2481         switch (op) {
2482         case RES_OP_RESERVE_AND_MAP:
2483                 srqn = get_param_l(&in_param);
2484                 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2485                 if (err)
2486                         break;
2487
2488                 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
2489                 __mlx4_srq_free_icm(dev, srqn);
2490                 break;
2491
2492         default:
2493                 err = -EINVAL;
2494                 break;
2495         }
2496
2497         return err;
2498 }
2499
2500 static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2501                             u64 in_param, u64 *out_param, int in_port)
2502 {
2503         int port;
2504         int err = 0;
2505
2506         switch (op) {
2507         case RES_OP_RESERVE_AND_MAP:
2508                 port = !in_port ? get_param_l(out_param) : in_port;
2509                 port = mlx4_slave_convert_port(
2510                                 dev, slave, port);
2511
2512                 if (port < 0)
2513                         return -EINVAL;
2514                 mac_del_from_slave(dev, slave, in_param, port);
2515                 __mlx4_unregister_mac(dev, port, in_param);
2516                 break;
2517         default:
2518                 err = -EINVAL;
2519                 break;
2520         }
2521
2522         return err;
2523
2524 }
2525
2526 static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2527                             u64 in_param, u64 *out_param, int port)
2528 {
2529         struct mlx4_priv *priv = mlx4_priv(dev);
2530         struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
2531         int err = 0;
2532
2533         port = mlx4_slave_convert_port(
2534                         dev, slave, port);
2535
2536         if (port < 0)
2537                 return -EINVAL;
2538         switch (op) {
2539         case RES_OP_RESERVE_AND_MAP:
2540                 if (slave_state[slave].old_vlan_api)
2541                         return 0;
2542                 if (!port)
2543                         return -EINVAL;
2544                 vlan_del_from_slave(dev, slave, in_param, port);
2545                 __mlx4_unregister_vlan(dev, port, in_param);
2546                 break;
2547         default:
2548                 err = -EINVAL;
2549                 break;
2550         }
2551
2552         return err;
2553 }
2554
2555 static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2556                             u64 in_param, u64 *out_param)
2557 {
2558         int index;
2559         int err;
2560
2561         if (op != RES_OP_RESERVE)
2562                 return -EINVAL;
2563
2564         index = get_param_l(&in_param);
2565         if (index == MLX4_SINK_COUNTER_INDEX(dev))
2566                 return 0;
2567
2568         err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2569         if (err)
2570                 return err;
2571
2572         __mlx4_counter_free(dev, index);
2573         mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2574
2575         return err;
2576 }
2577
2578 static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2579                           u64 in_param, u64 *out_param)
2580 {
2581         int xrcdn;
2582         int err;
2583
2584         if (op != RES_OP_RESERVE)
2585                 return -EINVAL;
2586
2587         xrcdn = get_param_l(&in_param);
2588         err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2589         if (err)
2590                 return err;
2591
2592         __mlx4_xrcd_free(dev, xrcdn);
2593
2594         return err;
2595 }
2596
2597 int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2598                           struct mlx4_vhcr *vhcr,
2599                           struct mlx4_cmd_mailbox *inbox,
2600                           struct mlx4_cmd_mailbox *outbox,
2601                           struct mlx4_cmd_info *cmd)
2602 {
2603         int err = -EINVAL;
2604         int alop = vhcr->op_modifier;
2605
2606         switch (vhcr->in_modifier & 0xFF) {
2607         case RES_QP:
2608                 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2609                                   vhcr->in_param);
2610                 break;
2611
2612         case RES_MTT:
2613                 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2614                                    vhcr->in_param, &vhcr->out_param);
2615                 break;
2616
2617         case RES_MPT:
2618                 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2619                                    vhcr->in_param);
2620                 break;
2621
2622         case RES_CQ:
2623                 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2624                                   vhcr->in_param, &vhcr->out_param);
2625                 break;
2626
2627         case RES_SRQ:
2628                 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2629                                    vhcr->in_param, &vhcr->out_param);
2630                 break;
2631
2632         case RES_MAC:
2633                 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
2634                                    vhcr->in_param, &vhcr->out_param,
2635                                    (vhcr->in_modifier >> 8) & 0xFF);
2636                 break;
2637
2638         case RES_VLAN:
2639                 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
2640                                     vhcr->in_param, &vhcr->out_param,
2641                                     (vhcr->in_modifier >> 8) & 0xFF);
2642                 break;
2643
2644         case RES_COUNTER:
2645                 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2646                                        vhcr->in_param, &vhcr->out_param);
2647                 break;
2648
2649         case RES_XRCD:
2650                 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2651                                      vhcr->in_param, &vhcr->out_param);
2652
2653         default:
2654                 break;
2655         }
2656         return err;
2657 }
2658
2659 /* ugly but other choices are uglier */
2660 static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2661 {
2662         return (be32_to_cpu(mpt->flags) >> 9) & 1;
2663 }
2664
2665 static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
2666 {
2667         return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
2668 }
2669
/* MTT size field of the MPT entry, converted to host endianness. */
static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}
2674
2675 static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2676 {
2677         return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2678 }
2679
2680 static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2681 {
2682         return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2683 }
2684
2685 static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2686 {
2687         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2688 }
2689
2690 static int mr_is_region(struct mlx4_mpt_entry *mpt)
2691 {
2692         return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2693 }
2694
2695 static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
2696 {
2697         return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2698 }
2699
2700 static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
2701 {
2702         return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2703 }
2704
/* Compute how many MTT pages the QP described by @qpc will occupy.
 *
 * Queue sizes and strides are stored logarithmically in the context;
 * SQ/RQ memory is summed, the page offset inside the first page is
 * accounted for, and the result is rounded up to a power of two pages.
 */
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;	/* pages are >= 4KB */
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_sride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;	/* QP attached to SRQ? */
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;	/* transport/service type */
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_sride + 4);
	/* SRQ-attached, RSS and XRC QPs have no receive queue of their own */
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}
2731
2732 static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2733                            int size, struct res_mtt *mtt)
2734 {
2735         int res_start = mtt->com.res_id;
2736         int res_size = (1 << mtt->order);
2737
2738         if (start < res_start || start + size > res_start + res_size)
2739                 return -EPERM;
2740         return 0;
2741 }
2742
/* CMD wrapper for SW2HW_MPT issued by a slave.
 *
 * Moves the slave's MPT tracking entry to RES_MPT_HW, enforces the
 * virtualization restrictions encoded in the mailbox (regions only,
 * no bind-enabled FMRs, PD slave-id bits must be zero or match the
 * slave), validates the referenced MTT range for non-physical MPTs,
 * and only then forwards the command to firmware.  On any failure the
 * resource-state move is rolled back.
 */
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && --pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		/* Virtual MPT: the MTT range it references must belong
		 * to this slave and be large enough. */
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		/* keep the MTT pinned for as long as the MPT uses it */
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
2825
/* CMD wrapper for HW2SW_MPT issued by a slave.
 *
 * Moves the MPT tracking entry back to RES_MPT_MAPPED, forwards the
 * command to firmware and, on success, drops the reference the MPT
 * held on its MTT range.  The state move is rolled back on failure.
 */
int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	/* the MPT no longer references its MTT range */
	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}
2857
/* CMD wrapper for QUERY_MPT issued by a slave.
 *
 * Answers the query from the host-side ICM copy when the entry is in
 * SW ownership (RES_MPT_MAPPED), or forwards it to firmware when the
 * entry is in HW ownership (RES_MPT_HW).  Any other state is busy.
 */
int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state == RES_MPT_MAPPED) {
		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
		 * that, the VF must read the MPT. But since the MPT entry memory is not
		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
		 * entry contents. To guarantee that the MPT cannot be changed, the driver
		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
		 * ownership following the change. The change here allows the VF to
		 * perform QUERY_MPT also when the entry is in SW ownership.
		 */
		struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
					&mlx4_priv(dev)->mr_table.dmpt_table,
					mpt->key, NULL);

		if (NULL == mpt_entry || NULL == outbox->buf) {
			err = -EINVAL;
			goto out;
		}

		/* SW ownership: copy the entry straight from ICM */
		memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));

		err = 0;
	} else if (mpt->com.from_state == RES_MPT_HW) {
		/* HW ownership: let firmware fill the outbox */
		err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	} else {
		err = -EBUSY;
		goto out;
	}


out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}
2907
2908 static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2909 {
2910         return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2911 }
2912
2913 static int qp_get_scqn(struct mlx4_qp_context *qpc)
2914 {
2915         return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2916 }
2917
2918 static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2919 {
2920         return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2921 }
2922
2923 static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2924                                   struct mlx4_qp_context *context)
2925 {
2926         u32 qpn = vhcr->in_modifier & 0xffffff;
2927         u32 qkey = 0;
2928
2929         if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2930                 return;
2931
2932         /* adjust qkey in qp context */
2933         context->qkey = cpu_to_be32(qkey);
2934 }
2935
/* Forward declaration - defined later in this file; needed by the
 * RST2INIT wrapper below. */
static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
				 struct mlx4_qp_context *qpc,
				 struct mlx4_cmd_mailbox *inbox);
2939
/* CMD wrapper for RST2INIT_QP issued by a slave.
 *
 * Moves the QP tracking entry to RES_QP_HW, verifies that every
 * resource the QP context references (MTT range, receive/send CQs and
 * optionally an SRQ) belongs to the slave, then forwards the command
 * to firmware.  On success each referenced resource gets its
 * ref_count bumped so it cannot be freed while the QP uses it; on any
 * failure the gets and the state move are unwound in reverse order.
 */
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;	/* context follows the 8-byte optpar mask */
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
	if (err)
		return err;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	/* reset the cached per-QP fields for the new lifetime */
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->param3 = 0;
	qp->vlan_control = 0;
	qp->fvl_rx = 0;
	qp->pri_path_fl = 0;
	qp->vlan_index = 0;
	qp->feup = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	/* pin every resource the now-live QP references */
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}

	/* Save param3 for dynamic changes from VST back to VGT */
	qp->param3 = qpc->param3;
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}
3048
3049 static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
3050 {
3051         return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3052 }
3053
3054 static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3055 {
3056         int log_eq_size = eqc->log_eq_size & 0x1f;
3057         int page_shift = (eqc->log_page_size & 0x3f) + 12;
3058
3059         if (log_eq_size + 5 < page_shift)
3060                 return 1;
3061
3062         return 1 << (log_eq_size + 5 - page_shift);
3063 }
3064
3065 static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
3066 {
3067         return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3068 }
3069
3070 static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3071 {
3072         int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3073         int page_shift = (cqc->log_page_size & 0x3f) + 12;
3074
3075         if (log_cq_size + 5 < page_shift)
3076                 return 1;
3077
3078         return 1 << (log_cq_size + 5 - page_shift);
3079 }
3080
/* CMD wrapper for SW2HW_EQ issued by a slave.
 *
 * Registers the EQ in the tracker (res_id encodes slave and EQ
 * number), moves it to RES_EQ_HW, validates the MTT range the EQ
 * context references, and forwards the command to firmware.  On
 * success the MTT gains a reference; on failure everything done so
 * far is unwound.
 */
int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 10) | eqn;	/* tracker id: slave in high bits */
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	/* pin the MTT range for the lifetime of the EQ */
	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}
3129
3130 int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3131                             struct mlx4_vhcr *vhcr,
3132                             struct mlx4_cmd_mailbox *inbox,
3133                             struct mlx4_cmd_mailbox *outbox,
3134                             struct mlx4_cmd_info *cmd)
3135 {
3136         int err;
3137         u8 get = vhcr->op_modifier;
3138
3139         if (get != 1)
3140                 return -EPERM;
3141
3142         err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3143
3144         return err;
3145 }
3146
/* Find the slave's MTT reservation that fully contains
 * [start, start + len) and mark it RES_MTT_BUSY (saving the previous
 * state in from_state) so it cannot go away while the caller uses it.
 * The caller releases the entry with put_res().  Returns -EINVAL when
 * no reservation of this slave contains the range.
 */
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	/* walk the slave's MTT list under the tracker lock */
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}
3170
/* Validate a QP state-transition mailbox on behalf of a slave.
 *
 * For VFs: clears the FPP bit and rejects any rate-limit setting.
 * For RC/UC/XRC transitions that touch an address path, checks that
 * the primary/alternate mgid_index stays within the number of GIDs
 * the slave owns on that port.  For MLX (special) QPs, blocks
 * unprivileged VFs from creating proxy special QPs on INIT2RTR unless
 * SMI is enabled for them on that port.  Returns 0 when the mailbox
 * passes all checks.
 */
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32			qp_type;
	u32			qpn;
	struct mlx4_qp_context	*qp_ctx;
	enum mlx4_qp_optpar	optpar;
	int port;
	int num_gids;

	qp_ctx  = inbox->buf + 8;	/* context follows the 8-byte optpar mask */
	qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar  = be32_to_cpu(*(__be32 *) inbox->buf);

	if (slave != mlx4_master_func_num(dev)) {
		qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
		/* setting QP rate-limit is disallowed for VFs */
		if (qp_ctx->rate_limit_params)
			return -EPERM;
	}

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_XRC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
					else
						num_gids = 1;
					if (qp_ctx->pri_path.mgid_index >= num_gids)
						return -EINVAL;
				}
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
					else
						num_gids = 1;
					if (qp_ctx->alt_path.mgid_index >= num_gids)
						return -EINVAL;
				}
			}
			break;
		default:
			break;
		}
		break;

	case MLX4_QP_ST_MLX:
		qpn = vhcr->in_modifier & 0x7fffff;
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (transition == QP_TRANS_INIT2RTR &&
		    slave != mlx4_master_func_num(dev) &&
		    mlx4_is_qp_reserved(dev, qpn) &&
		    !mlx4_vf_smi_enabled(dev, slave, port)) {
			/* only enabled VFs may create MLX proxy QPs */
			mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
				 __func__, slave, port);
			return -EPERM;
		}
		break;

	default:
		break;
	}

	return 0;
}
3250
/* CMD wrapper for WRITE_MTT issued by a slave.
 *
 * Verifies that the target range lies inside one of the slave's MTT
 * reservations, then performs the write on the host side via
 * __mlx4_write_mtt() instead of forwarding the mailbox to firmware.
 * Note the inbox is converted to host endianness IN PLACE through the
 * pg_list alias before being handed to __mlx4_write_mtt().
 */
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;	/* host-endian alias of the inbox */
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);	/* first MTT index to write */
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	/* addresses start at entry 2; strip the low control bit while
	 * converting each to host endianness */
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}
3288
3289 int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3290                           struct mlx4_vhcr *vhcr,
3291                           struct mlx4_cmd_mailbox *inbox,
3292                    &