be2net: adding support for Lancer family of CNAs
drivers/net/benet/be_cmds.c
1 /*
2  * Copyright (C) 2005 - 2010 ServerEngines
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@serverengines.com
12  *
13  * ServerEngines
14  * 209 N. Fair Oaks Ave
15  * Sunnyvale, CA 94085
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20
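/* Ring the MCC queue doorbell: the low bits carry the MCCQ ring id and the
 * count of newly posted WRBs is placed at DB_MCCQ_NUM_POSTED_SHIFT; the wmb()
 * ensures the WRB contents are visible to the device before the doorbell write.
 */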
21 static void be_mcc_notify(struct be_adapter *adapter)
22 {
23         struct be_queue_info *mccq = &adapter->mcc_obj.q;
24         u32 val = 0;
25
26         val |= mccq->id & DB_MCCQ_RING_ID_MASK;
27         val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
28
29         wmb();
30         iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
31 }
32
33 /* To check if valid bit is set, check the entire word as we don't know
34  * the endianness of the data (old entry is host endian while a new entry is
35  * little endian) */
36 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
37 {
38         if (compl->flags != 0) {
39                 compl->flags = le32_to_cpu(compl->flags);
40                 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
41                 return true;
42         } else {
43                 return false;
44         }
45 }
46
47 /* Need to reset the entire word that houses the valid bit */
48 static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
49 {
50         compl->flags = 0;
51 }
52
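/* Process a single MCC completion: swap it to host endian, signal flash_compl
 * for WRITE_FLASHROM completions, refresh the cached stats when an
 * ETH_GET_STATISTICS command succeeds, and log a warning for other failures
 * (NOT_SUPPORTED status and failed MAC queries are silently ignored).
 */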
53 static int be_mcc_compl_process(struct be_adapter *adapter,
54         struct be_mcc_compl *compl)
55 {
56         u16 compl_status, extd_status;
57
58         /* Just swap the status to host endian; mcc tag is opaquely copied
59          * from mcc_wrb */
60         be_dws_le_to_cpu(compl, 4);
61
62         compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
63                                 CQE_STATUS_COMPL_MASK;
64
65         if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
66                 (compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
67                 adapter->flash_status = compl_status;
68                 complete(&adapter->flash_compl);
69         }
70
71         if (compl_status == MCC_STATUS_SUCCESS) {
72                 if (compl->tag0 == OPCODE_ETH_GET_STATISTICS) {
73                         struct be_cmd_resp_get_stats *resp =
74                                                 adapter->stats_cmd.va;
75                         be_dws_le_to_cpu(&resp->hw_stats,
76                                                 sizeof(resp->hw_stats));
77                         netdev_stats_update(adapter);
78                         adapter->stats_ioctl_sent = false;
79                 }
80         } else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
81                    (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
82                 extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
83                                 CQE_STATUS_EXTD_MASK;
84                 dev_warn(&adapter->pdev->dev,
85                 "Error in cmd completion - opcode %d, compl %d, extd %d\n",
86                         compl->tag0, compl_status, extd_status);
87         }
88         return compl_status;
89 }
90
91 /* Link state evt is a string of bytes; no need for endian swapping */
92 static void be_async_link_state_process(struct be_adapter *adapter,
93                 struct be_async_event_link_state *evt)
94 {
95         be_link_status_update(adapter,
96                 evt->port_link_status == ASYNC_EVENT_LINK_UP);
97 }
98
99 /* Grp5 CoS Priority evt */
100 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
101                 struct be_async_event_grp5_cos_priority *evt)
102 {
103         if (evt->valid) {
104                 adapter->vlan_prio_bmap = evt->available_priority_bmap;
105                 adapter->recommended_prio =
106                         evt->reco_default_priority << VLAN_PRIO_SHIFT;
107         }
108 }
109
110 /* Grp5 QOS Speed evt */
111 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
112                 struct be_async_event_grp5_qos_link_speed *evt)
113 {
114         if (evt->physical_port == adapter->port_num) {
115                 /* qos_link_speed is in units of 10 Mbps */
116                 adapter->link_speed = evt->qos_link_speed * 10;
117         }
118 }
119
120 static void be_async_grp5_evt_process(struct be_adapter *adapter,
121                 u32 trailer, struct be_mcc_compl *evt)
122 {
123         u8 event_type = 0;
124
125         event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
126                 ASYNC_TRAILER_EVENT_TYPE_MASK;
127
128         switch (event_type) {
129         case ASYNC_EVENT_COS_PRIORITY:
130                 be_async_grp5_cos_priority_process(adapter,
131                 (struct be_async_event_grp5_cos_priority *)evt);
132         break;
133         case ASYNC_EVENT_QOS_SPEED:
134                 be_async_grp5_qos_speed_process(adapter,
135                 (struct be_async_event_grp5_qos_link_speed *)evt);
136         break;
137         default:
138                 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
139                 break;
140         }
141 }
142
143 static inline bool is_link_state_evt(u32 trailer)
144 {
145         return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
146                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
147                                 ASYNC_EVENT_CODE_LINK_STATE;
148 }
149
150 static inline bool is_grp5_evt(u32 trailer)
151 {
152         return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
153                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
154                                 ASYNC_EVENT_CODE_GRP_5);
155 }
156
157 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
158 {
159         struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
160         struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
161
162         if (be_mcc_compl_is_new(compl)) {
163                 queue_tail_inc(mcc_cq);
164                 return compl;
165         }
166         return NULL;
167 }
168
169 void be_async_mcc_enable(struct be_adapter *adapter)
170 {
171         spin_lock_bh(&adapter->mcc_cq_lock);
172
173         be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
174         adapter->mcc_obj.rearm_cq = true;
175
176         spin_unlock_bh(&adapter->mcc_cq_lock);
177 }
178
179 void be_async_mcc_disable(struct be_adapter *adapter)
180 {
181         adapter->mcc_obj.rearm_cq = false;
182 }
183
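/* Drain the MCC completion queue under mcc_cq_lock: async entries (link state
 * and group 5 events) are dispatched to their handlers, regular completions
 * are processed and the queue's used count is dropped. Returns the number of
 * entries reaped so the caller can notify the CQ by that amount.
 */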
184 int be_process_mcc(struct be_adapter *adapter, int *status)
185 {
186         struct be_mcc_compl *compl;
187         int num = 0;
188         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
189
190         spin_lock_bh(&adapter->mcc_cq_lock);
191         while ((compl = be_mcc_compl_get(adapter))) {
192                 if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
193                         /* Interpret flags as an async trailer */
194                         if (is_link_state_evt(compl->flags))
195                                 be_async_link_state_process(adapter,
196                                 (struct be_async_event_link_state *) compl);
197                         else if (is_grp5_evt(compl->flags))
198                                 be_async_grp5_evt_process(adapter,
199                                 compl->flags, compl);
200                 } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
201                                 *status = be_mcc_compl_process(adapter, compl);
202                                 atomic_dec(&mcc_obj->q.used);
203                 }
204                 be_mcc_compl_use(compl);
205                 num++;
206         }
207
208         spin_unlock_bh(&adapter->mcc_cq_lock);
209         return num;
210 }
211
212 /* Wait till no more pending mcc requests are present */
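/* Polls in 100us steps for up to 12s (mcc_timeout), notifying the MCC CQ for
 * each batch of completions reaped (re-arming it only if rearm_cq is set).
 */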
213 static int be_mcc_wait_compl(struct be_adapter *adapter)
214 {
215 #define mcc_timeout             120000 /* 12s timeout */
216         int i, num, status = 0;
217         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
218
219         for (i = 0; i < mcc_timeout; i++) {
220                 num = be_process_mcc(adapter, &status);
221                 if (num)
222                         be_cq_notify(adapter, mcc_obj->cq.id,
223                                 mcc_obj->rearm_cq, num);
224
225                 if (atomic_read(&mcc_obj->q.used) == 0)
226                         break;
227                 udelay(100);
228         }
229         if (i == mcc_timeout) {
230                 dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
231                 return -1;
232         }
233         return status;
234 }
235
236 /* Notify MCC requests and wait for completion */
237 static int be_mcc_notify_wait(struct be_adapter *adapter)
238 {
239         be_mcc_notify(adapter);
240         return be_mcc_wait_compl(adapter);
241 }
242
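/* Poll the mailbox doorbell until its ready bit is set. A read of all 1s
 * means the PCI device has dropped off the bus; after ~4s of 1ms sleeps the
 * wait gives up and dumps any unrecoverable error state.
 */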
243 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
244 {
245         int msecs = 0;
246         u32 ready;
247
248         do {
249                 ready = ioread32(db);
250                 if (ready == 0xffffffff) {
251                         dev_err(&adapter->pdev->dev,
252                                 "pci slot disconnected\n");
253                         return -1;
254                 }
255
256                 ready &= MPU_MAILBOX_DB_RDY_MASK;
257                 if (ready)
258                         break;
259
260                 if (msecs > 4000) {
261                         dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
262                         be_detect_dump_ue(adapter);
263                         return -1;
264                 }
265
266                 set_current_state(TASK_INTERRUPTIBLE);
267                 schedule_timeout(msecs_to_jiffies(1));
268                 msecs++;
269         } while (true);
270
271         return 0;
272 }
273
274 /*
275  * Insert the mailbox address into the doorbell in two steps
276  * Polls on the mbox doorbell till a command completion (or a timeout) occurs
277  */
278 static int be_mbox_notify_wait(struct be_adapter *adapter)
279 {
280         int status;
281         u32 val = 0;
282         void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
283         struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
284         struct be_mcc_mailbox *mbox = mbox_mem->va;
285         struct be_mcc_compl *compl = &mbox->compl;
286
287         /* wait for ready to be set */
288         status = be_mbox_db_ready_wait(adapter, db);
289         if (status != 0)
290                 return status;
291
292         val |= MPU_MAILBOX_DB_HI_MASK;
293         /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
294         val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
295         iowrite32(val, db);
296
297         /* wait for ready to be set */
298         status = be_mbox_db_ready_wait(adapter, db);
299         if (status != 0)
300                 return status;
301
302         val = 0;
303         /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
304         val |= (u32)(mbox_mem->dma >> 4) << 2;
305         iowrite32(val, db);
306
307         status = be_mbox_db_ready_wait(adapter, db);
308         if (status != 0)
309                 return status;
310
311         /* A cq entry has been made now */
312         if (be_mcc_compl_is_new(compl)) {
313                 status = be_mcc_compl_process(adapter, &mbox->compl);
314                 be_mcc_compl_use(compl);
315                 if (status)
316                         return status;
317         } else {
318                 dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
319                 return -1;
320         }
321         return 0;
322 }
323
324 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
325 {
326         u32 sem;
327
328         if (lancer_chip(adapter))
329                 sem  = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
330         else
331                 sem  = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
332
333         *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
334         if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
335                 return -1;
336         else
337                 return 0;
338 }
339
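/* Wait for ARM firmware POST to complete: poll the POST stage every 2 seconds
 * for up to ~40s, bailing out early if the semaphore reports an error.
 */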
340 int be_cmd_POST(struct be_adapter *adapter)
341 {
342         u16 stage;
343         int status, timeout = 0;
344
345         do {
346                 status = be_POST_stage_get(adapter, &stage);
347                 if (status) {
348                         dev_err(&adapter->pdev->dev, "POST error; stage=0x%x\n",
349                                 stage);
350                         return -1;
351                 } else if (stage != POST_STAGE_ARMFW_RDY) {
352                         set_current_state(TASK_INTERRUPTIBLE);
353                         schedule_timeout(2 * HZ);
354                         timeout += 2;
355                 } else {
356                         return 0;
357                 }
358         } while (timeout < 40);
359
360         dev_err(&adapter->pdev->dev, "POST timeout; stage=0x%x\n", stage);
361         return -1;
362 }
363
364 static inline void *embedded_payload(struct be_mcc_wrb *wrb)
365 {
366         return wrb->payload.embedded_payload;
367 }
368
369 static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
370 {
371         return &wrb->payload.sgl[0];
372 }
373
374 /* Don't touch the hdr after it's prepared */
375 static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
376                                 bool embedded, u8 sge_cnt, u32 opcode)
377 {
378         if (embedded)
379                 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
380         else
381                 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
382                                 MCC_WRB_SGE_CNT_SHIFT;
383         wrb->payload_length = payload_len;
384         wrb->tag0 = opcode;
385         be_dws_cpu_to_le(wrb, 8);
386 }
387
388 /* Don't touch the hdr after it's prepared */
389 static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
390                                 u8 subsystem, u8 opcode, int cmd_len)
391 {
392         req_hdr->opcode = opcode;
393         req_hdr->subsystem = subsystem;
394         req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
395         req_hdr->version = 0;
396 }
397
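/* Fill a command's page list with the 4K pages backing the queue ring,
 * splitting each physical address into lo/hi 32-bit halves and capping the
 * list at max_pages entries.
 */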
398 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
399                         struct be_dma_mem *mem)
400 {
401         int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
402         u64 dma = (u64)mem->dma;
403
404         for (i = 0; i < buf_pages; i++) {
405                 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
406                 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
407                 dma += PAGE_SIZE_4K;
408         }
409 }
410
411 /* Converts interrupt delay in microseconds to multiplier value */
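/* e.g. a 96us delay gives an interrupt rate of ~10416/s and a multiplier of 62 */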
412 static u32 eq_delay_to_mult(u32 usec_delay)
413 {
414 #define MAX_INTR_RATE                   651042
415         const u32 round = 10;
416         u32 multiplier;
417
418         if (usec_delay == 0)
419                 multiplier = 0;
420         else {
421                 u32 interrupt_rate = 1000000 / usec_delay;
422                 /* Max delay, corresponding to the lowest interrupt rate */
423                 if (interrupt_rate == 0)
424                         multiplier = 1023;
425                 else {
426                         multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
427                         multiplier /= interrupt_rate;
428                         /* Round the multiplier to the closest value.*/
429                         multiplier = (multiplier + round/2) / round;
430                         multiplier = min(multiplier, (u32)1023);
431                 }
432         }
433         return multiplier;
434 }
435
436 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
437 {
438         struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
439         struct be_mcc_wrb *wrb
440                 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
441         memset(wrb, 0, sizeof(*wrb));
442         return wrb;
443 }
444
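/* Grab the next free WRB slot on the MCC queue; returns NULL (which callers
 * turn into -EBUSY) when all of the queue's entries are already in use.
 */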
445 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
446 {
447         struct be_queue_info *mccq = &adapter->mcc_obj.q;
448         struct be_mcc_wrb *wrb;
449
450         if (atomic_read(&mccq->used) >= mccq->len) {
451                 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
452                 return NULL;
453         }
454
455         wrb = queue_head_node(mccq);
456         queue_head_inc(mccq);
457         atomic_inc(&mccq->used);
458         memset(wrb, 0, sizeof(*wrb));
459         return wrb;
460 }
461
462 /* Tell fw we're about to start firing cmds by writing a
463  * special pattern across the wrb hdr; uses mbox
464  */
465 int be_cmd_fw_init(struct be_adapter *adapter)
466 {
467         u8 *wrb;
468         int status;
469
470         spin_lock(&adapter->mbox_lock);
471
472         wrb = (u8 *)wrb_from_mbox(adapter);
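        /* BE and Lancer use the same 8-byte handshake pattern, differing only
         * in the byte order of the 0x1234/0x5678 words.
         */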
473         if (lancer_chip(adapter)) {
474                 *wrb++ = 0xFF;
475                 *wrb++ = 0x34;
476                 *wrb++ = 0x12;
477                 *wrb++ = 0xFF;
478                 *wrb++ = 0xFF;
479                 *wrb++ = 0x78;
480                 *wrb++ = 0x56;
481                 *wrb = 0xFF;
482         } else {
483                 *wrb++ = 0xFF;
484                 *wrb++ = 0x12;
485                 *wrb++ = 0x34;
486                 *wrb++ = 0xFF;
487                 *wrb++ = 0xFF;
488                 *wrb++ = 0x56;
489                 *wrb++ = 0x78;
490                 *wrb = 0xFF;
491         }
492
493         status = be_mbox_notify_wait(adapter);
494
495         spin_unlock(&adapter->mbox_lock);
496         return status;
497 }
498
499 /* Tell fw we're done with firing cmds by writing a
500  * special pattern across the wrb hdr; uses mbox
501  */
502 int be_cmd_fw_clean(struct be_adapter *adapter)
503 {
504         u8 *wrb;
505         int status;
506
507         if (adapter->eeh_err)
508                 return -EIO;
509
510         spin_lock(&adapter->mbox_lock);
511
512         wrb = (u8 *)wrb_from_mbox(adapter);
513         *wrb++ = 0xFF;
514         *wrb++ = 0xAA;
515         *wrb++ = 0xBB;
516         *wrb++ = 0xFF;
517         *wrb++ = 0xFF;
518         *wrb++ = 0xCC;
519         *wrb++ = 0xDD;
520         *wrb = 0xFF;
521
522         status = be_mbox_notify_wait(adapter);
523
524         spin_unlock(&adapter->mbox_lock);
525         return status;
526 }
527 int be_cmd_eq_create(struct be_adapter *adapter,
528                 struct be_queue_info *eq, int eq_delay)
529 {
530         struct be_mcc_wrb *wrb;
531         struct be_cmd_req_eq_create *req;
532         struct be_dma_mem *q_mem = &eq->dma_mem;
533         int status;
534
535         spin_lock(&adapter->mbox_lock);
536
537         wrb = wrb_from_mbox(adapter);
538         req = embedded_payload(wrb);
539
540         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);
541
542         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
543                 OPCODE_COMMON_EQ_CREATE, sizeof(*req));
544
545         req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
546
547         AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
548         /* 4 byte eqe */
549         AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
550         AMAP_SET_BITS(struct amap_eq_context, count, req->context,
551                         __ilog2_u32(eq->len/256));
552         AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
553                         eq_delay_to_mult(eq_delay));
554         be_dws_cpu_to_le(req->context, sizeof(req->context));
555
556         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
557
558         status = be_mbox_notify_wait(adapter);
559         if (!status) {
560                 struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
561                 eq->id = le16_to_cpu(resp->eq_id);
562                 eq->created = true;
563         }
564
565         spin_unlock(&adapter->mbox_lock);
566         return status;
567 }
568
569 /* Uses mbox */
570 int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
571                         u8 type, bool permanent, u32 if_handle)
572 {
573         struct be_mcc_wrb *wrb;
574         struct be_cmd_req_mac_query *req;
575         int status;
576
577         spin_lock(&adapter->mbox_lock);
578
579         wrb = wrb_from_mbox(adapter);
580         req = embedded_payload(wrb);
581
582         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
583                         OPCODE_COMMON_NTWK_MAC_QUERY);
584
585         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
586                 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));
587
588         req->type = type;
589         if (permanent) {
590                 req->permanent = 1;
591         } else {
592                 req->if_id = cpu_to_le16((u16) if_handle);
593                 req->permanent = 0;
594         }
595
596         status = be_mbox_notify_wait(adapter);
597         if (!status) {
598                 struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
599                 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
600         }
601
602         spin_unlock(&adapter->mbox_lock);
603         return status;
604 }
605
606 /* Uses synchronous MCCQ */
607 int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
608                 u32 if_id, u32 *pmac_id)
609 {
610         struct be_mcc_wrb *wrb;
611         struct be_cmd_req_pmac_add *req;
612         int status;
613
614         spin_lock_bh(&adapter->mcc_lock);
615
616         wrb = wrb_from_mccq(adapter);
617         if (!wrb) {
618                 status = -EBUSY;
619                 goto err;
620         }
621         req = embedded_payload(wrb);
622
623         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
624                         OPCODE_COMMON_NTWK_PMAC_ADD);
625
626         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
627                 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));
628
629         req->if_id = cpu_to_le32(if_id);
630         memcpy(req->mac_address, mac_addr, ETH_ALEN);
631
632         status = be_mcc_notify_wait(adapter);
633         if (!status) {
634                 struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
635                 *pmac_id = le32_to_cpu(resp->pmac_id);
636         }
637
638 err:
639         spin_unlock_bh(&adapter->mcc_lock);
640         return status;
641 }
642
643 /* Uses synchronous MCCQ */
644 int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id)
645 {
646         struct be_mcc_wrb *wrb;
647         struct be_cmd_req_pmac_del *req;
648         int status;
649
650         spin_lock_bh(&adapter->mcc_lock);
651
652         wrb = wrb_from_mccq(adapter);
653         if (!wrb) {
654                 status = -EBUSY;
655                 goto err;
656         }
657         req = embedded_payload(wrb);
658
659         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
660                         OPCODE_COMMON_NTWK_PMAC_DEL);
661
662         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
663                 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));
664
665         req->if_id = cpu_to_le32(if_id);
666         req->pmac_id = cpu_to_le32(pmac_id);
667
668         status = be_mcc_notify_wait(adapter);
669
670 err:
671         spin_unlock_bh(&adapter->mcc_lock);
672         return status;
673 }
674
675 /* Uses Mbox */
676 int be_cmd_cq_create(struct be_adapter *adapter,
677                 struct be_queue_info *cq, struct be_queue_info *eq,
678                 bool sol_evts, bool no_delay, int coalesce_wm)
679 {
680         struct be_mcc_wrb *wrb;
681         struct be_cmd_req_cq_create *req;
682         struct be_dma_mem *q_mem = &cq->dma_mem;
683         void *ctxt;
684         int status;
685
686         spin_lock(&adapter->mbox_lock);
687
688         wrb = wrb_from_mbox(adapter);
689         req = embedded_payload(wrb);
690         ctxt = &req->context;
691
692         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
693                         OPCODE_COMMON_CQ_CREATE);
694
695         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
696                 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
697
698         req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
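        /* Lancer uses a v1 CQ_CREATE with its own context layout and an
         * explicit page_size field; non-Lancer chips use the v0 context below.
         */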
699         if (lancer_chip(adapter)) {
700                 req->hdr.version = 1;
701                 req->page_size = 1; /* 1 for 4K */
702                 AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
703                                                                 coalesce_wm);
704                 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
705                                                                 no_delay);
706                 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
707                                                 __ilog2_u32(cq->len/256));
708                 AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
709                 AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
710                                                                 ctxt, 1);
711                 AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
712                                                                 ctxt, eq->id);
713                 AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
714         } else {
715                 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
716                                                                 coalesce_wm);
717                 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
718                                                                 ctxt, no_delay);
719                 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
720                                                 __ilog2_u32(cq->len/256));
721                 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
722                 AMAP_SET_BITS(struct amap_cq_context_be, solevent,
723                                                                 ctxt, sol_evts);
724                 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
725                 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
726                 AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
727         }
728
729         be_dws_cpu_to_le(ctxt, sizeof(req->context));
730
731         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
732
733         status = be_mbox_notify_wait(adapter);
734         if (!status) {
735                 struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
736                 cq->id = le16_to_cpu(resp->cq_id);
737                 cq->created = true;
738         }
739
740         spin_unlock(&adapter->mbox_lock);
741
742         return status;
743 }
744
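/* Queue lengths are encoded as log2(len) + 1; the maximum length (32k entries)
 * wraps around to an encoding of 0.
 */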
745 static u32 be_encoded_q_len(int q_len)
746 {
747         u32 len_encoded = fls(q_len); /* log2(len) + 1 */
748         if (len_encoded == 16)
749                 len_encoded = 0;
750         return len_encoded;
751 }
752
753 int be_cmd_mccq_create(struct be_adapter *adapter,
754                         struct be_queue_info *mccq,
755                         struct be_queue_info *cq)
756 {
757         struct be_mcc_wrb *wrb;
758         struct be_cmd_req_mcc_create *req;
759         struct be_dma_mem *q_mem = &mccq->dma_mem;
760         void *ctxt;
761         int status;
762
763         spin_lock(&adapter->mbox_lock);
764
765         wrb = wrb_from_mbox(adapter);
766         req = embedded_payload(wrb);
767         ctxt = &req->context;
768
769         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
770                         OPCODE_COMMON_MCC_CREATE_EXT);
771
772         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
773                         OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
774
775         req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
776         if (lancer_chip(adapter)) {
777                 req->hdr.version = 1;
778                 req->cq_id = cpu_to_le16(cq->id);
779
780                 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
781                                                 be_encoded_q_len(mccq->len));
782                 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
783                 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
784                                                                 ctxt, cq->id);
785                 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
786                                                                  ctxt, 1);
787
788         } else {
789                 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
790                 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
791                                                 be_encoded_q_len(mccq->len));
792                 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
793         }
794
795         /* Subscribe to Link State and Group 5 Events (bits 1 and 5 set) */
796         req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
797         be_dws_cpu_to_le(ctxt, sizeof(req->context));
798
799         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
800
801         status = be_mbox_notify_wait(adapter);
802         if (!status) {
803                 struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
804                 mccq->id = le16_to_cpu(resp->id);
805                 mccq->created = true;
806         }
807         spin_unlock(&adapter->mbox_lock);
808
809         return status;
810 }
811
812 int be_cmd_txq_create(struct be_adapter *adapter,
813                         struct be_queue_info *txq,
814                         struct be_queue_info *cq)
815 {
816         struct be_mcc_wrb *wrb;
817         struct be_cmd_req_eth_tx_create *req;
818         struct be_dma_mem *q_mem = &txq->dma_mem;
819         void *ctxt;
820         int status;
821
822         spin_lock(&adapter->mbox_lock);
823
824         wrb = wrb_from_mbox(adapter);
825         req = embedded_payload(wrb);
826         ctxt = &req->context;
827
828         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
829                         OPCODE_ETH_TX_CREATE);
830
831         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
832                 sizeof(*req));
833
834         req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
835         req->ulp_num = BE_ULP1_NUM;
836         req->type = BE_ETH_TX_RING_TYPE_STANDARD;
837
838         AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
839                 be_encoded_q_len(txq->len));
840         AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
841         AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);
842
843         be_dws_cpu_to_le(ctxt, sizeof(req->context));
844
845         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
846
847         status = be_mbox_notify_wait(adapter);
848         if (!status) {
849                 struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
850                 txq->id = le16_to_cpu(resp->cid);
851                 txq->created = true;
852         }
853
854         spin_unlock(&adapter->mbox_lock);
855
856         return status;
857 }
858
859 /* Uses mbox */
860 int be_cmd_rxq_create(struct be_adapter *adapter,
861                 struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
862                 u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
863 {
864         struct be_mcc_wrb *wrb;
865         struct be_cmd_req_eth_rx_create *req;
866         struct be_dma_mem *q_mem = &rxq->dma_mem;
867         int status;
868
869         spin_lock(&adapter->mbox_lock);
870
871         wrb = wrb_from_mbox(adapter);
872         req = embedded_payload(wrb);
873
874         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
875                         OPCODE_ETH_RX_CREATE);
876
877         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
878                 sizeof(*req));
879
880         req->cq_id = cpu_to_le16(cq_id);
881         req->frag_size = fls(frag_size) - 1;
882         req->num_pages = 2;
883         be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
884         req->interface_id = cpu_to_le32(if_id);
885         req->max_frame_size = cpu_to_le16(max_frame_size);
886         req->rss_queue = cpu_to_le32(rss);
887
888         status = be_mbox_notify_wait(adapter);
889         if (!status) {
890                 struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
891                 rxq->id = le16_to_cpu(resp->id);
892                 rxq->created = true;
893                 *rss_id = resp->rss_id;
894         }
895
896         spin_unlock(&adapter->mbox_lock);
897
898         return status;
899 }
900
901 /* Generic destroyer function for all types of queues
902  * Uses Mbox
903  */
904 int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
905                 int queue_type)
906 {
907         struct be_mcc_wrb *wrb;
908         struct be_cmd_req_q_destroy *req;
909         u8 subsys = 0, opcode = 0;
910         int status;
911
912         if (adapter->eeh_err)
913                 return -EIO;
914
915         spin_lock(&adapter->mbox_lock);
916
917         wrb = wrb_from_mbox(adapter);
918         req = embedded_payload(wrb);
919
920         switch (queue_type) {
921         case QTYPE_EQ:
922                 subsys = CMD_SUBSYSTEM_COMMON;
923                 opcode = OPCODE_COMMON_EQ_DESTROY;
924                 break;
925         case QTYPE_CQ:
926                 subsys = CMD_SUBSYSTEM_COMMON;
927                 opcode = OPCODE_COMMON_CQ_DESTROY;
928                 break;
929         case QTYPE_TXQ:
930                 subsys = CMD_SUBSYSTEM_ETH;
931                 opcode = OPCODE_ETH_TX_DESTROY;
932                 break;
933         case QTYPE_RXQ:
934                 subsys = CMD_SUBSYSTEM_ETH;
935                 opcode = OPCODE_ETH_RX_DESTROY;
936                 break;
937         case QTYPE_MCCQ:
938                 subsys = CMD_SUBSYSTEM_COMMON;
939                 opcode = OPCODE_COMMON_MCC_DESTROY;
940                 break;
941         default:
942                 BUG();
943         }
944
945         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);
946
947         be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
948         req->id = cpu_to_le16(q->id);
949
950         status = be_mbox_notify_wait(adapter);
951
952         spin_unlock(&adapter->mbox_lock);
953
954         return status;
955 }
956
957 /* Create an rx filtering policy configuration on an i/f
958  * Uses mbox
959  */
960 int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
961                 u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
962                 u32 domain)
963 {
964         struct be_mcc_wrb *wrb;
965         struct be_cmd_req_if_create *req;
966         int status;
967
968         spin_lock(&adapter->mbox_lock);
969
970         wrb = wrb_from_mbox(adapter);
971         req = embedded_payload(wrb);
972
973         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
974                         OPCODE_COMMON_NTWK_INTERFACE_CREATE);
975
976         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
977                 OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));
978
979         req->hdr.domain = domain;
980         req->capability_flags = cpu_to_le32(cap_flags);
981         req->enable_flags = cpu_to_le32(en_flags);
982         req->pmac_invalid = pmac_invalid;
983         if (!pmac_invalid)
984                 memcpy(req->mac_addr, mac, ETH_ALEN);
985
986         status = be_mbox_notify_wait(adapter);
987         if (!status) {
988                 struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
989                 *if_handle = le32_to_cpu(resp->interface_id);
990                 if (!pmac_invalid)
991                         *pmac_id = le32_to_cpu(resp->pmac_id);
992         }
993
994         spin_unlock(&adapter->mbox_lock);
995         return status;
996 }
997
998 /* Uses mbox */
999 int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
1000 {
1001         struct be_mcc_wrb *wrb;
1002         struct be_cmd_req_if_destroy *req;
1003         int status;
1004
1005         if (adapter->eeh_err)
1006                 return -EIO;
1007
1008         spin_lock(&adapter->mbox_lock);
1009
1010         wrb = wrb_from_mbox(adapter);
1011         req = embedded_payload(wrb);
1012
1013         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1014                         OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
1015
1016         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1017                 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
1018
1019         req->interface_id = cpu_to_le32(interface_id);
1020
1021         status = be_mbox_notify_wait(adapter);
1022
1023         spin_unlock(&adapter->mbox_lock);
1024
1025         return status;
1026 }
1027
1028 /* Get stats is a non-embedded command: the request is not embedded inside
1029  * the WRB but is a separate dma memory block
1030  * Uses asynchronous MCC
1031  */
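/* The response is consumed later in be_mcc_compl_process() when the
 * GET_STATISTICS completion arrives.
 */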
1032 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
1033 {
1034         struct be_mcc_wrb *wrb;
1035         struct be_cmd_req_get_stats *req;
1036         struct be_sge *sge;
1037         int status = 0;
1038
1039         spin_lock_bh(&adapter->mcc_lock);
1040
1041         wrb = wrb_from_mccq(adapter);
1042         if (!wrb) {
1043                 status = -EBUSY;
1044                 goto err;
1045         }
1046         req = nonemb_cmd->va;
1047         sge = nonembedded_sgl(wrb);
1048
1049         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1050                         OPCODE_ETH_GET_STATISTICS);
1051
1052         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1053                 OPCODE_ETH_GET_STATISTICS, sizeof(*req));
1054         sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1055         sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1056         sge->len = cpu_to_le32(nonemb_cmd->size);
1057
1058         be_mcc_notify(adapter);
1059         adapter->stats_ioctl_sent = true;
1060
1061 err:
1062         spin_unlock_bh(&adapter->mcc_lock);
1063         return status;
1064 }
1065
1066 /* Uses synchronous mcc */
1067 int be_cmd_link_status_query(struct be_adapter *adapter,
1068                         bool *link_up, u8 *mac_speed, u16 *link_speed)
1069 {
1070         struct be_mcc_wrb *wrb;
1071         struct be_cmd_req_link_status *req;
1072         int status;
1073
1074         spin_lock_bh(&adapter->mcc_lock);
1075
1076         wrb = wrb_from_mccq(adapter);
1077         if (!wrb) {
1078                 status = -EBUSY;
1079                 goto err;
1080         }
1081         req = embedded_payload(wrb);
1082
1083         *link_up = false;
1084
1085         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1086                         OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);
1087
1088         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1089                 OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));
1090
1091         status = be_mcc_notify_wait(adapter);
1092         if (!status) {
1093                 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
1094                 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
1095                         *link_up = true;
1096                         *link_speed = le16_to_cpu(resp->link_speed);
1097                         *mac_speed = resp->mac_speed;
1098                 }
1099         }
1100
1101 err:
1102         spin_unlock_bh(&adapter->mcc_lock);
1103         return status;
1104 }
1105
1106 /* Uses Mbox */
1107 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1108 {
1109         struct be_mcc_wrb *wrb;
1110         struct be_cmd_req_get_fw_version *req;
1111         int status;
1112
1113         spin_lock(&adapter->mbox_lock);
1114
1115         wrb = wrb_from_mbox(adapter);
1116         req = embedded_payload(wrb);
1117
1118         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1119                         OPCODE_COMMON_GET_FW_VERSION);
1120
1121         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1122                 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
1123
1124         status = be_mbox_notify_wait(adapter);
1125         if (!status) {
1126                 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1127                 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
1128         }
1129
1130         spin_unlock(&adapter->mbox_lock);
1131         return status;
1132 }
1133
1134 /* Set the delay interval of an EQ to the specified value
1135  * Uses async mcc
1136  */
1137 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1138 {
1139         struct be_mcc_wrb *wrb;
1140         struct be_cmd_req_modify_eq_delay *req;
1141         int status = 0;
1142
1143         spin_lock_bh(&adapter->mcc_lock);
1144
1145         wrb = wrb_from_mccq(adapter);
1146         if (!wrb) {
1147                 status = -EBUSY;
1148                 goto err;
1149         }
1150         req = embedded_payload(wrb);
1151
1152         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1153                         OPCODE_COMMON_MODIFY_EQ_DELAY);
1154
1155         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1156                 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
1157
1158         req->num_eq = cpu_to_le32(1);
1159         req->delay[0].eq_id = cpu_to_le32(eq_id);
1160         req->delay[0].phase = 0;
1161         req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1162
1163         be_mcc_notify(adapter);
1164
1165 err:
1166         spin_unlock_bh(&adapter->mcc_lock);
1167         return status;
1168 }
1169
1170 /* Uses synchronous mcc */
1171 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
1172                         u32 num, bool untagged, bool promiscuous)
1173 {
1174         struct be_mcc_wrb *wrb;
1175         struct be_cmd_req_vlan_config *req;
1176         int status;
1177
1178         spin_lock_bh(&adapter->mcc_lock);
1179
1180         wrb = wrb_from_mccq(adapter);
1181         if (!wrb) {
1182                 status = -EBUSY;
1183                 goto err;
1184         }
1185         req = embedded_payload(wrb);
1186
1187         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1188                         OPCODE_COMMON_NTWK_VLAN_CONFIG);
1189
1190         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1191                 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));
1192
1193         req->interface_id = if_id;
1194         req->promiscuous = promiscuous;
1195         req->untagged = untagged;
1196         req->num_vlan = num;
1197         if (!promiscuous) {
1198                 memcpy(req->normal_vlan, vtag_array,
1199                         req->num_vlan * sizeof(vtag_array[0]));
1200         }
1201
1202         status = be_mcc_notify_wait(adapter);
1203
1204 err:
1205         spin_unlock_bh(&adapter->mcc_lock);
1206         return status;
1207 }
1208
1209 /* Uses MCC for this command as it may be called in BH context
1210  * Uses synchronous mcc
1211  */
1212 int be_cmd_promiscuous_config(struct be_adapter *adapter, u8 port_num, bool en)
1213 {
1214         struct be_mcc_wrb *wrb;
1215         struct be_cmd_req_promiscuous_config *req;
1216         int status;
1217
1218         spin_lock_bh(&adapter->mcc_lock);
1219
1220         wrb = wrb_from_mccq(adapter);
1221         if (!wrb) {
1222                 status = -EBUSY;
1223                 goto err;
1224         }
1225         req = embedded_payload(wrb);
1226
1227         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_ETH_PROMISCUOUS);
1228
1229         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1230                 OPCODE_ETH_PROMISCUOUS, sizeof(*req));
1231
1232         /* In FW versions X.102.149/X.101.487 and later,
1233          * the port setting associated only with the
1234          * issuing pci function will take effect
1235          */
1236         if (port_num)
1237                 req->port1_promiscuous = en;
1238         else
1239                 req->port0_promiscuous = en;
1240
1241         status = be_mcc_notify_wait(adapter);
1242
1243 err:
1244         spin_unlock_bh(&adapter->mcc_lock);
1245         return status;
1246 }
1247
1248 /*
1249  * Uses MCC for this command as it may be called in BH context
1250  * (netdev == NULL) => multicast promiscuous
1251  */
1252 int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
1253                 struct net_device *netdev, struct be_dma_mem *mem)
1254 {
1255         struct be_mcc_wrb *wrb;
1256         struct be_cmd_req_mcast_mac_config *req = mem->va;
1257         struct be_sge *sge;
1258         int status;
1259
1260         spin_lock_bh(&adapter->mcc_lock);
1261
1262         wrb = wrb_from_mccq(adapter);
1263         if (!wrb) {
1264                 status = -EBUSY;
1265                 goto err;
1266         }
1267         sge = nonembedded_sgl(wrb);
1268         memset(req, 0, sizeof(*req));
1269
1270         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1271                         OPCODE_COMMON_NTWK_MULTICAST_SET);
1272         sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
1273         sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
1274         sge->len = cpu_to_le32(mem->size);
1275
1276         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1277                 OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));
1278
1279         req->interface_id = if_id;
1280         if (netdev) {
1281                 int i;
1282                 struct netdev_hw_addr *ha;
1283
1284                 req->num_mac = cpu_to_le16(netdev_mc_count(netdev));
1285
1286                 i = 0;
1287                 netdev_for_each_mc_addr(ha, netdev)
1288                         memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
1289         } else {
1290                 req->promiscuous = 1;
1291         }
1292
1293         status = be_mcc_notify_wait(adapter);
1294
1295 err:
1296         spin_unlock_bh(&adapter->mcc_lock);
1297         return status;
1298 }
1299
1300 /* Uses synchronous mcc */
1301 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1302 {
1303         struct be_mcc_wrb *wrb;
1304         struct be_cmd_req_set_flow_control *req;
1305         int status;
1306
1307         spin_lock_bh(&adapter->mcc_lock);
1308
1309         wrb = wrb_from_mccq(adapter);
1310         if (!wrb) {
1311                 status = -EBUSY;
1312                 goto err;
1313         }
1314         req = embedded_payload(wrb);
1315
1316         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1317                         OPCODE_COMMON_SET_FLOW_CONTROL);
1318
1319         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1320                 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
1321
1322         req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1323         req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1324
1325         status = be_mcc_notify_wait(adapter);
1326
1327 err:
1328         spin_unlock_bh(&adapter->mcc_lock);
1329         return status;
1330 }
1331
1332 /* Uses sync mcc */
1333 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
1334 {
1335         struct be_mcc_wrb *wrb;
1336         struct be_cmd_req_get_flow_control *req;
1337         int status;
1338
1339         spin_lock_bh(&adapter->mcc_lock);
1340
1341         wrb = wrb_from_mccq(adapter);
1342         if (!wrb) {
1343                 status = -EBUSY;
1344                 goto err;
1345         }
1346         req = embedded_payload(wrb);
1347
1348         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1349                         OPCODE_COMMON_GET_FLOW_CONTROL);
1350
1351         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1352                 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));
1353
1354         status = be_mcc_notify_wait(adapter);
1355         if (!status) {
1356                 struct be_cmd_resp_get_flow_control *resp =
1357                                                 embedded_payload(wrb);
1358                 *tx_fc = le16_to_cpu(resp->tx_flow_control);
1359                 *rx_fc = le16_to_cpu(resp->rx_flow_control);
1360         }
1361
1362 err:
1363         spin_unlock_bh(&adapter->mcc_lock);
1364         return status;
1365 }
1366
1367 /* Uses mbox */
1368 int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1369                 u32 *mode, u32 *caps)
1370 {
1371         struct be_mcc_wrb *wrb;
1372         struct be_cmd_req_query_fw_cfg *req;
1373         int status;
1374
1375         spin_lock(&adapter->mbox_lock);
1376
1377         wrb = wrb_from_mbox(adapter);
1378         req = embedded_payload(wrb);
1379
1380         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1381                         OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);
1382
1383         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1384                 OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));
1385
1386         status = be_mbox_notify_wait(adapter);
1387         if (!status) {
1388                 struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
1389                 *port_num = le32_to_cpu(resp->phys_port);
1390                 *mode = le32_to_cpu(resp->function_mode);
1391                 *caps = le32_to_cpu(resp->function_caps);
1392         }
1393
1394         spin_unlock(&adapter->mbox_lock);
1395         return status;
1396 }
1397
1398 /* Uses mbox */
1399 int be_cmd_reset_function(struct be_adapter *adapter)
1400 {
1401         struct be_mcc_wrb *wrb;
1402         struct be_cmd_req_hdr *req;
1403         int status;
1404
1405         spin_lock(&adapter->mbox_lock);
1406
1407         wrb = wrb_from_mbox(adapter);
1408         req = embedded_payload(wrb);
1409
1410         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1411                         OPCODE_COMMON_FUNCTION_RESET);
1412
1413         be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1414                 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1415
1416         status = be_mbox_notify_wait(adapter);
1417
1418         spin_unlock(&adapter->mbox_lock);
1419         return status;
1420 }
1421
1422 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1423 {
1424         struct be_mcc_wrb *wrb;
1425         struct be_cmd_req_rss_config *req;
1426         u32 myhash[10];
1427         int status;
1428
1429         spin_lock(&adapter->mbox_lock);
1430
1431         wrb = wrb_from_mbox(adapter);
1432         req = embedded_payload(wrb);
1433
1434         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1435                 OPCODE_ETH_RSS_CONFIG);
1436
1437         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1438                 OPCODE_ETH_RSS_CONFIG, sizeof(*req));
1439
1440         req->if_id = cpu_to_le32(adapter->if_handle);
1441         req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
1442         req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1443         memcpy(req->cpu_table, rsstable, table_size);
1444         memcpy(req->hash, myhash, sizeof(myhash));
1445         be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1446
1447         status = be_mbox_notify_wait(adapter);
1448
1449         spin_unlock(&adapter->mbox_lock);
1450         return status;
1451 }
1452
1453 /* Uses sync mcc */
1454 int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1455                         u8 bcn, u8 sts, u8 state)
1456 {
1457         struct be_mcc_wrb *wrb;
1458         struct be_cmd_req_enable_disable_beacon *req;
1459         int status;
1460
1461         spin_lock_bh(&adapter->mcc_lock);
1462
1463         wrb = wrb_from_mccq(adapter);
1464         if (!wrb) {
1465                 status = -EBUSY;
1466                 goto err;
1467         }
1468         req = embedded_payload(wrb);
1469
1470         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1471                         OPCODE_COMMON_ENABLE_DISABLE_BEACON);
1472
1473         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1474                 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
1475
1476         req->port_num = port_num;
1477         req->beacon_state = state;
1478         req->beacon_duration = bcn;
1479         req->status_duration = sts;
1480
1481         status = be_mcc_notify_wait(adapter);
1482
1483 err:
1484         spin_unlock_bh(&adapter->mcc_lock);
1485         return status;
1486 }
1487
1488 /* Uses sync mcc */
1489 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1490 {
1491         struct be_mcc_wrb *wrb;
1492         struct be_cmd_req_get_beacon_state *req;
1493         int status;
1494
1495         spin_lock_bh(&adapter->mcc_lock);
1496
1497         wrb = wrb_from_mccq(adapter);
1498         if (!wrb) {
1499                 status = -EBUSY;
1500                 goto err;
1501         }
1502         req = embedded_payload(wrb);
1503
1504         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1505                         OPCODE_COMMON_GET_BEACON_STATE);
1506
1507         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1508                 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
1509
1510         req->port_num = port_num;
1511
1512         status = be_mcc_notify_wait(adapter);
1513         if (!status) {
1514                 struct be_cmd_resp_get_beacon_state *resp =
1515                                                 embedded_payload(wrb);
1516                 *state = resp->beacon_state;
1517         }
1518
1519 err:
1520         spin_unlock_bh(&adapter->mcc_lock);
1521         return status;
1522 }
1523
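/* Flash writes can take a long time, so the command is fired asynchronously
 * and the caller sleeps on flash_compl, which be_mcc_compl_process() completes
 * when the WRITE_FLASHROM completion (matched via tag0/tag1) arrives; gives up
 * after a 12s timeout.
 */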
1524 int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1525                         u32 flash_type, u32 flash_opcode, u32 buf_size)
1526 {
1527         struct be_mcc_wrb *wrb;
1528         struct be_cmd_write_flashrom *req;
1529         struct be_sge *sge;
1530         int status;
1531
1532         spin_lock_bh(&adapter->mcc_lock);
1533         adapter->flash_status = 0;
1534
1535         wrb = wrb_from_mccq(adapter);
1536         if (!wrb) {
1537                 status = -EBUSY;
1538                 goto err_unlock;
1539         }
1540         req = cmd->va;
1541         sge = nonembedded_sgl(wrb);
1542
1543         be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
1544                         OPCODE_COMMON_WRITE_FLASHROM);
1545         wrb->tag1 = CMD_SUBSYSTEM_COMMON;
1546
1547         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1548                 OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
1549         sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1550         sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1551         sge->len = cpu_to_le32(cmd->size);
1552
1553         req->params.op_type = cpu_to_le32(flash_type);
1554         req->params.op_code = cpu_to_le32(flash_opcode);
1555         req->params.data_buf_size = cpu_to_le32(buf_size);
1556
1557         be_mcc_notify(adapter);
1558         spin_unlock_bh(&adapter->mcc_lock);
1559
1560         if (!wait_for_completion_timeout(&adapter->flash_compl,
1561                         msecs_to_jiffies(12000)))
1562                 status = -1;
1563         else
1564                 status = adapter->flash_status;
1565
1566         return status;
1567
1568 err_unlock:
1569         spin_unlock_bh(&adapter->mcc_lock);
1570         return status;
1571 }
1572
1573 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1574                          int offset)
1575 {
1576         struct be_mcc_wrb *wrb;
1577         struct be_cmd_write_flashrom *req;
1578         int status;
1579
1580         spin_lock_bh(&adapter->mcc_lock);
1581
1582         wrb = wrb_from_mccq(adapter);
1583         if (!wrb) {
1584                 status = -EBUSY;
1585                 goto err;
1586         }
1587         req = embedded_payload(wrb);
1588
1589         be_wrb_hdr_prepare(wrb, sizeof(*req) + 4, true, 0,
1590                         OPCODE_COMMON_READ_FLASHROM);
1591
1592         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1593                 OPCODE_COMMON_READ_FLASHROM, sizeof(*req) + 4);
1594
1595         req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
1596         req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
1597         req->params.offset = cpu_to_le32(offset);
1598         req->params.data_buf_size = cpu_to_le32(0x4);
1599
1600         status = be_mcc_notify_wait(adapter);
1601         if (!status)
1602                 memcpy(flashed_crc, req->params.data_buf, 4);
1603
1604 err:
1605         spin_unlock_bh(&adapter->mcc_lock);
1606         return status;
1607 }
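/* Illustrative sketch (not part of the driver): deciding whether a redboot
 * image actually needs reflashing by comparing the CRC the adapter reports
 * with the last four bytes of the new image.  The helper name and the
 * assumption that the CRC lives in the final 4 bytes of the image region
 * are illustrative only.  Kept under #if 0 so it is never built.
 */
#if 0
static bool be_example_redboot_needs_update(struct be_adapter *adapter,
                        const u8 *img, u32 img_size, u32 img_offset)
{
        u8 flashed_crc[4];
        int status;

        status = be_cmd_get_flash_crc(adapter, flashed_crc,
                                img_offset + img_size - 4);
        if (status)
                return false;   /* can't tell; skip the update */

        return memcmp(flashed_crc, img + img_size - 4, 4) != 0;
}
#endif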
1608
1609 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
1610                                 struct be_dma_mem *nonemb_cmd)
1611 {
1612         struct be_mcc_wrb *wrb;
1613         struct be_cmd_req_acpi_wol_magic_config *req;
1614         struct be_sge *sge;
1615         int status;
1616
1617         spin_lock_bh(&adapter->mcc_lock);
1618
1619         wrb = wrb_from_mccq(adapter);
1620         if (!wrb) {
1621                 status = -EBUSY;
1622                 goto err;
1623         }
1624         req = nonemb_cmd->va;
1625         sge = nonembedded_sgl(wrb);
1626
1627         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1628                         OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
1629
1630         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1631                 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
1632         memcpy(req->magic_mac, mac, ETH_ALEN);
1633
1634         sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1635         sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1636         sge->len = cpu_to_le32(nonemb_cmd->size);
1637
1638         status = be_mcc_notify_wait(adapter);
1639
1640 err:
1641         spin_unlock_bh(&adapter->mcc_lock);
1642         return status;
1643 }
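/* Illustrative sketch (not part of the driver): arming magic-packet wake-up
 * from a suspend path.  The helper name is an assumption; "mac" would
 * typically be the netdev's dev_addr.  Kept under #if 0 so it is never built.
 */
#if 0
static int be_example_arm_wol(struct be_adapter *adapter, u8 *mac)
{
        struct be_dma_mem cmd;
        int status;

        memset(&cmd, 0, sizeof(cmd));
        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
        cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
        if (!cmd.va)
                return -ENOMEM;
        memset(cmd.va, 0, cmd.size);

        status = be_cmd_enable_magic_wol(adapter, mac, &cmd);

        pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
        return status;
}
#endif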
1644
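/* Uses sync mcc */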
1645 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1646                         u8 loopback_type, u8 enable)
1647 {
1648         struct be_mcc_wrb *wrb;
1649         struct be_cmd_req_set_lmode *req;
1650         int status;
1651
1652         spin_lock_bh(&adapter->mcc_lock);
1653
1654         wrb = wrb_from_mccq(adapter);
1655         if (!wrb) {
1656                 status = -EBUSY;
1657                 goto err;
1658         }
1659
1660         req = embedded_payload(wrb);
1661
1662         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1663                                 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
1664
1665         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1666                         OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
1667                         sizeof(*req));
1668
1669         req->src_port = port_num;
1670         req->dest_port = port_num;
1671         req->loopback_type = loopback_type;
1672         req->loopback_state = enable;
1673
1674         status = be_mcc_notify_wait(adapter);
1675 err:
1676         spin_unlock_bh(&adapter->mcc_lock);
1677         return status;
1678 }
1679
1680 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1681                 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
1682 {
1683         struct be_mcc_wrb *wrb;
1684         struct be_cmd_req_loopback_test *req;
1685         int status;
1686
1687         spin_lock_bh(&adapter->mcc_lock);
1688
1689         wrb = wrb_from_mccq(adapter);
1690         if (!wrb) {
1691                 status = -EBUSY;
1692                 goto err;
1693         }
1694
1695         req = embedded_payload(wrb);
1696
1697         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1698                                 OPCODE_LOWLEVEL_LOOPBACK_TEST);
1699
1700         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1701                         OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
1702         req->hdr.timeout = cpu_to_le32(4);
1703
1704         req->pattern = cpu_to_le64(pattern);
1705         req->src_port = cpu_to_le32(port_num);
1706         req->dest_port = cpu_to_le32(port_num);
1707         req->pkt_size = cpu_to_le32(pkt_size);
1708         req->num_pkts = cpu_to_le32(num_pkts);
1709         req->loopback_type = cpu_to_le32(loopback_type);
1710
1711         status = be_mcc_notify_wait(adapter);
1712         if (!status) {
1713                 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
1714                 status = le32_to_cpu(resp->status);
1715         }
1716
1717 err:
1718         spin_unlock_bh(&adapter->mcc_lock);
1719         return status;
1720 }
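/* Illustrative sketch (not part of the driver): one loopback self-test step
 * built from the two commands above: enter the requested loopback mode, run
 * the packet test, then leave loopback.  The helper name, packet size,
 * packet count, test pattern and the way loopback is undone here (clearing
 * the enable flag) are assumptions; the driver's self-test may restore the
 * mode differently.  Kept under #if 0 so it is never built.
 */
#if 0
static int be_example_loopback_step(struct be_adapter *adapter, u8 port_num,
                        u8 loopback_type)
{
        int status;

        status = be_cmd_set_loopback(adapter, port_num, loopback_type, 1);
        if (status)
                return status;

        status = be_cmd_loopback_test(adapter, port_num, loopback_type,
                                1500, 2, 0xFF00FF00FF00FF00ULL);

        /* Best effort: take the port back out of loopback either way */
        be_cmd_set_loopback(adapter, port_num, loopback_type, 0);
        return status;
}
#endif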
1721
1722 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
1723                                 u32 byte_cnt, struct be_dma_mem *cmd)
1724 {
1725         struct be_mcc_wrb *wrb;
1726         struct be_cmd_req_ddrdma_test *req;
1727         struct be_sge *sge;
1728         int status;
1729         int i, j = 0;
1730
1731         spin_lock_bh(&adapter->mcc_lock);
1732
1733         wrb = wrb_from_mccq(adapter);
1734         if (!wrb) {
1735                 status = -EBUSY;
1736                 goto err;
1737         }
1738         req = cmd->va;
1739         sge = nonembedded_sgl(wrb);
1740         be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
1741                                 OPCODE_LOWLEVEL_HOST_DDR_DMA);
1742         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1743                         OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
1744
1745         sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1746         sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1747         sge->len = cpu_to_le32(cmd->size);
1748
1749         req->pattern = cpu_to_le64(pattern);
1750         req->byte_count = cpu_to_le32(byte_cnt);
1751         for (i = 0; i < byte_cnt; i++) {
1752                 req->snd_buff[i] = (u8)(pattern >> (j*8));
1753                 j++;
1754                 if (j > 7)
1755                         j = 0;
1756         }
1757
1758         status = be_mcc_notify_wait(adapter);
1759
1760         if (!status) {
1761                 struct be_cmd_resp_ddrdma_test *resp;
1762                 resp = cmd->va;
1763                 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
1764                                 resp->snd_err) {
1765                         status = -EIO;
1766                 }
1767         }
1768
1769 err:
1770         spin_unlock_bh(&adapter->mcc_lock);
1771         return status;
1772 }
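/* Illustrative sketch (not part of the driver): running the DDR DMA
 * self-test with a couple of byte patterns.  The pattern values, the
 * 4096-byte count and the helper name are assumptions; sizing the DMA
 * buffer as sizeof(struct be_cmd_req_ddrdma_test) assumes the request's
 * built-in send buffer covers byte_cnt.  Kept under #if 0 so it is never
 * built.
 */
#if 0
static int be_example_ddr_dma_selftest(struct be_adapter *adapter)
{
        static const u64 patterns[] = { 0x5a5a5a5a5a5a5a5aULL,
                                        0xa5a5a5a5a5a5a5a5ULL };
        struct be_dma_mem cmd;
        int i, status = 0;

        memset(&cmd, 0, sizeof(cmd));
        cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
        cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
        if (!cmd.va)
                return -ENOMEM;

        for (i = 0; i < ARRAY_SIZE(patterns); i++) {
                status = be_cmd_ddr_dma_test(adapter, patterns[i], 4096, &cmd);
                if (status)
                        break;
        }

        pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
        return status;
}
#endif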
1773
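/* Uses sync mcc; the SEEPROM contents are returned in the caller-supplied
 * non-embedded DMA buffer (nonemb_cmd->va). */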
1774 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
1775                                 struct be_dma_mem *nonemb_cmd)
1776 {
1777         struct be_mcc_wrb *wrb;
1778         struct be_cmd_req_seeprom_read *req;
1779         struct be_sge *sge;
1780         int status;
1781
1782         spin_lock_bh(&adapter->mcc_lock);
1783
1784         wrb = wrb_from_mccq(adapter);
             if (!wrb) {
                     status = -EBUSY;
                     goto err;
             }
1785         req = nonemb_cmd->va;
1786         sge = nonembedded_sgl(wrb);
1787
1788         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1789                         OPCODE_COMMON_SEEPROM_READ);
1790
1791         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1792                         OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
1793
1794         sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1795         sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1796         sge->len = cpu_to_le32(nonemb_cmd->size);
1797
1798         status = be_mcc_notify_wait(adapter);
1799
     err:
1800         spin_unlock_bh(&adapter->mcc_lock);
1801         return status;
1802 }
1803
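/* Uses sync mcc; the PHY details are returned in the caller-supplied
 * non-embedded DMA buffer (cmd->va). */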
1804 int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
1805 {
1806         struct be_mcc_wrb *wrb;
1807         struct be_cmd_req_get_phy_info *req;
1808         struct be_sge *sge;
1809         int status;
1810
1811         spin_lock_bh(&adapter->mcc_lock);
1812
1813         wrb = wrb_from_mccq(adapter);
1814         if (!wrb) {
1815                 status = -EBUSY;
1816                 goto err;
1817         }
1818
1819         req = cmd->va;
1820         sge = nonembedded_sgl(wrb);
1821
1822         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1823                                 OPCODE_COMMON_GET_PHY_DETAILS);
1824
1825         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1826                         OPCODE_COMMON_GET_PHY_DETAILS,
1827                         sizeof(*req));
1828
1829         sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
1830         sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
1831         sge->len = cpu_to_le32(cmd->size);
1832
1833         status = be_mcc_notify_wait(adapter);
1834 err:
1835         spin_unlock_bh(&adapter->mcc_lock);
1836         return status;
1837 }
1838
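/* Uses sync mcc */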
1839 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
1840 {
1841         struct be_mcc_wrb *wrb;
1842         struct be_cmd_req_set_qos *req;
1843         int status;
1844
1845         spin_lock_bh(&adapter->mcc_lock);
1846
1847         wrb = wrb_from_mccq(adapter);
1848         if (!wrb) {
1849                 status = -EBUSY;
1850                 goto err;
1851         }
1852
1853         req = embedded_payload(wrb);
1854
1855         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1856                                 OPCODE_COMMON_SET_QOS);
1857
1858         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1859                         OPCODE_COMMON_SET_QOS, sizeof(*req));
1860
1861         req->hdr.domain = domain;
1862         req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
1863         req->max_bps_nic = cpu_to_le32(bps);
1864
1865         status = be_mcc_notify_wait(adapter);
1866
1867 err:
1868         spin_unlock_bh(&adapter->mcc_lock);
1869         return status;
1870 }
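/* Illustrative sketch (not part of the driver): capping the transmit rate of
 * a virtual function with the command above.  The helper name, the unit of
 * "rate" and the mapping of VF index to domain (vf + 1, with domain 0
 * assumed to be the PF) are assumptions for illustration.  Kept under #if 0
 * so it is never built.
 */
#if 0
static int be_example_limit_vf_rate(struct be_adapter *adapter, int vf,
                        u32 rate)
{
        return be_cmd_set_qos(adapter, rate, vf + 1);
}
#endif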