/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include <net/switchdev.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(LIQUIDIO_VERSION);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
                "_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
                 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to a non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"); \"auto\" uses the firmware in flash, if present, else loads \"nic\".");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
                 "Bitmask indicating which consoles have debug output redirected to syslog.");

/**
 * \brief determines if a given console has debug enabled.
 * @param console console to check
 * @returns 1 if enabled, 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
        return (console_bitmask >> (console)) & 0x1;
}

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS         1000
/* Update localtime to octeon firmware every 60 seconds.
 * Making the firmware use the same time reference makes it easy to
 * correlate firmware-logged events/errors with host events, for debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000

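/** Context used to wait for the firmware's response to an
 *  interface-config request.
 */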
struct liquidio_if_cfg_context {
        int octeon_id;

        wait_queue_head_t wc;

        int cond;
};

struct liquidio_if_cfg_resp {
        u64 rh;
        struct liquidio_if_cfg_info cfg_info;
        u64 status;
};

struct liquidio_rx_ctl_context {
        int octeon_id;

        wait_queue_head_t wc;

        int cond;
};

struct oct_link_status_resp {
        u64 rh;
        struct oct_link_info link_info;
        u64 status;
};

struct oct_timestamp_resp {
        u64 rh;
        u64 timestamp;
        u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
        u64 u64;
        struct {
#ifdef __BIG_ENDIAN_BITFIELD
                u16 gso_size;
                u16 gso_segs;
                u32 reserved;
#else
                u32 reserved;
                u16 gso_segs;
                u16 gso_size;
#endif
        } s;
};

/** Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_MAX_SG  (MAX_SKB_FRAGS)

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE                                                    \
        (CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

/** Structure of a node in list of gather components maintained by
 * NIC driver for each network device.
 */
struct octnic_gather {
        /** List manipulation. Next and prev pointers. */
        struct list_head list;

        /** Size of the gather component at sg in bytes. */
        int sg_size;

        /** Number of bytes that sg was adjusted to make it 8B-aligned. */
        int adjust;

        /** Gather component that can accommodate max sized fragment list
         *  received from the IP layer.
         */
        struct octeon_sg_entry *sg;

        dma_addr_t sg_dma_ptr;
};

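/** Per-device init/start handshake state used during module load;
 *  completed from liquidio_probe() and octeon_destroy_resources().
 */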
struct handshake {
        struct completion init;
        struct completion started;
        struct pci_dev *pci_dev;
        int init_ok;
        int started_ok;
};

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
                                    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
                          const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
                                      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

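/**
 * \brief Bottom half that processes pending packets on active output queues
 * @param pdev Pointer to Octeon device, cast to unsigned long
 *
 * Reschedules itself if any queue still has work left after the
 * per-queue packet budget is consumed.
 */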
static void octeon_droq_bh(unsigned long pdev)
{
        int q_no;
        int reschedule = 0;
        struct octeon_device *oct = (struct octeon_device *)pdev;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;

        for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
                if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
                        continue;
                reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
                                                          MAX_PACKET_BUDGET);
                lio_enable_irq(oct->droq[q_no], NULL);

                if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
                        /* set time and cnt interrupt thresholds for this DROQ
                         * for NAPI
                         */
                        int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
                            0x5700000040ULL);
                        octeon_write_csr64(
                            oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
                }
        }

        if (reschedule)
                tasklet_schedule(&oct_priv->droq_tasklet);
}

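/**
 * \brief Poll until the output queues have no more packets to process
 * @param oct Pointer to Octeon device
 *
 * Kicks the droq tasklet for any queue that still has packets,
 * retrying up to 100 times before giving up.
 */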
static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;
        int retry = 100, pkt_cnt = 0, pending_pkts = 0;
        int i;

        do {
                pending_pkts = 0;

                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
                }
                if (pkt_cnt > 0) {
                        pending_pkts += pkt_cnt;
                        tasklet_schedule(&oct_priv->droq_tasklet);
                }
                pkt_cnt = 0;
                schedule_timeout_uninterruptible(1);

        } while (retry-- && pending_pkts);

        return pkt_cnt;
}

/**
 * \brief Forces all IO queues off on a given device
 * @param oct Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
        if ((oct->chip_id == OCTEON_CN66XX) ||
            (oct->chip_id == OCTEON_CN68XX)) {
                /* Reset the Enable bits for Input Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

                /* Reset the Enable bits for Output Queues. */
                octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
        }
}

/**
 * \brief Cause device to go quiet so it can be safely removed/reset/etc
 * @param oct Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
        int i;

        /* Disable the input and output queues now. No more packets will
         * arrive from Octeon, but we should wait for all packet processing
         * to finish.
         */
        force_io_queues_off(oct);

        /* To allow for in-flight requests */
        schedule_timeout_uninterruptible(100);

        if (wait_for_pending_requests(oct))
                dev_err(&oct->pci_dev->dev, "There were pending requests\n");

        /* Force all requests waiting to be fetched by OCTEON to complete. */
        for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                struct octeon_instr_queue *iq;

                if (!(oct->io_qmask.iq & BIT_ULL(i)))
                        continue;
                iq = oct->instr_queue[i];

                if (atomic_read(&iq->instr_pending)) {
                        spin_lock_bh(&iq->lock);
                        iq->fill_cnt = 0;
                        iq->octeon_read_index = iq->host_write_index;
                        iq->stats.instr_processed +=
                                atomic_read(&iq->instr_pending);
                        lio_process_iq_request_list(oct, iq, 0);
                        spin_unlock_bh(&iq->lock);
                }
        }

        /* Force all pending ordered list requests to time out. */
        lio_process_ordered_list(oct, 1);

        /* We do not need to wait for output queue packets to be processed. */
}

/**
 * \brief Cleanup PCI AER uncorrectable error status
 * @param dev Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
        int pos = 0x100;
        u32 status, mask;

        pr_info("%s :\n", __func__);

        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
        pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
        if (dev->error_state == pci_channel_io_normal)
                status &= ~mask;        /* Clear corresponding nonfatal bits */
        else
                status &= mask;         /* Clear corresponding fatal bits */
        pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}

/**
 * \brief Stop all PCI IO to a given device
 * @param oct Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
        /* No more instructions will be forwarded. */
        atomic_set(&oct->status, OCT_DEV_IN_RESET);

        pci_disable_device(oct->pci_dev);

        /* Disable interrupts  */
        oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

        pcierror_quiesce_device(oct);

        /* Release the interrupt line */
        free_irq(oct->pci_dev->irq, oct);

        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                pci_disable_msi(oct->pci_dev);

        dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                lio_get_state_string(&oct->status));

        /* making it a common function for all OCTEON models */
        cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * \brief called when PCI error is detected
 * @param pdev Pointer to PCI device
 * @param state The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
                                                     pci_channel_state_t state)
{
        struct octeon_device *oct = pci_get_drvdata(pdev);

        /* Non-correctable Non-fatal errors */
        if (state == pci_channel_io_normal) {
                dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
                cleanup_aer_uncorrect_error_status(oct->pci_dev);
                return PCI_ERS_RESULT_CAN_RECOVER;
        }

        /* Non-correctable Fatal errors */
        dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
        stop_pci_io(oct);

        /* Always return a DISCONNECT. There is no support for recovery,
         * only for a clean shutdown.
         */
        return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * \brief mmio handler
 * @param pdev Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(
                                struct pci_dev *pdev __attribute__((unused)))
{
        /* We should never hit this since we never ask for a reset for a Fatal
         * Error. We always return DISCONNECT in io_error above.
         * But play it safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called after the pci bus has been reset.
 * @param pdev Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(
                                struct pci_dev *pdev __attribute__((unused)))
{
        /* We should never hit this since we never ask for a reset for a Fatal
         * Error. We always return DISCONNECT in io_error above.
         * But play it safe and return RECOVERED for now.
         */
        return PCI_ERS_RESULT_RECOVERED;
}

/**
 * \brief called when traffic can start flowing again.
 * @param pdev Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev *pdev __attribute__((unused)))
{
        /* Nothing to be done here. */
}

#ifdef CONFIG_PM
/**
 * \brief called when suspending
 * @param pdev Pointer to PCI device
 * @param state state to suspend to
 */
static int liquidio_suspend(struct pci_dev *pdev __attribute__((unused)),
                            pm_message_t state __attribute__((unused)))
{
        return 0;
}

/**
 * \brief called when resuming
 * @param pdev Pointer to PCI device
 */
static int liquidio_resume(struct pci_dev *pdev __attribute__((unused)))
{
        return 0;
}
#endif

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
        .error_detected = liquidio_pcie_error_detected,
        .mmio_enabled   = liquidio_pcie_mmio_enabled,
        .slot_reset     = liquidio_pcie_slot_reset,
        .resume         = liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
        {       /* 68xx */
                PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 66xx */
                PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {       /* 23xx pf */
                PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
        },
        {
                0, 0, 0, 0, 0, 0, 0
        }
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static struct pci_driver liquidio_pci_driver = {
        .name           = "LiquidIO",
        .id_table       = liquidio_pci_tbl,
        .probe          = liquidio_probe,
        .remove         = liquidio_remove,
        .err_handler    = &liquidio_err_handler,    /* For AER */

#ifdef CONFIG_PM
        .suspend        = liquidio_suspend,
        .resume         = liquidio_resume,
#endif
#ifdef CONFIG_PCI_IOV
        .sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * \brief register PCI driver
 */
static int liquidio_init_pci(void)
{
        return pci_register_driver(&liquidio_pci_driver);
}

/**
 * \brief unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
        pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * \brief Stop Tx queues
 * @param netdev network device
 */
static inline void txqs_stop(struct net_device *netdev)
{
        if (netif_is_multiqueue(netdev)) {
                int i;

                for (i = 0; i < netdev->num_tx_queues; i++)
                        netif_stop_subqueue(netdev, i);
        } else {
                netif_stop_queue(netdev);
        }
}

/**
 * \brief Start Tx queues
 * @param netdev network device
 */
static inline void txqs_start(struct net_device *netdev)
{
        if (netif_is_multiqueue(netdev)) {
                int i;

                for (i = 0; i < netdev->num_tx_queues; i++)
                        netif_start_subqueue(netdev, i);
        } else {
                netif_start_queue(netdev);
        }
}

/**
 * \brief Wake Tx queues
 * @param netdev network device
 */
static inline void txqs_wake(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (netif_is_multiqueue(netdev)) {
                int i;

                for (i = 0; i < netdev->num_tx_queues; i++) {
                        int qno = lio->linfo.txpciq[i %
                                lio->oct_dev->num_iqs].s.q_no;

                        if (__netif_subqueue_stopped(netdev, i)) {
                                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, qno,
                                                          tx_restart, 1);
                                netif_wake_subqueue(netdev, i);
                        }
                }
        } else {
                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
                                          tx_restart, 1);
                netif_wake_queue(netdev);
        }
}

/**
 * \brief Stop Tx queue
 * @param netdev network device
 */
static void stop_txq(struct net_device *netdev)
{
        txqs_stop(netdev);
}

/**
 * \brief Start Tx queue
 * @param netdev network device
 */
static void start_txq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->linfo.link.s.link_up) {
                txqs_start(netdev);
                return;
        }
}

/**
 * \brief Wake a queue
 * @param netdev network device
 * @param q which queue to wake
 */
static inline void wake_q(struct net_device *netdev, int q)
{
        if (netif_is_multiqueue(netdev))
                netif_wake_subqueue(netdev, q);
        else
                netif_wake_queue(netdev);
}

/**
 * \brief Stop a queue
 * @param netdev network device
 * @param q which queue to stop
 */
static inline void stop_q(struct net_device *netdev, int q)
{
        if (netif_is_multiqueue(netdev))
                netif_stop_subqueue(netdev, q);
        else
                netif_stop_queue(netdev);
}

/**
 * \brief Check Tx queue status, and take appropriate action
 * @param lio per-network private data
 * @returns 0 if full, number of queues woken up otherwise
 */
static inline int check_txq_status(struct lio *lio)
{
        int ret_val = 0;

        if (netif_is_multiqueue(lio->netdev)) {
                int numqs = lio->netdev->num_tx_queues;
                int q, iq = 0;

                /* check each sub-queue state */
                for (q = 0; q < numqs; q++) {
                        iq = lio->linfo.txpciq[q %
                                lio->oct_dev->num_iqs].s.q_no;
                        if (octnet_iq_is_full(lio->oct_dev, iq))
                                continue;
                        if (__netif_subqueue_stopped(lio->netdev, q)) {
                                wake_q(lio->netdev, q);
                                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
                                                          tx_restart, 1);
                                ret_val++;
                        }
                }
        } else {
                if (octnet_iq_is_full(lio->oct_dev, lio->txq))
                        return 0;
                wake_q(lio->netdev, lio->txq);
                INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, lio->txq,
                                          tx_restart, 1);
                ret_val = 1;
        }
        return ret_val;
}

/**
 * Remove and return the node at the head of the list, or NULL if the
 * list is empty.
 */
static inline struct list_head *list_delete_head(struct list_head *root)
{
        struct list_head *node;

        if ((root->prev == root) && (root->next == root))
                node = NULL;
        else
                node = root->next;

        if (node)
                list_del(node);

        return node;
}

/**
 * \brief Delete gather lists
 * @param lio per-network private data
 */
static void delete_glists(struct lio *lio)
{
        struct octnic_gather *g;
        int i;

        kfree(lio->glist_lock);
        lio->glist_lock = NULL;

        if (!lio->glist)
                return;

        for (i = 0; i < lio->linfo.num_txpciq; i++) {
                do {
                        g = (struct octnic_gather *)
                                list_delete_head(&lio->glist[i]);
                        kfree(g);
                } while (g);

                if (lio->glists_virt_base && lio->glists_virt_base[i] &&
                    lio->glists_dma_base && lio->glists_dma_base[i]) {
                        lio_dma_free(lio->oct_dev,
                                     lio->glist_entry_size * lio->tx_qsize,
                                     lio->glists_virt_base[i],
                                     lio->glists_dma_base[i]);
                }
        }

        kfree(lio->glists_virt_base);
        lio->glists_virt_base = NULL;

        kfree(lio->glists_dma_base);
        lio->glists_dma_base = NULL;

        kfree(lio->glist);
        lio->glist = NULL;
}

/**
 * \brief Setup gather lists
 * @param lio per-network private data
 */
static int setup_glists(struct octeon_device *oct, struct lio *lio, int num_iqs)
{
        int i, j;
        struct octnic_gather *g;

        lio->glist_lock = kcalloc(num_iqs, sizeof(*lio->glist_lock),
                                  GFP_KERNEL);
        if (!lio->glist_lock)
                return -ENOMEM;

        lio->glist = kcalloc(num_iqs, sizeof(*lio->glist),
                             GFP_KERNEL);
        if (!lio->glist) {
                kfree(lio->glist_lock);
                lio->glist_lock = NULL;
                return -ENOMEM;
        }

        lio->glist_entry_size =
                ROUNDUP8((ROUNDUP4(OCTNIC_MAX_SG) >> 2) * OCT_SG_ENTRY_SIZE);

        /* allocate memory to store virtual and dma base address of
         * per glist consistent memory
         */
        lio->glists_virt_base = kcalloc(num_iqs, sizeof(*lio->glists_virt_base),
                                        GFP_KERNEL);
        lio->glists_dma_base = kcalloc(num_iqs, sizeof(*lio->glists_dma_base),
                                       GFP_KERNEL);

        if (!lio->glists_virt_base || !lio->glists_dma_base) {
                delete_glists(lio);
                return -ENOMEM;
        }

        for (i = 0; i < num_iqs; i++) {
                int numa_node = dev_to_node(&oct->pci_dev->dev);

                spin_lock_init(&lio->glist_lock[i]);

                INIT_LIST_HEAD(&lio->glist[i]);

                lio->glists_virt_base[i] =
                        lio_dma_alloc(oct,
                                      lio->glist_entry_size * lio->tx_qsize,
                                      &lio->glists_dma_base[i]);

                if (!lio->glists_virt_base[i]) {
                        delete_glists(lio);
                        return -ENOMEM;
                }

                for (j = 0; j < lio->tx_qsize; j++) {
                        g = kzalloc_node(sizeof(*g), GFP_KERNEL,
                                         numa_node);
                        if (!g)
                                g = kzalloc(sizeof(*g), GFP_KERNEL);
                        if (!g)
                                break;

                        g->sg = lio->glists_virt_base[i] +
                                (j * lio->glist_entry_size);

                        g->sg_dma_ptr = lio->glists_dma_base[i] +
                                        (j * lio->glist_entry_size);

                        list_add_tail(&g->list, &lio->glist[i]);
                }

                if (j != lio->tx_qsize) {
                        delete_glists(lio);
                        return -ENOMEM;
                }
        }

        return 0;
}

/**
 * \brief Print link information
 * @param netdev network device
 */
static void print_link_info(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
            ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
                struct oct_link_info *linfo = &lio->linfo;

                if (linfo->link.s.link_up) {
                        netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
                                   linfo->link.s.speed,
                                   (linfo->link.s.duplex) ? "Full" : "Half");
                } else {
                        netif_info(lio, link, lio->netdev, "Link Down\n");
                }
        }
}

/**
 * \brief Routine to notify MTU change
 * @param work work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;

        rtnl_lock();
        call_netdevice_notifiers(NETDEV_CHANGEMTU, lio->netdev);
        rtnl_unlock();
}

/**
 * \brief Sets up the mtu status change work
 * @param netdev network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->link_status_wq.wq = alloc_workqueue("link-status",
                                                 WQ_MEM_RECLAIM, 0);
        if (!lio->link_status_wq.wq) {
                dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
                          octnet_link_status_change);
        lio->link_status_wq.wk.ctxptr = lio;

        return 0;
}

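/**
 * \brief Tear down the link status change workqueue
 * @param netdev network device
 */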
static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);

        if (lio->link_status_wq.wq) {
                cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
                destroy_workqueue(lio->link_status_wq.wq);
        }
}

/**
 * \brief Update link status
 * @param netdev network device
 * @param ls link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
                                      union oct_link_status *ls)
{
        struct lio *lio = GET_LIO(netdev);
        int changed = (lio->linfo.link.u64 != ls->u64);

        lio->linfo.link.u64 = ls->u64;

        if ((lio->intf_open) && (changed)) {
                print_link_info(netdev);
                lio->link_changes++;

                if (lio->linfo.link.s.link_up) {
                        netif_carrier_on(netdev);
                        txqs_wake(netdev);
                } else {
                        netif_carrier_off(netdev);
                        stop_txq(netdev);
                }
        }
}

/**
 * lio_sync_octeon_time_cb - callback that is invoked when soft command
 * sent by lio_sync_octeon_time() has completed successfully or failed
 *
 * @oct - octeon device structure
 * @status - indicates success or failure
 * @buf - pointer to the command that was sent to firmware
 **/
static void lio_sync_octeon_time_cb(struct octeon_device *oct,
                                    u32 status, void *buf)
{
        struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;

        if (status)
                dev_err(&oct->pci_dev->dev,
                        "Failed to sync time to octeon; error=%d\n", status);

        octeon_free_soft_command(oct, sc);
}

/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware will correct its time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
        struct cavium_wk *wk = (struct cavium_wk *)work;
        struct lio *lio = (struct lio *)wk->ctxptr;
        struct octeon_device *oct = lio->oct_dev;
        struct octeon_soft_command *sc;
        struct timespec64 ts;
        struct lio_time *lt;
        int ret;

        sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 0, 0);
        if (!sc) {
                dev_err(&oct->pci_dev->dev,
                        "Failed to sync time to octeon: soft command allocation failed\n");
                return;
        }

        lt = (struct lio_time *)sc->virtdptr;

        /* Get time of the day */
        getnstimeofday64(&ts);
        lt->sec = ts.tv_sec;
        lt->nsec = ts.tv_nsec;
        octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;
        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

        sc->callback = lio_sync_octeon_time_cb;
        sc->callback_arg = sc;
        sc->wait_time = 1000;

        ret = octeon_send_soft_command(oct, sc);
        if (ret == IQ_SEND_FAILED) {
                dev_err(&oct->pci_dev->dev,
                        "Failed to sync time to octeon: failed to send soft command\n");
                octeon_free_soft_command(oct, sc);
        }

        queue_delayed_work(lio->sync_octeon_time_wq.wq,
                           &lio->sync_octeon_time_wq.wk.work,
                           msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}

/**
 * setup_sync_octeon_time_wq - Sets up the work to periodically update
 * local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct octeon_device *oct = lio->oct_dev;

        lio->sync_octeon_time_wq.wq =
                alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
        if (!lio->sync_octeon_time_wq.wq) {
                dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
                return -1;
        }
        INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
                          lio_sync_octeon_time);
        lio->sync_octeon_time_wq.wk.ctxptr = lio;
        queue_delayed_work(lio->sync_octeon_time_wq.wq,
                           &lio->sync_octeon_time_wq.wk.work,
                           msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

        return 0;
}

/**
 * cleanup_sync_octeon_time_wq - stop scheduling and destroy the work created
 * to periodically update local time to octeon firmware
 *
 * @netdev - network device which should send time update to firmware
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
        struct lio *lio = GET_LIO(netdev);
        struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

        if (time_wq->wq) {
                cancel_delayed_work_sync(&time_wq->wk.work);
                destroy_workqueue(time_wq->wq);
        }
}

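/**
 * \brief Find the other PF of the same adapter
 * @param oct Pointer to Octeon device
 * @returns the Octeon device on the same bus and slot, or NULL if none
 */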
static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
        struct octeon_device *other_oct;

        other_oct = lio_get_device(oct->octeon_id + 1);

        if (other_oct && other_oct->pci_dev) {
                int oct_busnum, other_oct_busnum;

                oct_busnum = oct->pci_dev->bus->number;
                other_oct_busnum = other_oct->pci_dev->bus->number;

                if (oct_busnum == other_oct_busnum) {
                        int oct_slot, other_oct_slot;

                        oct_slot = PCI_SLOT(oct->pci_dev->devfn);
                        other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

                        if (oct_slot == other_oct_slot)
                                return other_oct;
                }
        }

        return NULL;
}

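/**
 * \brief Disable the links of all VFs on all interfaces of a device
 * @param oct Pointer to Octeon device (may be NULL)
 */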
static void disable_all_vf_links(struct octeon_device *oct)
{
        struct net_device *netdev;
        int max_vfs, vf, i;

        if (!oct)
                return;

        max_vfs = oct->sriov_info.max_vfs;

        for (i = 0; i < oct->ifcount; i++) {
                netdev = oct->props[i].netdev;
                if (!netdev)
                        continue;

                for (vf = 0; vf < max_vfs; vf++)
                        liquidio_set_vf_link_state(netdev, vf,
                                                   IFLA_VF_LINK_STATE_DISABLE);
        }
}

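/**
 * \brief Kernel thread that watches for crashed or stuck NIC cores
 * @param param Pointer to Octeon device
 *
 * Polls CN23XX_SLI_SCRATCH2 every two seconds; when cores are reported
 * crashed or stuck it logs the failure, disables all VF links on the
 * adapter and (if module unloading is enabled) drops the module
 * references held on behalf of the VFs.
 */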
static int liquidio_watchdog(void *param)
{
        bool err_msg_was_printed[LIO_MAX_CORES];
        u16 mask_of_crashed_or_stuck_cores = 0;
        bool all_vf_links_are_disabled = false;
        struct octeon_device *oct = param;
        struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
        long refcount, vfs_referencing_pf;
        u64 vfs_mask1, vfs_mask2;
#endif
        int core;

        memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

        while (!kthread_should_stop()) {
                /* sleep for a couple of seconds so that we don't hog the CPU */
                set_current_state(TASK_INTERRUPTIBLE);
                schedule_timeout(msecs_to_jiffies(2000));

                mask_of_crashed_or_stuck_cores =
                    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

                if (!mask_of_crashed_or_stuck_cores)
                        continue;

                WRITE_ONCE(oct->cores_crashed, true);
                other_oct = get_other_octeon_device(oct);
                if (other_oct)
                        WRITE_ONCE(other_oct->cores_crashed, true);

                for (core = 0; core < LIO_MAX_CORES; core++) {
                        bool core_crashed_or_got_stuck;

                        core_crashed_or_got_stuck =
                                                (mask_of_crashed_or_stuck_cores
                                                 >> core) & 1;

                        if (core_crashed_or_got_stuck &&
                            !err_msg_was_printed[core]) {
                                dev_err(&oct->pci_dev->dev,
                                        "ERROR: Octeon core %d crashed or got stuck!  See oct-fwdump for details.\n",
                                        core);
                                err_msg_was_printed[core] = true;
                        }
                }

                if (all_vf_links_are_disabled)
                        continue;

                disable_all_vf_links(oct);
                disable_all_vf_links(other_oct);
                all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
                vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
                vfs_mask2 = other_oct ?
                        READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask) : 0;

                vfs_referencing_pf  = hweight64(vfs_mask1);
                vfs_referencing_pf += hweight64(vfs_mask2);

                refcount = module_refcount(THIS_MODULE);
                if (refcount >= vfs_referencing_pf) {
                        while (vfs_referencing_pf) {
                                module_put(THIS_MODULE);
                                vfs_referencing_pf--;
                        }
                }
#endif
        }

        return 0;
}

/**
 * \brief PCI probe handler
 * @param pdev PCI device structure
 * @param ent unused
 */
static int
liquidio_probe(struct pci_dev *pdev,
               const struct pci_device_id *ent __attribute__((unused)))
{
        struct octeon_device *oct_dev = NULL;
        struct handshake *hs;

        oct_dev = octeon_allocate_device(pdev->device,
                                         sizeof(struct octeon_device_priv));
        if (!oct_dev) {
                dev_err(&pdev->dev, "Unable to allocate device\n");
                return -ENOMEM;
        }

        if (pdev->device == OCTEON_CN23XX_PF_VID)
                oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

        /* Enable PTP for 6XXX Device */
        if ((pdev->device == OCTEON_CN66XX) ||
            (pdev->device == OCTEON_CN68XX))
                oct_dev->ptp_enable = true;
        else
                oct_dev->ptp_enable = false;

        dev_info(&pdev->dev, "Initializing device %x:%x.\n",
                 (u32)pdev->vendor, (u32)pdev->device);

        /* Assign octeon_device for this device to the private data area. */
        pci_set_drvdata(pdev, oct_dev);

        /* set linux specific device pointer */
        oct_dev->pci_dev = (void *)pdev;

        hs = &handshake[oct_dev->octeon_id];
        init_completion(&hs->init);
        init_completion(&hs->started);
        hs->pci_dev = pdev;

        if (oct_dev->octeon_id == 0)
                /* first LiquidIO NIC is detected */
                complete(&first_stage);

        if (octeon_device_init(oct_dev)) {
                complete(&hs->init);
                liquidio_remove(pdev);
                return -ENOMEM;
        }

        if (OCTEON_CN23XX_PF(oct_dev)) {
                u8 bus, device, function;

                if (atomic_read(oct_dev->adapter_refcount) == 1) {
                        /* Each NIC gets one watchdog kernel thread.  The first
                         * PF (of each NIC) that gets pci_driver->probe()'d
                         * creates that thread.
                         */
                        bus = pdev->bus->number;
                        device = PCI_SLOT(pdev->devfn);
                        function = PCI_FUNC(pdev->devfn);
                        oct_dev->watchdog_task = kthread_create(
                            liquidio_watchdog, oct_dev,
                            "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
                        if (!IS_ERR(oct_dev->watchdog_task)) {
                                wake_up_process(oct_dev->watchdog_task);
                        } else {
                                oct_dev->watchdog_task = NULL;
                                dev_err(&oct_dev->pci_dev->dev,
                                        "failed to create kernel_thread\n");
                                liquidio_remove(pdev);
                                return -1;
                        }
                }
        }

        oct_dev->rx_pause = 1;
        oct_dev->tx_pause = 1;

        dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

        return 0;
}

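/**
 * \brief Check whether the fw_type module parameter is set to "auto"
 */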
static bool fw_type_is_auto(void)
{
        return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
                       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}

/**
 * \brief PCI FLR for each Octeon device.
 * @param oct octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
        int rc;

        pci_save_state(oct->pci_dev);

        pci_cfg_access_lock(oct->pci_dev);

        /* Quiesce the device completely */
        pci_write_config_word(oct->pci_dev, PCI_COMMAND,
                              PCI_COMMAND_INTX_DISABLE);

        rc = __pci_reset_function_locked(oct->pci_dev);

        if (rc != 0)
                dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
                        rc, oct->pf_num);

        pci_cfg_access_unlock(oct->pci_dev);

        pci_restore_state(oct->pci_dev);
}

/**
 * \brief Destroy resources associated with octeon device
 * @param oct octeon device
 */
static void octeon_destroy_resources(struct octeon_device *oct)
{
        int i, refcount;
        struct msix_entry *msix_entries;
        struct octeon_device_priv *oct_priv =
                (struct octeon_device_priv *)oct->priv;

        struct handshake *hs;

        switch (atomic_read(&oct->status)) {
        case OCT_DEV_RUNNING:
        case OCT_DEV_CORE_OK:

                /* No more instructions will be forwarded. */
                atomic_set(&oct->status, OCT_DEV_IN_RESET);

                oct->app_mode = CVM_DRV_INVALID_APP;
                dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
                        lio_get_state_string(&oct->status));

                schedule_timeout_uninterruptible(HZ / 10);

                /* fallthrough */
        case OCT_DEV_HOST_OK:

                /* fallthrough */
        case OCT_DEV_CONSOLE_INIT_DONE:
                /* Remove any consoles */
                octeon_remove_consoles(oct);

                /* fallthrough */
        case OCT_DEV_IO_QUEUES_DONE:
                if (wait_for_pending_requests(oct))
                        dev_err(&oct->pci_dev->dev, "There were pending requests\n");

                if (lio_wait_for_instr_fetch(oct))
                        dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");

                /* Disable the input and output queues now. No more packets will
                 * arrive from Octeon, but we should wait for all packet
                 * processing to finish.
                 */
                oct->fn_list.disable_io_queues(oct);

                if (lio_wait_for_oq_pkts(oct))
                        dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");

        /* fallthrough */
        case OCT_DEV_INTR_SET_DONE:
                /* Disable interrupts  */
                oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

                if (oct->msix_on) {
                        msix_entries = (struct msix_entry *)oct->msix_entries;
                        for (i = 0; i < oct->num_msix_irqs - 1; i++) {
                                if (oct->ioq_vector[i].vector) {
                                        /* clear the affinity_cpumask */
                                        irq_set_affinity_hint(
                                                        msix_entries[i].vector,
                                                        NULL);
                                        free_irq(msix_entries[i].vector,
                                                 &oct->ioq_vector[i]);
                                        oct->ioq_vector[i].vector = 0;
                                }
                        }
                        /* non-iov vector's argument is oct struct */
                        free_irq(msix_entries[i].vector, oct);

                        pci_disable_msix(oct->pci_dev);
                        kfree(oct->msix_entries);
                        oct->msix_entries = NULL;
                } else {
                        /* Release the interrupt line */
                        free_irq(oct->pci_dev->irq, oct);

                        if (oct->flags & LIO_FLAG_MSI_ENABLED)
                                pci_disable_msi(oct->pci_dev);
                }

                kfree(oct->irq_name_storage);
                oct->irq_name_storage = NULL;

        /* fallthrough */
        case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
                if (OCTEON_CN23XX_PF(oct))
                        octeon_free_ioq_vector(oct);

        /* fallthrough */
        case OCT_DEV_MBOX_SETUP_DONE:
                if (OCTEON_CN23XX_PF(oct))
                        oct->fn_list.free_mbox(oct);

        /* fallthrough */
        case OCT_DEV_IN_RESET:
        case OCT_DEV_DROQ_INIT_DONE:
                /* Wait for any pending operations */
                mdelay(100);
                for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.oq & BIT_ULL(i)))
                                continue;
                        octeon_delete_droq(oct, i);
                }

                /* Force any pending handshakes to complete */
                for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
                        hs = &handshake[i];

                        if (hs->pci_dev) {
                                handshake[oct->octeon_id].init_ok = 0;
                                complete(&handshake[oct->octeon_id].init);
                                handshake[oct->octeon_id].started_ok = 0;
                                complete(&handshake[oct->octeon_id].started);
                        }
                }

                /* fallthrough */
        case OCT_DEV_RESP_LIST_INIT_DONE:
                octeon_delete_response_list(oct);

                /* fallthrough */
        case OCT_DEV_INSTR_QUEUE_INIT_DONE:
                for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
                        if (!(oct->io_qmask.iq & BIT_ULL(i)))
                                continue;
                        octeon_delete_instr_queue(oct, i);
                }
#ifdef CONFIG_PCI_IOV
                if (oct->sriov_info.sriov_enabled)
                        pci_disable_sriov(oct->pci_dev);
#endif
                /* fallthrough */
        case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
                octeon_free_sc_buffer_pool(oct);

                /* fallthrough */
        case OCT_DEV_DISPATCH_INIT_DONE:
                octeon_delete_dispatch_list(oct);
                cancel_delayed_work_sync(&oct->nic_poll_work.work);

                /* fallthrough */
        case OCT_DEV_PCI_MAP_DONE:
                refcount = octeon_deregister_device(oct);

                /* Soft reset the octeon device before exiting.
                 * However, if fw was loaded from card (i.e. autoboot),
                 * perform an FLR instead.
                 * Implementation note: only soft-reset the device
                 * if it is a CN6XXX OR the LAST CN23XX device.
                 */
                if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
                        octeon_pci_flr(oct);
                else if (OCTEON_CN6XXX(oct) || !refcount)
                        oct->fn_list.soft_reset(oct);

                octeon_unmap_pci_barx(oct, 0);
                octeon_unmap_pci_barx(oct, 1);

                /* fallthrough */
        case OCT_DEV_PCI_ENABLE_DONE:
                pci_clear_master(oct->pci_dev);
                /* Disable the device, releasing the PCI INT */
                pci_disable_device(oct->pci_dev);

                /* fallthrough */
        case OCT_DEV_BEGIN_STATE:
                /* Nothing to be done here either */
                break;
        }                       /* end switch (oct->status) */

        tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * \brief Callback for rx ctrl
 * @param oct octeon device
 * @param status status of request
 * @param buf pointer to the soft command
 */
static void rx_ctl_callback(struct octeon_device *oct,
                            u32 status,
                            void *buf)
{
        struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
        struct liquidio_rx_ctl_context *ctx;

        ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

        oct = lio_get_device(ctx->octeon_id);
        if (status)
                dev_err(&oct->pci_dev->dev, "rx ctl instruction failed. Status: %llx\n",
                        CVM_CAST64(status));
        WRITE_ONCE(ctx->cond, 1);

        /* This barrier is required to be sure that the response has been
         * written fully before waking up the handler
         */
        wmb();

        wake_up_interruptible(&ctx->wc);
}

/**
 * \brief Send Rx control command
 * @param lio per-network private data
 * @param start_stop whether to start or stop
 */
static void send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
        struct octeon_soft_command *sc;
        struct liquidio_rx_ctl_context *ctx;
        union octnet_cmd *ncmd;
        int ctx_size = sizeof(struct liquidio_rx_ctl_context);
        struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
        int retval;

        if (oct->props[lio->ifidx].rx_on == start_stop)
                return;

        sc = (struct octeon_soft_command *)
                octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
                                          16, ctx_size);
        if (!sc)
                return;

        ncmd = (union octnet_cmd *)sc->virtdptr;
        ctx  = (struct liquidio_rx_ctl_context *)sc->ctxptr;

        WRITE_ONCE(ctx->cond, 0);
        ctx->octeon_id = lio_get_device_id(oct);
        init_waitqueue_head(&ctx->wc);

        ncmd->u64 = 0;
        ncmd->s.cmd = OCTNET_CMD_RX_CTL;
        ncmd->s.param1 = start_stop;

        octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

        sc->iq_no = lio->linfo.txpciq[0].s.q_no;

        octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
                                    OPCODE_NIC_CMD, 0, 0, 0);

        sc->callback = rx_ctl_callback;
        sc->callback_arg = sc;
        sc->wait_time = 5000;

        retval = octeon_send_soft_command(oct, sc);
        if (retval == IQ_SEND_FAILED) {
                netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
        } else {
                /* Sleep on a wait queue till the cond flag indicates that the
                 * response arrived or timed-out.
                 */
                if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR)
                        return;
                oct->props[lio->ifidx].rx_on = start_stop;
        }

        octeon_free_soft_command(oct, sc);
}
1529
1530 /**
1531  * \brief Destroy NIC device interface
1532  * @param oct octeon device
1533  * @param ifidx which interface to destroy
1534  *
1535  * Cleanup associated with each interface for an Octeon device when the NIC
1536  * module is being unloaded or if initialization fails during load.
1537  */
1538 static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
1539 {
1540         struct net_device *netdev = oct->props[ifidx].netdev;
1541         struct lio *lio;
1542         struct napi_struct *napi, *n;
1543
1544         if (!netdev) {
1545                 dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
1546                         __func__, ifidx);
1547                 return;
1548         }
1549
1550         lio = GET_LIO(netdev);
1551
1552         dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");
1553
1554         if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
1555                 liquidio_stop(netdev);
1556
1557         if (oct->props[lio->ifidx].napi_enabled == 1) {
1558                 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1559                         napi_disable(napi);
1560
1561                 oct->props[lio->ifidx].napi_enabled = 0;
1562
1563                 if (OCTEON_CN23XX_PF(oct))
1564                         oct->droq[0]->ops.poll_mode = 0;
1565         }
1566
1567         /* Delete NAPI */
1568         list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
1569                 netif_napi_del(napi);
1570
1571         if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
1572                 unregister_netdev(netdev);
1573
1574         cleanup_sync_octeon_time_wq(netdev);
1575         cleanup_link_status_change_wq(netdev);
1576
1577         cleanup_rx_oom_poll_fn(netdev);
1578
1579         delete_glists(lio);
1580
1581         free_netdev(netdev);
1582
1583         oct->props[ifidx].gmxport = -1;
1584
1585         oct->props[ifidx].netdev = NULL;
1586 }
1587
1588 /**
1589  * \brief Stop complete NIC functionality
1590  * @param oct octeon device
1591  */
1592 static int liquidio_stop_nic_module(struct octeon_device *oct)
1593 {
1594         int i, j;
1595         struct lio *lio;
1596
1597         dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
1598         if (!oct->ifcount) {
1599                 dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
1600                 return 1;
1601         }
1602
1603         spin_lock_bh(&oct->cmd_resp_wqlock);
1604         oct->cmd_resp_state = OCT_DRV_OFFLINE;
1605         spin_unlock_bh(&oct->cmd_resp_wqlock);
1606
1607         lio_vf_rep_destroy(oct);
1608
1609         for (i = 0; i < oct->ifcount; i++) {
1610                 lio = GET_LIO(oct->props[i].netdev);
1611                 for (j = 0; j < oct->num_oqs; j++)
1612                         octeon_unregister_droq_ops(oct,
1613                                                    lio->linfo.rxpciq[j].s.q_no);
1614         }
1615
1616         for (i = 0; i < oct->ifcount; i++)
1617                 liquidio_destroy_nic_device(oct, i);
1618
1619         if (oct->devlink) {
1620                 devlink_unregister(oct->devlink);
1621                 devlink_free(oct->devlink);
1622                 oct->devlink = NULL;
1623         }
1624
1625         dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
1626         return 0;
1627 }
1628
1629 /**
1630  * \brief Cleans up resources at unload time
1631  * @param pdev PCI device structure
1632  */
1633 static void liquidio_remove(struct pci_dev *pdev)
1634 {
1635         struct octeon_device *oct_dev = pci_get_drvdata(pdev);
1636
1637         dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");
1638
1639         if (oct_dev->watchdog_task)
1640                 kthread_stop(oct_dev->watchdog_task);
1641
1642         if (!oct_dev->octeon_id &&
1643             oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
1644                 lio_vf_rep_modexit();
1645
1646         if (oct_dev->app_mode == CVM_DRV_NIC_APP)
1647                 liquidio_stop_nic_module(oct_dev);
1648
1649         /* Reset the octeon device and cleanup all memory allocated for
1650          * the octeon device by driver.
1651          */
1652         octeon_destroy_resources(oct_dev);
1653
1654         dev_info(&oct_dev->pci_dev->dev, "Device removed\n");
1655
1656         /* This octeon device has been removed. Update the global
1657          * data structure to reflect this. Free the device structure.
1658          */
1659         octeon_free_device_mem(oct_dev);
1660 }
1661
1662 /**
1663  * \brief Identify the Octeon device and map the BAR address space
1664  * @param oct octeon device
1665  */
1666 static int octeon_chip_specific_setup(struct octeon_device *oct)
1667 {
1668         u32 dev_id, rev_id;
1669         int ret = 1;
1670         char *s;
1671
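        /* The config-space dword at offset 0 is the vendor/device ID; the
         * dword at offset 8 is class code plus revision, of which only
         * the low byte (the revision) is kept below.
         */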
1672         pci_read_config_dword(oct->pci_dev, 0, &dev_id);
1673         pci_read_config_dword(oct->pci_dev, 8, &rev_id);
1674         oct->rev_id = rev_id & 0xff;
1675
1676         switch (dev_id) {
1677         case OCTEON_CN68XX_PCIID:
1678                 oct->chip_id = OCTEON_CN68XX;
1679                 ret = lio_setup_cn68xx_octeon_device(oct);
1680                 s = "CN68XX";
1681                 break;
1682
1683         case OCTEON_CN66XX_PCIID:
1684                 oct->chip_id = OCTEON_CN66XX;
1685                 ret = lio_setup_cn66xx_octeon_device(oct);
1686                 s = "CN66XX";
1687                 break;
1688
1689         case OCTEON_CN23XX_PCIID_PF:
1690                 oct->chip_id = OCTEON_CN23XX_PF_VID;
1691                 ret = setup_cn23xx_octeon_pf_device(oct);
1692                 if (ret)
1693                         break;
1694 #ifdef CONFIG_PCI_IOV
                /* ret is known to be 0 here; advertise the max VF count */
1696                 pci_sriov_set_totalvfs(oct->pci_dev,
1697                                        oct->sriov_info.max_vfs);
1698 #endif
1699                 s = "CN23XX";
1700                 break;
1701
1702         default:
1703                 s = "?";
1704                 dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
1705                         dev_id);
1706         }
1707
1708         if (!ret)
1709                 dev_info(&oct->pci_dev->dev, "%s PASS%d.%d %s Version: %s\n", s,
1710                          OCTEON_MAJOR_REV(oct),
1711                          OCTEON_MINOR_REV(oct),
1712                          octeon_get_conf(oct)->card_name,
1713                          LIQUIDIO_VERSION);
1714
1715         return ret;
1716 }
1717
1718 /**
1719  * \brief PCI initialization for each Octeon device.
1720  * @param oct octeon device
1721  */
1722 static int octeon_pci_os_setup(struct octeon_device *oct)
1723 {
1724         /* setup PCI stuff first */
1725         if (pci_enable_device(oct->pci_dev)) {
1726                 dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
1727                 return 1;
1728         }
1729
1730         if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
1731                 dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
1732                 pci_disable_device(oct->pci_dev);
1733                 return 1;
1734         }
1735
1736         /* Enable PCI DMA Master. */
1737         pci_set_master(oct->pci_dev);
1738
1739         return 0;
1740 }
1741
1742 static inline int skb_iq(struct lio *lio, struct sk_buff *skb)
1743 {
1744         int q = 0;
1745
1746         if (netif_is_multiqueue(lio->netdev))
1747                 q = skb->queue_mapping % lio->linfo.num_txpciq;
1748
1749         return q;
1750 }
1751
1752 /**
1753  * \brief Check Tx queue state for a given network buffer
1754  * @param lio per-network private data
1755  * @param skb network buffer
1756  */
1757 static inline int check_txq_state(struct lio *lio, struct sk_buff *skb)
1758 {
1759         int q = 0, iq = 0;
1760
1761         if (netif_is_multiqueue(lio->netdev)) {
1762                 q = skb->queue_mapping;
1763                 iq = lio->linfo.txpciq[(q % lio->oct_dev->num_iqs)].s.q_no;
1764         } else {
1765                 iq = lio->txq;
1766                 q = iq;
1767         }
1768
1769         if (octnet_iq_is_full(lio->oct_dev, iq))
1770                 return 0;
1771
1772         if (__netif_subqueue_stopped(lio->netdev, q)) {
1773                 INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq, tx_restart, 1);
1774                 wake_q(lio->netdev, q);
1775         }
1776         return 1;
1777 }
1778
1779 /**
1780  * \brief Unmap and free network buffer
1781  * @param buf buffer
1782  */
1783 static void free_netbuf(void *buf)
1784 {
1785         struct sk_buff *skb;
1786         struct octnet_buf_free_info *finfo;
1787         struct lio *lio;
1788
1789         finfo = (struct octnet_buf_free_info *)buf;
1790         skb = finfo->skb;
1791         lio = finfo->lio;
1792
1793         dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
1794                          DMA_TO_DEVICE);
1795
1796         check_txq_state(lio, skb);
1797
1798         tx_buffer_free(skb);
1799 }
1800
1801 /**
1802  * \brief Unmap and free gather buffer
1803  * @param buf buffer
1804  */
1805 static void free_netsgbuf(void *buf)
1806 {
1807         struct octnet_buf_free_info *finfo;
1808         struct sk_buff *skb;
1809         struct lio *lio;
1810         struct octnic_gather *g;
1811         int i, frags, iq;
1812
1813         finfo = (struct octnet_buf_free_info *)buf;
1814         skb = finfo->skb;
1815         lio = finfo->lio;
1816         g = finfo->g;
1817         frags = skb_shinfo(skb)->nr_frags;
1818
1819         dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1820                          g->sg[0].ptr[0], (skb->len - skb->data_len),
1821                          DMA_TO_DEVICE);
1822
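        /* Each scatter/gather entry holds four pointers, so fragment i
         * lives at sg[i >> 2].ptr[i & 3]; slot 0 of entry 0 held the
         * linear part of the skb and was unmapped just above.
         */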
1823         i = 1;
1824         while (frags--) {
1825                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1826
1827                 pci_unmap_page((lio->oct_dev)->pci_dev,
1828                                g->sg[(i >> 2)].ptr[(i & 3)],
1829                                frag->size, DMA_TO_DEVICE);
1830                 i++;
1831         }
1832
1833         iq = skb_iq(lio, skb);
1834         spin_lock(&lio->glist_lock[iq]);
1835         list_add_tail(&g->list, &lio->glist[iq]);
1836         spin_unlock(&lio->glist_lock[iq]);
1837
1838         check_txq_state(lio, skb);     /* mq support: sub-queue state check */
1839
1840         tx_buffer_free(skb);
1841 }
1842
1843 /**
1844  * \brief Unmap and free gather buffer with response
1845  * @param buf buffer
1846  */
1847 static void free_netsgbuf_with_resp(void *buf)
1848 {
1849         struct octeon_soft_command *sc;
1850         struct octnet_buf_free_info *finfo;
1851         struct sk_buff *skb;
1852         struct lio *lio;
1853         struct octnic_gather *g;
1854         int i, frags, iq;
1855
1856         sc = (struct octeon_soft_command *)buf;
1857         skb = (struct sk_buff *)sc->callback_arg;
1858         finfo = (struct octnet_buf_free_info *)&skb->cb;
1859
1860         lio = finfo->lio;
1861         g = finfo->g;
1862         frags = skb_shinfo(skb)->nr_frags;
1863
1864         dma_unmap_single(&lio->oct_dev->pci_dev->dev,
1865                          g->sg[0].ptr[0], (skb->len - skb->data_len),
1866                          DMA_TO_DEVICE);
1867
1868         i = 1;
1869         while (frags--) {
1870                 struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i - 1];
1871
1872                 pci_unmap_page((lio->oct_dev)->pci_dev,
1873                                g->sg[(i >> 2)].ptr[(i & 3)],
1874                                frag->size, DMA_TO_DEVICE);
1875                 i++;
1876         }
1877
1878         iq = skb_iq(lio, skb);
1879
1880         spin_lock(&lio->glist_lock[iq]);
1881         list_add_tail(&g->list, &lio->glist[iq]);
1882         spin_unlock(&lio->glist_lock[iq]);
1883
1884         /* Don't free the skb yet */
1885
1886         check_txq_state(lio, skb);
1887 }
1888
1889 /**
1890  * \brief Adjust ptp frequency
1891  * @param ptp PTP clock info
1892  * @param ppb how much to adjust by, in parts-per-billion
1893  */
1894 static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
1895 {
1896         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1897         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1898         u64 comp, delta;
1899         unsigned long flags;
1900         bool neg_adj = false;
1901
1902         if (ppb < 0) {
1903                 neg_adj = true;
1904                 ppb = -ppb;
1905         }
1906
1907         /* The hardware adds the clock compensation value to the
1908          * PTP clock on every coprocessor clock cycle, so we
1909          * compute the delta in terms of coprocessor clocks.
1910          */
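        /* Worked example (hypothetical numbers): with a 1 GHz coprocessor
         * clock and ppb = 1000, delta = (1000 << 32) / 10^9, i.e. roughly
         * 4295 fixed-point compensation units added to (or removed from)
         * the compensation register per adjustment.
         */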
1911         delta = (u64)ppb << 32;
1912         do_div(delta, oct->coproc_clock_rate);
1913
1914         spin_lock_irqsave(&lio->ptp_lock, flags);
1915         comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
1916         if (neg_adj)
1917                 comp -= delta;
1918         else
1919                 comp += delta;
1920         lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
1921         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1922
1923         return 0;
1924 }
1925
1926 /**
1927  * \brief Adjust ptp time
1928  * @param ptp PTP clock info
1929  * @param delta how much to adjust by, in nanosecs
1930  */
1931 static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
1932 {
1933         unsigned long flags;
1934         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1935
1936         spin_lock_irqsave(&lio->ptp_lock, flags);
1937         lio->ptp_adjust += delta;
1938         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1939
1940         return 0;
1941 }
1942
1943 /**
1944  * \brief Get hardware clock time, including any adjustment
1945  * @param ptp PTP clock info
1946  * @param ts timespec
1947  */
1948 static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
1949                                 struct timespec64 *ts)
1950 {
1951         u64 ns;
1952         unsigned long flags;
1953         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1954         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1955
1956         spin_lock_irqsave(&lio->ptp_lock, flags);
1957         ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
1958         ns += lio->ptp_adjust;
1959         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1960
1961         *ts = ns_to_timespec64(ns);
1962
1963         return 0;
1964 }
1965
1966 /**
1967  * \brief Set hardware clock time. Reset adjustment
1968  * @param ptp PTP clock info
1969  * @param ts timespec
1970  */
1971 static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
1972                                 const struct timespec64 *ts)
1973 {
1974         u64 ns;
1975         unsigned long flags;
1976         struct lio *lio = container_of(ptp, struct lio, ptp_info);
1977         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
1978
1979         ns = timespec64_to_ns(ts);
1980
1981         spin_lock_irqsave(&lio->ptp_lock, flags);
1982         lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
1983         lio->ptp_adjust = 0;
1984         spin_unlock_irqrestore(&lio->ptp_lock, flags);
1985
1986         return 0;
1987 }
1988
1989 /**
1990  * \brief PTP ancillary feature enable callback (not supported)
1991  * @param ptp PTP clock info
1992  * @param rq request
1993  * @param on whether to enable (1) or disable (0) the feature
1994  */
1995 static int
1996 liquidio_ptp_enable(struct ptp_clock_info *ptp __attribute__((unused)),
1997                     struct ptp_clock_request *rq __attribute__((unused)),
1998                     int on __attribute__((unused)))
1999 {
2000         return -EOPNOTSUPP;
2001 }
2002
2003 /**
2004  * \brief Open PTP clock source
2005  * @param netdev network device
2006  */
2007 static void oct_ptp_open(struct net_device *netdev)
2008 {
2009         struct lio *lio = GET_LIO(netdev);
2010         struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
2011
2012         spin_lock_init(&lio->ptp_lock);
2013
2014         snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
2015         lio->ptp_info.owner = THIS_MODULE;
2016         lio->ptp_info.max_adj = 250000000;
2017         lio->ptp_info.n_alarm = 0;
2018         lio->ptp_info.n_ext_ts = 0;
2019         lio->ptp_info.n_per_out = 0;
2020         lio->ptp_info.pps = 0;
2021         lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
2022         lio->ptp_info.adjtime = liquidio_ptp_adjtime;
2023         lio->ptp_info.gettime64 = liquidio_ptp_gettime;
2024         lio->ptp_info.settime64 = liquidio_ptp_settime;
2025         lio->ptp_info.enable = liquidio_ptp_enable;
2026
2027         lio->ptp_adjust = 0;
2028
2029         lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
2030                                              &oct->pci_dev->dev);
2031
2032         if (IS_ERR(lio->ptp_clock))
2033                 lio->ptp_clock = NULL;
2034 }
2035
2036 /**
2037  * \brief Init PTP clock
2038  * @param oct octeon device
2039  */
2040 static void liquidio_ptp_init(struct octeon_device *oct)
2041 {
2042         u64 clock_comp, cfg;
2043
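        /* The compensation value is nanoseconds-per-coprocessor-tick in
         * 32.32 fixed point: (10^9 << 32) / clock_rate. For example, a
         * hypothetical 500 MHz clock gives 2.0 ns/tick, i.e. 0x200000000.
         */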
2044         clock_comp = (u64)NSEC_PER_SEC << 32;
2045         do_div(clock_comp, oct->coproc_clock_rate);
2046         lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);
2047
2048         /* Enable */
2049         cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
2050         lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
2051 }
2052
2053 /**
2054  * \brief Load firmware to device
2055  * @param oct octeon device
2056  *
2057  * Maps device to firmware filename, requests firmware, and downloads it
2058  */
2059 static int load_firmware(struct octeon_device *oct)
2060 {
2061         int ret = 0;
2062         const struct firmware *fw;
2063         char fw_name[LIO_MAX_FW_FILENAME_LEN];
2064         char *tmp_fw_type;
2065
2066         if (fw_type_is_auto()) {
2067                 tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
2068                 strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
2069         } else {
2070                 tmp_fw_type = fw_type;
2071         }
2072
2073         sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
2074                 octeon_get_conf(oct)->card_name, tmp_fw_type,
2075                 LIO_FW_NAME_SUFFIX);
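        /* Example (assuming the usual "liquidio/" directory, "lio_" base
         * name and ".bin" suffix): a CN23XX NIC image would resolve to
         * something like "liquidio/lio_23xx_nic.bin".
         */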
2076
2077         ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
2078         if (ret) {
2079                 dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s\n",
2080                         fw_name);
2081                 release_firmware(fw);
2082                 return ret;
2083         }
2084
2085         ret = octeon_download_firmware(oct, fw->data, fw->size);
2086
2087         release_firmware(fw);
2088
2089         return ret;
2090 }
2091
2092 /**
2093  * \brief Callback for getting interface configuration
 * @param oct octeon device (reacquired from the command context)
2094  * @param status status of request
2095  * @param buf pointer to resp structure
2096  */
2097 static void if_cfg_callback(struct octeon_device *oct,
2098                             u32 status,
2099                             void *buf)
2100 {
2101         struct octeon_soft_command *sc = (struct octeon_soft_command *)buf;
2102         struct liquidio_if_cfg_resp *resp;
2103         struct liquidio_if_cfg_context *ctx;
2104
2105         resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
2106         ctx = (struct liquidio_if_cfg_context *)sc->ctxptr;
2107
2108         oct = lio_get_device(ctx->octeon_id);
2109         if (resp->status)
2110                 dev_err(&oct->pci_dev->dev, "nic if cfg instruction failed. Status: 0x%llx (0x%08x)\n",
2111                         CVM_CAST64(resp->status), status);
2112         WRITE_ONCE(ctx->cond, 1);
2113
2114         snprintf(oct->fw_info.liquidio_firmware_version, 32, "%s",
2115                  resp->cfg_info.liquidio_firmware_version);
2116
2117         /* This barrier is required to be sure that the response has been
2118          * written fully before waking up the handler
2119          */
2120         wmb();
2121
2122         wake_up_interruptible(&ctx->wc);
2123 }
2124
2125 /**
2126  * \brief Poll routine for checking transmit queue status
2127  * @param work work_struct data structure
2128  */
2129 static void octnet_poll_check_txq_status(struct work_struct *work)
2130 {
2131         struct cavium_wk *wk = (struct cavium_wk *)work;
2132         struct lio *lio = (struct lio *)wk->ctxptr;
2133
2134         if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
2135                 return;
2136
2137         check_txq_status(lio);
2138         queue_delayed_work(lio->txq_status_wq.wq,
2139                            &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2140 }
2141
2142 /**
2143  * \brief Sets up the txq poll check
2144  * @param netdev network device
2145  */
2146 static inline int setup_tx_poll_fn(struct net_device *netdev)
2147 {
2148         struct lio *lio = GET_LIO(netdev);
2149         struct octeon_device *oct = lio->oct_dev;
2150
2151         lio->txq_status_wq.wq = alloc_workqueue("txq-status",
2152                                                 WQ_MEM_RECLAIM, 0);
2153         if (!lio->txq_status_wq.wq) {
2154                 dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
2155                 return -1;
2156         }
2157         INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
2158                           octnet_poll_check_txq_status);
2159         lio->txq_status_wq.wk.ctxptr = lio;
2160         queue_delayed_work(lio->txq_status_wq.wq,
2161                            &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
2162         return 0;
2163 }
2164
2165 static inline void cleanup_tx_poll_fn(struct net_device *netdev)
2166 {
2167         struct lio *lio = GET_LIO(netdev);
2168
2169         if (lio->txq_status_wq.wq) {
2170                 cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
2171                 destroy_workqueue(lio->txq_status_wq.wq);
2172         }
2173 }
2174
2175 /**
2176  * \brief Net device open for LiquidIO
2177  * @param netdev network device
2178  */
2179 static int liquidio_open(struct net_device *netdev)
2180 {
2181         struct lio *lio = GET_LIO(netdev);
2182         struct octeon_device *oct = lio->oct_dev;
2183         struct napi_struct *napi, *n;
2184
2185         if (oct->props[lio->ifidx].napi_enabled == 0) {
2186                 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2187                         napi_enable(napi);
2188
2189                 oct->props[lio->ifidx].napi_enabled = 1;
2190
2191                 if (OCTEON_CN23XX_PF(oct))
2192                         oct->droq[0]->ops.poll_mode = 1;
2193         }
2194
2195         if (oct->ptp_enable)
2196                 oct_ptp_open(netdev);
2197
2198         ifstate_set(lio, LIO_IFSTATE_RUNNING);
2199
2200         /* Ready for link status updates */
2201         lio->intf_open = 1;
2202
2203         netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");
2204
2205         if (OCTEON_CN23XX_PF(oct)) {
2206                 if (!oct->msix_on)
2207                         if (setup_tx_poll_fn(netdev))
2208                                 return -1;
2209         } else {
2210                 if (setup_tx_poll_fn(netdev))
2211                         return -1;
2212         }
2213
2214         start_txq(netdev);
2215
2216         /* tell Octeon to start forwarding packets to host */
2217         send_rx_ctrl_cmd(lio, 1);
2218
2219         dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
2220                  netdev->name);
2221
2222         return 0;
2223 }
2224
2225 /**
2226  * \brief Net device stop for LiquidIO
2227  * @param netdev network device
2228  */
2229 static int liquidio_stop(struct net_device *netdev)
2230 {
2231         struct lio *lio = GET_LIO(netdev);
2232         struct octeon_device *oct = lio->oct_dev;
2233         struct napi_struct *napi, *n;
2234
2235         if (oct->props[lio->ifidx].napi_enabled) {
2236                 list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
2237                         napi_disable(napi);
2238
2239                 oct->props[lio->ifidx].napi_enabled = 0;
2240
2241                 if (OCTEON_CN23XX_PF(oct))
2242                         oct->droq[0]->ops.poll_mode = 0;
2243         }
2244
2245         ifstate_reset(lio, LIO_IFSTATE_RUNNING);
2246
2247         netif_tx_disable(netdev);
2248
2249         /* Inform that netif carrier is down */
2250         netif_carrier_off(netdev);
2251         lio->intf_open = 0;
2252         lio->linfo.link.s.link_up = 0;
2253         lio->link_changes++;
2254
2255         /* Tell Octeon that nic interface is down. */
2256         send_rx_ctrl_cmd(lio, 0);
2257
2258         if (OCTEON_CN23XX_PF(oct)) {
2259                 if (!oct->msix_on)
2260                         cleanup_tx_poll_fn(netdev);
2261         } else {
2262                 cleanup_tx_poll_fn(netdev);
2263         }
2264
2265         if (lio->ptp_clock) {
2266                 ptp_clock_unregister(lio->ptp_clock);
2267                 lio->ptp_clock = NULL;
2268         }
2269
2270         dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);
2271
2272         return 0;
2273 }
2274
2275 /**
2276  * \brief Converts a mask based on net device flags
2277  * @param netdev network device
2278  *
2279  * This routine generates an octnet_ifflags mask from the net device flags
2280  * received from the OS.
2281  */
2282 static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
2283 {
2284         enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;
2285
2286         if (netdev->flags & IFF_PROMISC)
2287                 f |= OCTNET_IFFLAG_PROMISC;
2288
2289         if (netdev->flags & IFF_ALLMULTI)
2290                 f |= OCTNET_IFFLAG_ALLMULTI;
2291
2292         if (netdev->flags & IFF_MULTICAST) {
2293                 f |= OCTNET_IFFLAG_MULTICAST;
2294
2295                 /* Accept all multicast addresses if there are more than we
2296                  * can handle
2297                  */
2298                 if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
2299                         f |= OCTNET_IFFLAG_ALLMULTI;
2300         }
2301
2302         if (netdev->flags & IFF_BROADCAST)
2303                 f |= OCTNET_IFFLAG_BROADCAST;
2304
2305         return f;
2306 }
2307
2308 /**
2309  * \brief Net device set_multicast_list
2310  * @param netdev network device
2311  */
2312 static void liquidio_set_mcast_list(struct net_device *netdev)
2313 {
2314         struct lio *lio = GET_LIO(netdev);
2315         struct octeon_device *oct = lio->oct_dev;
2316         struct octnic_ctrl_pkt nctrl;
2317         struct netdev_hw_addr *ha;
2318         u64 *mc;
2319         int ret;
2320         int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);
2321
2322         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2323
2324         /* Create a ctrl pkt command to be sent to core app. */
2325         nctrl.ncmd.u64 = 0;
2326         nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
2327         nctrl.ncmd.s.param1 = get_new_flags(netdev);
2328         nctrl.ncmd.s.param2 = mc_count;
2329         nctrl.ncmd.s.more = mc_count;
2330         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2331         nctrl.netpndev = (u64)netdev;
2332         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2333
2334         /* copy all the addresses into the udd */
2335         mc = &nctrl.udd[0];
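        /* Each 6-byte MAC is copied to offset 2 of an 8-byte udd word
         * (two leading pad bytes, then the address in network byte
         * order), mirroring the hw_addr layout in liquidio_set_mac().
         */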
2336         netdev_for_each_mc_addr(ha, netdev) {
2337                 *mc = 0;
2338                 memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
2339                 /* no need to swap bytes */
2340
2341                 if (++mc > &nctrl.udd[mc_count])
2342                         break;
2343         }
2344
2345         /* This callback can be invoked in atomic context, so we must
2346          * not sleep; send the command without waiting for a response.
2347          */
2348         nctrl.wait_time = 0;
2349
2350         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2351         if (ret < 0) {
2352                 dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
2353                         ret);
2354         }
2355 }
2356
2357 /**
2358  * \brief Net device set_mac_address
2359  * @param netdev network device
2360  */
2361 static int liquidio_set_mac(struct net_device *netdev, void *p)
2362 {
2363         int ret = 0;
2364         struct lio *lio = GET_LIO(netdev);
2365         struct octeon_device *oct = lio->oct_dev;
2366         struct sockaddr *addr = (struct sockaddr *)p;
2367         struct octnic_ctrl_pkt nctrl;
2368
2369         if (!is_valid_ether_addr(addr->sa_data))
2370                 return -EADDRNOTAVAIL;
2371
2372         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2373
2374         nctrl.ncmd.u64 = 0;
2375         nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
2376         nctrl.ncmd.s.param1 = 0;
2377         nctrl.ncmd.s.more = 1;
2378         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2379         nctrl.netpndev = (u64)netdev;
2380         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2381         nctrl.wait_time = 100;
2382
2383         nctrl.udd[0] = 0;
2384         /* The MAC Address is presented in network byte order. */
2385         memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);
2386
2387         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2388         if (ret < 0) {
2389                 dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
2390                 return -ENOMEM;
2391         }
2392         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2393         memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);
2394
2395         return 0;
2396 }
2397
2398 /**
2399  * \brief Net device get_stats
2400  * @param netdev network device
2401  */
2402 static struct net_device_stats *liquidio_get_stats(struct net_device *netdev)
2403 {
2404         struct lio *lio = GET_LIO(netdev);
2405         struct net_device_stats *stats = &netdev->stats;
2406         struct octeon_device *oct;
2407         u64 pkts = 0, drop = 0, bytes = 0;
2408         struct oct_droq_stats *oq_stats;
2409         struct oct_iq_stats *iq_stats;
2410         int i, iq_no, oq_no;
2411
2412         oct = lio->oct_dev;
2413
2414         if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
2415                 return stats;
2416
2417         for (i = 0; i < oct->num_iqs; i++) {
2418                 iq_no = lio->linfo.txpciq[i].s.q_no;
2419                 iq_stats = &oct->instr_queue[iq_no]->stats;
2420                 pkts += iq_stats->tx_done;
2421                 drop += iq_stats->tx_dropped;
2422                 bytes += iq_stats->tx_tot_bytes;
2423         }
2424
2425         stats->tx_packets = pkts;
2426         stats->tx_bytes = bytes;
2427         stats->tx_dropped = drop;
2428
2429         pkts = 0;
2430         drop = 0;
2431         bytes = 0;
2432
2433         for (i = 0; i < oct->num_oqs; i++) {
2434                 oq_no = lio->linfo.rxpciq[i].s.q_no;
2435                 oq_stats = &oct->droq[oq_no]->stats;
2436                 pkts += oq_stats->rx_pkts_received;
2437                 drop += (oq_stats->rx_dropped +
2438                          oq_stats->dropped_nodispatch +
2439                          oq_stats->dropped_toomany +
2440                          oq_stats->dropped_nomem);
2441                 bytes += oq_stats->rx_bytes_received;
2442         }
2443
2444         stats->rx_bytes = bytes;
2445         stats->rx_packets = pkts;
2446         stats->rx_dropped = drop;
2447
2448         return stats;
2449 }
2450
2451 /**
2452  * \brief Net device change_mtu
2453  * @param netdev network device
2454  */
2455 static int liquidio_change_mtu(struct net_device *netdev, int new_mtu)
2456 {
2457         struct lio *lio = GET_LIO(netdev);
2458         struct octeon_device *oct = lio->oct_dev;
2459         struct octnic_ctrl_pkt nctrl;
2460         int ret = 0;
2461
2462         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2463
2464         nctrl.ncmd.u64 = 0;
2465         nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MTU;
2466         nctrl.ncmd.s.param1 = new_mtu;
2467         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2468         nctrl.wait_time = 100;
2469         nctrl.netpndev = (u64)netdev;
2470         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2471
2472         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2473         if (ret < 0) {
2474                 dev_err(&oct->pci_dev->dev, "Failed to set MTU\n");
2475                 return -1;
2476         }
2477
2478         lio->mtu = new_mtu;
2479
2480         return 0;
2481 }
2482
2483 /**
2484  * \brief Handler for SIOCSHWTSTAMP ioctl
2485  * @param netdev network device
2486  * @param ifr interface request
2488  */
2489 static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
2490 {
2491         struct hwtstamp_config conf;
2492         struct lio *lio = GET_LIO(netdev);
2493
2494         if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
2495                 return -EFAULT;
2496
2497         if (conf.flags)
2498                 return -EINVAL;
2499
2500         switch (conf.tx_type) {
2501         case HWTSTAMP_TX_ON:
2502         case HWTSTAMP_TX_OFF:
2503                 break;
2504         default:
2505                 return -ERANGE;
2506         }
2507
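        /* The hardware timestamps either every received packet or none,
         * so any specific PTP filter requested below is coerced up to
         * HWTSTAMP_FILTER_ALL and reported back to user space as such.
         */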
2508         switch (conf.rx_filter) {
2509         case HWTSTAMP_FILTER_NONE:
2510                 break;
2511         case HWTSTAMP_FILTER_ALL:
2512         case HWTSTAMP_FILTER_SOME:
2513         case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
2514         case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
2515         case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
2516         case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
2517         case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
2518         case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
2519         case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
2520         case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
2521         case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
2522         case HWTSTAMP_FILTER_PTP_V2_EVENT:
2523         case HWTSTAMP_FILTER_PTP_V2_SYNC:
2524         case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
2525         case HWTSTAMP_FILTER_NTP_ALL:
2526                 conf.rx_filter = HWTSTAMP_FILTER_ALL;
2527                 break;
2528         default:
2529                 return -ERANGE;
2530         }
2531
2532         if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
2533                 ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2535         else
2536                 ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
2537
2538         return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
2539 }
2540
2541 /**
2542  * \brief ioctl handler
2543  * @param netdev network device
2544  * @param ifr interface request
2545  * @param cmd command
2546  */
2547 static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
2548 {
2549         struct lio *lio = GET_LIO(netdev);
2550
2551         switch (cmd) {
2552         case SIOCSHWTSTAMP:
2553                 if (lio->oct_dev->ptp_enable)
2554                         return hwtstamp_ioctl(netdev, ifr);
                /* fall through: hw timestamping unsupported without PTP */
2555         default:
2556                 return -EOPNOTSUPP;
2557         }
2558 }
2559
2560 /**
2561  * \brief Handle a Tx timestamp response
 * @param oct octeon device
2562  * @param status response status
2563  * @param buf pointer to skb
2564  */
2565 static void handle_timestamp(struct octeon_device *oct,
2566                              u32 status,
2567                              void *buf)
2568 {
2569         struct octnet_buf_free_info *finfo;
2570         struct octeon_soft_command *sc;
2571         struct oct_timestamp_resp *resp;
2572         struct lio *lio;
2573         struct sk_buff *skb = (struct sk_buff *)buf;
2574
2575         finfo = (struct octnet_buf_free_info *)skb->cb;
2576         lio = finfo->lio;
2577         sc = finfo->sc;
2578         oct = lio->oct_dev;
2579         resp = (struct oct_timestamp_resp *)sc->virtrptr;
2580
2581         if (status != OCTEON_REQUEST_DONE) {
2582                 dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
2583                         CVM_CAST64(status));
2584                 resp->timestamp = 0;
2585         }
2586
2587         octeon_swap_8B_data(&resp->timestamp, 1);
2588
2589         if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
2590                 struct skb_shared_hwtstamps ts;
2591                 u64 ns = resp->timestamp;
2592
2593                 netif_info(lio, tx_done, lio->netdev,
2594                            "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
2595                            skb, (unsigned long long)ns);
2596                 ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
2597                 skb_tstamp_tx(skb, &ts);
2598         }
2599
2600         octeon_free_soft_command(oct, sc);
2601         tx_buffer_free(skb);
2602 }
2603
2604 /** \brief Send a data packet that will be timestamped
2605  * @param oct octeon device
2606  * @param ndata pointer to network data
2607  * @param finfo pointer to private network data
2608  */
2609 static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
2610                                          struct octnic_data_pkt *ndata,
2611                                          struct octnet_buf_free_info *finfo,
2612                                          int xmit_more)
2613 {
2614         int retval;
2615         struct octeon_soft_command *sc;
2616         struct lio *lio;
2617         int ring_doorbell;
2618         u32 len;
2619
2620         lio = finfo->lio;
2621
2622         sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
2623                                             sizeof(struct oct_timestamp_resp));
2624         finfo->sc = sc;
2625
2626         if (!sc) {
2627                 dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
2628                 return IQ_SEND_FAILED;
2629         }
2630
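        /* A timestamped packet must elicit a response from the firmware
         * (it carries the TX timestamp back), so the no-response request
         * types are promoted to their RESP_NET counterparts here.
         */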
2631         if (ndata->reqtype == REQTYPE_NORESP_NET)
2632                 ndata->reqtype = REQTYPE_RESP_NET;
2633         else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
2634                 ndata->reqtype = REQTYPE_RESP_NET_SG;
2635
2636         sc->callback = handle_timestamp;
2637         sc->callback_arg = finfo->skb;
2638         sc->iq_no = ndata->q_no;
2639
2640         if (OCTEON_CN23XX_PF(oct))
2641                 len = (u32)((struct octeon_instr_ih3 *)
2642                             (&sc->cmd.cmd3.ih3))->dlengsz;
2643         else
2644                 len = (u32)((struct octeon_instr_ih2 *)
2645                             (&sc->cmd.cmd2.ih2))->dlengsz;
2646
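        /* xmit_more set means the stack has more packets queued for this
         * queue, so the (relatively expensive) MMIO doorbell write can be
         * deferred until the last packet of the batch.
         */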
2647         ring_doorbell = !xmit_more;
2648
2649         retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
2650                                      sc, len, ndata->reqtype);
2651
2652         if (retval == IQ_SEND_FAILED) {
2653                 dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
2654                         retval);
2655                 octeon_free_soft_command(oct, sc);
2656         } else {
2657                 netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
2658         }
2659
2660         return retval;
2661 }
2662
2663 /** \brief Transmit network packets to the Octeon interface
2664  * @param skbuff   skbuff struct to be passed to network layer.
2665  * @param netdev    pointer to network device
2666  * @returns whether the packet was transmitted to the device okay or not
2667  *             (NETDEV_TX_OK or NETDEV_TX_BUSY)
2668  */
2669 static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
2670 {
2671         struct lio *lio;
2672         struct octnet_buf_free_info *finfo;
2673         union octnic_cmd_setup cmdsetup;
2674         struct octnic_data_pkt ndata;
2675         struct octeon_device *oct;
2676         struct oct_iq_stats *stats;
2677         struct octeon_instr_irh *irh;
2678         union tx_info *tx_info;
2679         int status = 0;
2680         int q_idx = 0, iq_no = 0;
2681         int j, xmit_more = 0;
2682         u64 dptr = 0;
2683         u32 tag = 0;
2684
2685         lio = GET_LIO(netdev);
2686         oct = lio->oct_dev;
2687
2688         if (netif_is_multiqueue(netdev)) {
2689                 q_idx = skb->queue_mapping;
2690                 q_idx = (q_idx % (lio->linfo.num_txpciq));
2691                 tag = q_idx;
2692                 iq_no = lio->linfo.txpciq[q_idx].s.q_no;
2693         } else {
2694                 iq_no = lio->txq;
2695         }
2696
2697         stats = &oct->instr_queue[iq_no]->stats;
2698
2699         /* Check for all conditions in which the current packet cannot be
2700          * transmitted.
2701          */
2702         if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
2703             (!lio->linfo.link.s.link_up) ||
2704             (!skb->len)) {
2705                 netif_info(lio, tx_err, lio->netdev,
2706                            "Transmit failed link_status : %d\n",
2707                            lio->linfo.link.s.link_up);
2708                 goto lio_xmit_failed;
2709         }
2710
2711         /* Use space in skb->cb to store info used to unmap and
2712          * free the buffers.
2713          */
2714         finfo = (struct octnet_buf_free_info *)skb->cb;
2715         finfo->lio = lio;
2716         finfo->skb = skb;
2717         finfo->sc = NULL;
2718
2719         /* Prepare the attributes for the data to be passed to OSI. */
2720         memset(&ndata, 0, sizeof(struct octnic_data_pkt));
2721
2722         ndata.buf = (void *)finfo;
2723
2724         ndata.q_no = iq_no;
2725
2726         if (netif_is_multiqueue(netdev)) {
2727                 if (octnet_iq_is_full(oct, ndata.q_no)) {
2728                         /* defer sending if queue is full */
2729                         netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2730                                    ndata.q_no);
2731                         stats->tx_iq_busy++;
2732                         return NETDEV_TX_BUSY;
2733                 }
2734         } else {
2735                 if (octnet_iq_is_full(oct, lio->txq)) {
2736                         /* defer sending if queue is full */
2737                         stats->tx_iq_busy++;
2738                         netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
2739                                    lio->txq);
2740                         return NETDEV_TX_BUSY;
2741                 }
2742         }
2743         /* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu:  %d, q_no:%d\n",
2744          *      lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
2745          */
2746
2747         ndata.datasize = skb->len;
2748
2749         cmdsetup.u64 = 0;
2750         cmdsetup.s.iq_no = iq_no;
2751
2752         if (skb->ip_summed == CHECKSUM_PARTIAL) {
2753                 if (skb->encapsulation) {
2754                         cmdsetup.s.tnl_csum = 1;
2755                         stats->tx_vxlan++;
2756                 } else {
2757                         cmdsetup.s.transport_csum = 1;
2758                 }
2759         }
2760         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
2761                 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2762                 cmdsetup.s.timestamp = 1;
2763         }
2764
2765         if (skb_shinfo(skb)->nr_frags == 0) {
2766                 cmdsetup.s.u.datasize = skb->len;
2767                 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2768
2769                 /* Map the linear skb data for DMA to the device */
2770                 dptr = dma_map_single(&oct->pci_dev->dev,
2771                                       skb->data,
2772                                       skb->len,
2773                                       DMA_TO_DEVICE);
2774                 if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
2775                         dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
2776                                 __func__);
2777                         return NETDEV_TX_BUSY;
2778                 }
2779
2780                 if (OCTEON_CN23XX_PF(oct))
2781                         ndata.cmd.cmd3.dptr = dptr;
2782                 else
2783                         ndata.cmd.cmd2.dptr = dptr;
2784                 finfo->dptr = dptr;
2785                 ndata.reqtype = REQTYPE_NORESP_NET;
2786
2787         } else {
2788                 int i, frags;
2789                 struct skb_frag_struct *frag;
2790                 struct octnic_gather *g;
2791
2792                 spin_lock(&lio->glist_lock[q_idx]);
2793                 g = (struct octnic_gather *)
2794                         list_delete_head(&lio->glist[q_idx]);
2795                 spin_unlock(&lio->glist_lock[q_idx]);
2796
2797                 if (!g) {
2798                         netif_info(lio, tx_err, lio->netdev,
2799                                    "Transmit scatter gather: glist null!\n");
2800                         goto lio_xmit_failed;
2801                 }
2802
2803                 cmdsetup.s.gather = 1;
2804                 cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
2805                 octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);
2806
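                /* The gather list needs nr_frags + 1 pointers: slot 0 for
                 * the linear skb data mapped below, then one slot per page
                 * fragment, packed four pointers per sg entry as in the
                 * free_netsgbuf() unmap path.
                 */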
2807                 memset(g->sg, 0, g->sg_size);
2808
2809                 g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
2810                                                  skb->data,
2811                                                  (skb->len - skb->data_len),
2812                                                  DMA_TO_DEVICE);
2813                 if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
2814                         dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
2815                                 __func__);
2816                         return NETDEV_TX_BUSY;
2817                 }
2818                 add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);
2819
2820                 frags = skb_shinfo(skb)->nr_frags;
2821                 i = 1;
2822                 while (frags--) {
2823                         frag = &skb_shinfo(skb)->frags[i - 1];
2824
2825                         g->sg[(i >> 2)].ptr[(i & 3)] =
2826                                 dma_map_page(&oct->pci_dev->dev,
2827                                              frag->page.p,
2828                                              frag->page_offset,
2829                                              frag->size,
2830                                              DMA_TO_DEVICE);
2831
2832                         if (dma_mapping_error(&oct->pci_dev->dev,
2833                                               g->sg[i >> 2].ptr[i & 3])) {
2834                                 dma_unmap_single(&oct->pci_dev->dev,
2835                                                  g->sg[0].ptr[0],
2836                                                  skb->len - skb->data_len,
2837                                                  DMA_TO_DEVICE);
2838                                 for (j = 1; j < i; j++) {
2839                                         frag = &skb_shinfo(skb)->frags[j - 1];
2840                                         dma_unmap_page(&oct->pci_dev->dev,
2841                                                        g->sg[j >> 2].ptr[j & 3],
2842                                                        frag->size,
2843                                                        DMA_TO_DEVICE);
2844                                 }
2845                                 dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
2846                                         __func__);
2847                                 return NETDEV_TX_BUSY;
2848                         }
2849
2850                         add_sg_size(&g->sg[(i >> 2)], frag->size, (i & 3));
2851                         i++;
2852                 }
2853
2854                 dptr = g->sg_dma_ptr;
2855
2856                 if (OCTEON_CN23XX_PF(oct))
2857                         ndata.cmd.cmd3.dptr = dptr;
2858                 else
2859                         ndata.cmd.cmd2.dptr = dptr;
2860                 finfo->dptr = dptr;
2861                 finfo->g = g;
2862
2863                 ndata.reqtype = REQTYPE_NORESP_NET_SG;
2864         }
2865
2866         if (OCTEON_CN23XX_PF(oct)) {
2867                 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
2868                 tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
2869         } else {
2870                 irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
2871                 tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
2872         }
2873
2874         if (skb_shinfo(skb)->gso_size) {
2875                 tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
2876                 tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
2877                 stats->tx_gso++;
2878         }
2879
2880         /* Ask hardware to insert the VLAN tag: the 16-bit TCI splits
         * into a 3-bit priority (PCP, bits 15:13) and a 12-bit VLAN ID.
         */
2881         if (skb_vlan_tag_present(skb)) {
2882                 irh->priority = skb_vlan_tag_get(skb) >> 13;
2883                 irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
2884         }
2885
2886         xmit_more = skb->xmit_more;
2887
2888         if (unlikely(cmdsetup.s.timestamp))
2889                 status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
2890         else
2891                 status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
2892         if (status == IQ_SEND_FAILED)
2893                 goto lio_xmit_failed;
2894
2895         netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");
2896
2897         if (status == IQ_SEND_STOP)
2898                 stop_q(netdev, q_idx);
2899
2900         netif_trans_update(netdev);
2901
2902         if (tx_info->s.gso_segs)
2903                 stats->tx_done += tx_info->s.gso_segs;
2904         else
2905                 stats->tx_done++;
2906         stats->tx_tot_bytes += ndata.datasize;
2907
2908         return NETDEV_TX_OK;
2909
2910 lio_xmit_failed:
2911         stats->tx_dropped++;
2912         netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
2913                    iq_no, stats->tx_dropped);
2914         if (dptr)
2915                 dma_unmap_single(&oct->pci_dev->dev, dptr,
2916                                  ndata.datasize, DMA_TO_DEVICE);
2917
2918         octeon_ring_doorbell_locked(oct, iq_no);
2919
2920         tx_buffer_free(skb);
2921         return NETDEV_TX_OK;
2922 }
2923
2924 /** \brief Network device Tx timeout
2925  * @param netdev    pointer to network device
2926  */
2927 static void liquidio_tx_timeout(struct net_device *netdev)
2928 {
2929         struct lio *lio;
2930
2931         lio = GET_LIO(netdev);
2932
2933         netif_info(lio, tx_err, lio->netdev,
2934                    "Transmit timeout tx_dropped:%lu, waking up queues now!!\n",
2935                    netdev->stats.tx_dropped);
2936         netif_trans_update(netdev);
2937         txqs_wake(netdev);
2938 }
2939
2940 static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
2941                                     __be16 proto __attribute__((unused)),
2942                                     u16 vid)
2943 {
2944         struct lio *lio = GET_LIO(netdev);
2945         struct octeon_device *oct = lio->oct_dev;
2946         struct octnic_ctrl_pkt nctrl;
2947         int ret = 0;
2948
2949         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2950
2951         nctrl.ncmd.u64 = 0;
2952         nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
2953         nctrl.ncmd.s.param1 = vid;
2954         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2955         nctrl.wait_time = 100;
2956         nctrl.netpndev = (u64)netdev;
2957         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2958
2959         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2960         if (ret < 0) {
2961                 dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
2962                         ret);
2963         }
2964
2965         return ret;
2966 }
2967
2968 static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
2969                                      __be16 proto __attribute__((unused)),
2970                                      u16 vid)
2971 {
2972         struct lio *lio = GET_LIO(netdev);
2973         struct octeon_device *oct = lio->oct_dev;
2974         struct octnic_ctrl_pkt nctrl;
2975         int ret = 0;
2976
2977         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
2978
2979         nctrl.ncmd.u64 = 0;
2980         nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
2981         nctrl.ncmd.s.param1 = vid;
2982         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
2983         nctrl.wait_time = 100;
2984         nctrl.netpndev = (u64)netdev;
2985         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
2986
2987         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
2988         if (ret < 0) {
2989                 dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
2990                         ret);
2991         }
2992         return ret;
2993 }
2994
2995 /** Sending command to enable/disable RX checksum offload
2996  * @param netdev                pointer to network device
2997  * @param command               OCTNET_CMD_TNL_RX_CSUM_CTL
2998  * @param rx_cmd                OCTNET_CMD_RXCSUM_ENABLE/
2999  *                              OCTNET_CMD_RXCSUM_DISABLE
3000  * @returns                     SUCCESS or FAILURE
3001  */
3002 static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
3003                                        u8 rx_cmd)
3004 {
3005         struct lio *lio = GET_LIO(netdev);
3006         struct octeon_device *oct = lio->oct_dev;
3007         struct octnic_ctrl_pkt nctrl;
3008         int ret = 0;
3009
3010         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3011
3012         nctrl.ncmd.u64 = 0;
3013         nctrl.ncmd.s.cmd = command;
3014         nctrl.ncmd.s.param1 = rx_cmd;
3015         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3016         nctrl.wait_time = 100;
3017         nctrl.netpndev = (u64)netdev;
3018         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3019
3020         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3021         if (ret < 0) {
3022                 dev_err(&oct->pci_dev->dev,
3023                         "DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
3024                         ret);
3025         }
3026         return ret;
3027 }
3028
3029 /** Sending command to add/delete VxLAN UDP port to firmware
3030  * @param netdev                pointer to network device
3031  * @param command               OCTNET_CMD_VXLAN_PORT_CONFIG
3032  * @param vxlan_port            VxLAN port to be added or deleted
3033  * @param vxlan_cmd_bit         OCTNET_CMD_VXLAN_PORT_ADD,
3034  *                              OCTNET_CMD_VXLAN_PORT_DEL
3035  * @returns                     SUCCESS or FAILURE
3036  */
3037 static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
3038                                        u16 vxlan_port, u8 vxlan_cmd_bit)
3039 {
3040         struct lio *lio = GET_LIO(netdev);
3041         struct octeon_device *oct = lio->oct_dev;
3042         struct octnic_ctrl_pkt nctrl;
3043         int ret = 0;
3044
3045         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3046
3047         nctrl.ncmd.u64 = 0;
3048         nctrl.ncmd.s.cmd = command;
3049         nctrl.ncmd.s.more = vxlan_cmd_bit;
3050         nctrl.ncmd.s.param1 = vxlan_port;
3051         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3052         nctrl.wait_time = 100;
3053         nctrl.netpndev = (u64)netdev;
3054         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3055
3056         ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
3057         if (ret < 0) {
3058                 dev_err(&oct->pci_dev->dev,
3059                         "VxLAN port add/delete failed in core (ret:0x%x)\n",
3060                         ret);
3061         }
3062         return ret;
3063 }
3064
3065 /** \brief Net device fix features
3066  * @param netdev  pointer to network device
3067  * @param request features requested
3068  * @returns updated features list
3069  */
3070 static netdev_features_t liquidio_fix_features(struct net_device *netdev,
3071                                                netdev_features_t request)
3072 {
3073         struct lio *lio = netdev_priv(netdev);
3074
3075         if ((request & NETIF_F_RXCSUM) &&
3076             !(lio->dev_capability & NETIF_F_RXCSUM))
3077                 request &= ~NETIF_F_RXCSUM;
3078
3079         if ((request & NETIF_F_HW_CSUM) &&
3080             !(lio->dev_capability & NETIF_F_HW_CSUM))
3081                 request &= ~NETIF_F_HW_CSUM;
3082
3083         if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
3084                 request &= ~NETIF_F_TSO;
3085
3086         if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
3087                 request &= ~NETIF_F_TSO6;
3088
3089         if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
3090                 request &= ~NETIF_F_LRO;
3091
3092                 /* Disable LRO if RXCSUM is off */
3093         if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
3094             (lio->dev_capability & NETIF_F_LRO))
3095                 request &= ~NETIF_F_LRO;
3096
3097         if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3098             !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
3099                 request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;
3100
3101         return request;
3102 }
3103
3104 /** \brief Net device set features
3105  * @param netdev  pointer to network device
3106  * @param features features to enable/disable
3107  */
3108 static int liquidio_set_features(struct net_device *netdev,
3109                                  netdev_features_t features)
3110 {
3111         struct lio *lio = netdev_priv(netdev);
3112
3113         if ((features & NETIF_F_LRO) &&
3114             (lio->dev_capability & NETIF_F_LRO) &&
3115             !(netdev->features & NETIF_F_LRO))
3116                 liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3117                                      OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3118         else if (!(features & NETIF_F_LRO) &&
3119                  (lio->dev_capability & NETIF_F_LRO) &&
3120                  (netdev->features & NETIF_F_LRO))
3121                 liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
3122                                      OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3123
3124         /* Send a command to the firmware to enable/disable RX checksum
3125          * offload when the setting is changed via ethtool
3126          */
3127         if (!(netdev->features & NETIF_F_RXCSUM) &&
3128             (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
3129             (features & NETIF_F_RXCSUM))
3130                 liquidio_set_rxcsum_command(netdev,
3131                                             OCTNET_CMD_TNL_RX_CSUM_CTL,
3132                                             OCTNET_CMD_RXCSUM_ENABLE);
3133         else if ((netdev->features & NETIF_F_RXCSUM) &&
3134                  (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
3135                  !(features & NETIF_F_RXCSUM))
3136                 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3137                                             OCTNET_CMD_RXCSUM_DISABLE);
3138
3139         if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3140             (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3141             !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
3142                 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3143                                      OCTNET_CMD_VLAN_FILTER_ENABLE);
3144         else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3145                  (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
3146                  (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
3147                 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3148                                      OCTNET_CMD_VLAN_FILTER_DISABLE);
3149
3150         return 0;
3151 }
3152
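/**
 * \brief Callback to add a VxLAN UDP tunnel port learned from the stack
 * @param netdev  pointer to network device
 * @param ti      UDP tunnel information (type and port)
 */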
3153 static void liquidio_add_vxlan_port(struct net_device *netdev,
3154                                     struct udp_tunnel_info *ti)
3155 {
3156         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3157                 return;
3158
3159         liquidio_vxlan_port_command(netdev,
3160                                     OCTNET_CMD_VXLAN_PORT_CONFIG,
3161                                     htons(ti->port),
3162                                     OCTNET_CMD_VXLAN_PORT_ADD);
3163 }
3164
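/**
 * \brief Callback to delete a VxLAN UDP tunnel port
 * @param netdev  pointer to network device
 * @param ti      UDP tunnel information (type and port)
 */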
3165 static void liquidio_del_vxlan_port(struct net_device *netdev,
3166                                     struct udp_tunnel_info *ti)
3167 {
3168         if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
3169                 return;
3170
3171         liquidio_vxlan_port_command(netdev,
3172                                     OCTNET_CMD_VXLAN_PORT_CONFIG,
3173                                     htons(ti->port),
3174                                     OCTNET_CMD_VXLAN_PORT_DEL);
3175 }
3176
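/**
 * \brief Send a VF MAC address change to the firmware
 * @param netdev             pointer to network device
 * @param vfidx              zero-based VF index
 * @param mac                new MAC address (network byte order)
 * @param is_admin_assigned  true if the address was assigned by the PF admin
 * @returns 0 on success, -EINVAL for a bad address or VF index
 */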
3177 static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
3178                                  u8 *mac, bool is_admin_assigned)
3179 {
3180         struct lio *lio = GET_LIO(netdev);
3181         struct octeon_device *oct = lio->oct_dev;
3182         struct octnic_ctrl_pkt nctrl;
3183
3184         if (!is_valid_ether_addr(mac))
3185                 return -EINVAL;
3186
3187         if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
3188                 return -EINVAL;
3189
3190         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3191
3192         nctrl.ncmd.u64 = 0;
3193         nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
3194         /* vfidx is 0 based, but vf_num (param1) is 1 based */
3195         nctrl.ncmd.s.param1 = vfidx + 1;
3196         nctrl.ncmd.s.param2 = (is_admin_assigned ? 1 : 0);
3197         nctrl.ncmd.s.more = 1;
3198         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3199         nctrl.netpndev = (u64)netdev;
3200         nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
3201         nctrl.wait_time = LIO_CMD_WAIT_TM;
3202
3203         nctrl.udd[0] = 0;
3204         /* The MAC Address is presented in network byte order. */
3205         ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);
3206
3207         oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];
3208
3209         octnet_send_nic_ctrl_pkt(oct, &nctrl);
3210
3211         return 0;
3212 }
3213
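/**
 * \brief ndo_set_vf_mac handler; programs the MAC and notifies the VF
 * @param netdev  pointer to network device
 * @param vfidx   zero-based VF index
 * @param mac     new MAC address
 * @returns 0 on success, negative error code otherwise
 */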
3214 static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
3215 {
3216         struct lio *lio = GET_LIO(netdev);
3217         struct octeon_device *oct = lio->oct_dev;
3218         int retval;
3219
3220         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3221                 return -EINVAL;
3222
3223         retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
3224         if (!retval)
3225                 cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);
3226
3227         return retval;
3228 }
3229
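/**
 * \brief ndo_set_vf_vlan handler; sets or clears a VLAN filter for a VF
 * @param netdev      pointer to network device
 * @param vfidx       zero-based VF index
 * @param vlan        VLAN id (0 clears the filter)
 * @param qos         priority bits (0-7) for the VLAN TCI
 * @param vlan_proto  must be ETH_P_8021Q
 * @returns 0 on success, negative error code otherwise
 */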
3230 static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
3231                                 u16 vlan, u8 qos, __be16 vlan_proto)
3232 {
3233         struct lio *lio = GET_LIO(netdev);
3234         struct octeon_device *oct = lio->oct_dev;
3235         struct octnic_ctrl_pkt nctrl;
3236         u16 vlantci;
3237
3238         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3239                 return -EINVAL;
3240
3241         if (vlan_proto != htons(ETH_P_8021Q))
3242                 return -EPROTONOSUPPORT;
3243
3244         if (vlan >= VLAN_N_VID || qos > 7)
3245                 return -EINVAL;
3246
3247         if (vlan)
3248                 vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
3249         else
3250                 vlantci = 0;
3251
3252         if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
3253                 return 0;
3254
3255         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3256
3257         if (vlan)
3258                 nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
3259         else
3260                 nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
3261
3262         nctrl.ncmd.s.param1 = vlantci;
3263         nctrl.ncmd.s.param2 =
3264             vfidx + 1; /* vfidx is 0 based, but vf_num (param2) is 1 based */
3265         nctrl.ncmd.s.more = 0;
3266         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3267         nctrl.cb_fn = NULL;
3268         nctrl.wait_time = LIO_CMD_WAIT_TM;
3269
3270         octnet_send_nic_ctrl_pkt(oct, &nctrl);
3271
3272         oct->sriov_info.vf_vlantci[vfidx] = vlantci;
3273
3274         return 0;
3275 }
3276
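/**
 * \brief ndo_get_vf_config handler; reports a VF's MAC, VLAN and link state
 * @param netdev  pointer to network device
 * @param vfidx   zero-based VF index
 * @param ivi     filled in with the VF's current configuration
 * @returns 0 on success, -EINVAL for a bad VF index
 */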
3277 static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
3278                                   struct ifla_vf_info *ivi)
3279 {
3280         struct lio *lio = GET_LIO(netdev);
3281         struct octeon_device *oct = lio->oct_dev;
3282         u8 *macaddr;
3283
3284         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3285                 return -EINVAL;
3286
3287         ivi->vf = vfidx;
3288         macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
3289         ether_addr_copy(&ivi->mac[0], macaddr);
3290         ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
3291         ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
3292         ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
3293         return 0;
3294 }
3295
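/**
 * \brief ndo_set_vf_link_state handler; asks the firmware to force a VF's
 * link up or down, or to track the physical link (auto)
 * @param netdev     pointer to network device
 * @param vfidx      zero-based VF index
 * @param linkstate  IFLA_VF_LINK_STATE_AUTO/ENABLE/DISABLE
 * @returns 0 on success, -EINVAL for a bad VF index
 */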
3296 static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
3297                                       int linkstate)
3298 {
3299         struct lio *lio = GET_LIO(netdev);
3300         struct octeon_device *oct = lio->oct_dev;
3301         struct octnic_ctrl_pkt nctrl;
3302
3303         if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
3304                 return -EINVAL;
3305
3306         if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
3307                 return 0;
3308
3309         memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
3310         nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
3311         nctrl.ncmd.s.param1 =
3312             vfidx + 1; /* vfidx is 0 based, but vf_num (param1) is 1 based */
3313         nctrl.ncmd.s.param2 = linkstate;
3314         nctrl.ncmd.s.more = 0;
3315         nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
3316         nctrl.cb_fn = NULL;
3317         nctrl.wait_time = LIO_CMD_WAIT_TM;
3318
3319         octnet_send_nic_ctrl_pkt(oct, &nctrl);
3320
3321         oct->sriov_info.vf_linkstate[vfidx] = linkstate;
3322
3323         return 0;
3324 }
3325
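/**
 * \brief devlink callback that reports the current eswitch mode
 * @param devlink  devlink instance of this adapter
 * @param mode     filled in with the current eswitch mode
 * @returns 0
 */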
3326 static int
3327 liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
3328 {
3329         struct lio_devlink_priv *priv;
3330         struct octeon_device *oct;
3331
3332         priv = devlink_priv(devlink);
3333         oct = priv->oct;
3334
3335         *mode = oct->eswitch_mode;
3336
3337         return 0;
3338 }
3339
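/**
 * \brief devlink callback that switches between legacy and switchdev
 * eswitch modes, creating or destroying the VF representors accordingly
 * @param devlink  devlink instance of this adapter
 * @param mode     DEVLINK_ESWITCH_MODE_LEGACY or DEVLINK_ESWITCH_MODE_SWITCHDEV
 * @returns 0 on success, negative error code otherwise
 */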
3340 static int
3341 liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode)
3342 {
3343         struct lio_devlink_priv *priv;
3344         struct octeon_device *oct;
3345         int ret = 0;
3346
3347         priv = devlink_priv(devlink);
3348         oct = priv->oct;
3349
3350         if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
3351                 return -EINVAL;
3352
3353         if (oct->eswitch_mode == mode)
3354                 return 0;
3355
3356         switch (mode) {
3357         case DEVLINK_ESWITCH_MODE_SWITCHDEV:
3358                 oct->eswitch_mode = mode;
3359                 ret = lio_vf_rep_create(oct);
3360                 break;
3361
3362         case DEVLINK_ESWITCH_MODE_LEGACY:
3363                 lio_vf_rep_destroy(oct);
3364                 oct->eswitch_mode = mode;
3365                 break;
3366
3367         default:
3368                 ret = -EINVAL;
3369         }
3370
3371         return ret;
3372 }
3373
3374 static const struct devlink_ops liquidio_devlink_ops = {
3375         .eswitch_mode_get = liquidio_eswitch_mode_get,
3376         .eswitch_mode_set = liquidio_eswitch_mode_set,
3377 };
3378
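/**
 * \brief switchdev callback; in switchdev mode, reports the port parent id
 * (taken from the interface MAC address) so that all ports of this adapter
 * are identified as belonging to the same switch
 * @param dev   pointer to network device
 * @param attr  switchdev attribute being queried
 * @returns 0 on success, -EOPNOTSUPP otherwise
 */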
3379 static int
3380 lio_pf_switchdev_attr_get(struct net_device *dev, struct switchdev_attr *attr)
3381 {
3382         struct lio *lio = GET_LIO(dev);
3383         struct octeon_device *oct = lio->oct_dev;
3384
3385         if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
3386                 return -EOPNOTSUPP;
3387
3388         switch (attr->id) {
3389         case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
3390                 attr->u.ppid.id_len = ETH_ALEN;
3391                 ether_addr_copy(attr->u.ppid.id,
3392                                 (void *)&lio->linfo.hw_addr + 2);
3393                 break;
3394
3395         default:
3396                 return -EOPNOTSUPP;
3397         }
3398
3399         return 0;
3400 }
3401
3402 static const struct switchdev_ops lio_pf_switchdev_ops = {
3403         .switchdev_port_attr_get = lio_pf_switchdev_attr_get,
3404 };
3405
3406 static const struct net_device_ops lionetdevops = {
3407         .ndo_open               = liquidio_open,
3408         .ndo_stop               = liquidio_stop,
3409         .ndo_start_xmit         = liquidio_xmit,
3410         .ndo_get_stats          = liquidio_get_stats,
3411         .ndo_set_mac_address    = liquidio_set_mac,
3412         .ndo_set_rx_mode        = liquidio_set_mcast_list,
3413         .ndo_tx_timeout         = liquidio_tx_timeout,
3414
3415         .ndo_vlan_rx_add_vid    = liquidio_vlan_rx_add_vid,
3416         .ndo_vlan_rx_kill_vid   = liquidio_vlan_rx_kill_vid,
3417         .ndo_change_mtu         = liquidio_change_mtu,
3418         .ndo_do_ioctl           = liquidio_ioctl,
3419         .ndo_fix_features       = liquidio_fix_features,
3420         .ndo_set_features       = liquidio_set_features,
3421         .ndo_udp_tunnel_add     = liquidio_add_vxlan_port,
3422         .ndo_udp_tunnel_del     = liquidio_del_vxlan_port,
3423         .ndo_set_vf_mac         = liquidio_set_vf_mac,
3424         .ndo_set_vf_vlan        = liquidio_set_vf_vlan,
3425         .ndo_get_vf_config      = liquidio_get_vf_config,
3426         .ndo_set_vf_link_state  = liquidio_set_vf_link_state,
3427 };
3428
3429 /** \brief Entry point for the liquidio module
3430  */
3431 static int __init liquidio_init(void)
3432 {
3433         int i;
3434         struct handshake *hs;
3435
3436         init_completion(&first_stage);
3437
3438         octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);
3439
3440         if (liquidio_init_pci())
3441                 return -EINVAL;
3442
3443         wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));
3444
3445         for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3446                 hs = &handshake[i];
3447                 if (hs->pci_dev) {
3448                         wait_for_completion(&hs->init);
3449                         if (!hs->init_ok) {
3450                                 /* init handshake failed */
3451                                 dev_err(&hs->pci_dev->dev,
3452                                         "Failed to init device\n");
3453                                 liquidio_deinit_pci();
3454                                 return -EIO;
3455                         }
3456                 }
3457         }
3458
3459         for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
3460                 hs = &handshake[i];
3461                 if (hs->pci_dev) {
3462                         wait_for_completion_timeout(&hs->started,
3463                                                     msecs_to_jiffies(30000));
3464                         if (!hs->started_ok) {
3465                                 /* starter handshake failed */
3466                                 dev_err(&hs->pci_dev->dev,
3467                                         "Firmware failed to start\n");
3468                                 liquidio_deinit_pci();
3469                                 return -EIO;
3470                         }
3471                 }
3472         }
3473
3474         return 0;
3475 }
3476
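/**
 * \brief Dispatch handler for NIC_INFO packets from the firmware; updates
 * the link status of the interface that matches the reported gmxport
 * @param recv_info  received packet and metadata
 * @param buf        octeon device
 * @returns 0
 */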
3477 static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
3478 {
3479         struct octeon_device *oct = (struct octeon_device *)buf;
3480         struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
3481         int gmxport = 0;
3482         union oct_link_status *ls;
3483         int i;
3484
3485         if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
3486                 dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
3487                         recv_pkt->buffer_size[0],
3488                         recv_pkt->rh.r_nic_info.gmxport);
3489                 goto nic_info_err;
3490         }
3491
3492         gmxport = recv_pkt->rh.r_nic_info.gmxport;
3493         ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
3494                 OCT_DROQ_INFO_SIZE);
3495
3496         octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
3497         for (i = 0; i < oct->ifcount; i++) {
3498                 if (oct->props[i].gmxport == gmxport) {
3499                         update_link_status(oct->props[i].netdev, ls);
3500                         break;
3501                 }
3502         }
3503
3504 nic_info_err:
3505         for (i = 0; i < recv_pkt->buffer_count; i++)
3506                 recv_buffer_free(recv_pkt->buffer_ptr[i]);
3507         octeon_free_recv_info(recv_info);
3508         return 0;
3509 }
3510
3511 /**
3512  * \brief Setup network interfaces
3513  * @param octeon_dev  octeon device
3514  *
3515  * Called during init time for each device. It assumes the NIC
3516  * is already up and running.  The link information for each
3517  * interface is passed in link_info.
3518  */
3519 static int setup_nic_devices(struct octeon_device *octeon_dev)
3520 {
3521         struct lio *lio = NULL;
3522         struct net_device *netdev;
3523         u8 mac[6], i, j, *fw_ver;
3524         struct octeon_soft_command *sc;
3525         struct liquidio_if_cfg_context *ctx;
3526         struct liquidio_if_cfg_resp *resp;
3527         struct octdev_props *props;
3528         int retval, num_iqueues, num_oqueues;
3529         union oct_nic_if_cfg if_cfg;
3530         unsigned int base_queue;
3531         unsigned int gmx_port_id;
3532         u32 resp_size, ctx_size, data_size;
3533         u32 ifidx_or_pfnum;
3534         struct lio_version *vdata;
3535         struct devlink *devlink;
3536         struct lio_devlink_priv *lio_devlink;
3537
3538         /* This is to handle link status changes */
3539         octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
3540                                     OPCODE_NIC_INFO,
3541                                     lio_nic_info, octeon_dev);
3542
3543         /* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
3544          * They are handled directly.
3545          */
3546         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
3547                                         free_netbuf);
3548
3549         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
3550                                         free_netsgbuf);
3551
3552         octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
3553                                         free_netsgbuf_with_resp);
3554
3555         for (i = 0; i < octeon_dev->ifcount; i++) {
3556                 resp_size = sizeof(struct liquidio_if_cfg_resp);
3557                 ctx_size = sizeof(struct liquidio_if_cfg_context);
3558                 data_size = sizeof(struct lio_version);
3559                 sc = (struct octeon_soft_command *)
3560                         octeon_alloc_soft_command(octeon_dev, data_size,
3561                                                   resp_size, ctx_size);
3562                 resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
3563                 ctx  = (struct liquidio_if_cfg_context *)sc->ctxptr;
3564                 vdata = (struct lio_version *)sc->virtdptr;
3565
3566                 *((u64 *)vdata) = 0;
3567                 vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3568                 vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3569                 vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3570
3571                 if (OCTEON_CN23XX_PF(octeon_dev)) {
3572                         num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3573                         num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3574                         base_queue = octeon_dev->sriov_info.pf_srn;
3575
3576                         gmx_port_id = octeon_dev->pf_num;
3577                         ifidx_or_pfnum = octeon_dev->pf_num;
3578                 } else {
3579                         num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3580                                                 octeon_get_conf(octeon_dev), i);
3581                         num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3582                                                 octeon_get_conf(octeon_dev), i);
3583                         base_queue = CFG_GET_BASE_QUE_NIC_IF(
3584                                                 octeon_get_conf(octeon_dev), i);
3585                         gmx_port_id = CFG_GET_GMXID_NIC_IF(
3586                                                 octeon_get_conf(octeon_dev), i);
3587                         ifidx_or_pfnum = i;
3588                 }
3589
3590                 dev_dbg(&octeon_dev->pci_dev->dev,
3591                         "requesting config for interface %d, iqs %d, oqs %d\n",
3592                         ifidx_or_pfnum, num_iqueues, num_oqueues);
3593                 WRITE_ONCE(ctx->cond, 0);
3594                 ctx->octeon_id = lio_get_device_id(octeon_dev);
3595                 init_waitqueue_head(&ctx->wc);
3596
3597                 if_cfg.u64 = 0;
3598                 if_cfg.s.num_iqueues = num_iqueues;
3599                 if_cfg.s.num_oqueues = num_oqueues;
3600                 if_cfg.s.base_queue = base_queue;
3601                 if_cfg.s.gmx_port_id = gmx_port_id;
3602
3603                 sc->iq_no = 0;
3604
3605                 octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3606                                             OPCODE_NIC_IF_CFG, 0,
3607                                             if_cfg.u64, 0);
3608
3609                 sc->callback = if_cfg_callback;
3610                 sc->callback_arg = sc;
3611                 sc->wait_time = 3000;
3612
3613                 retval = octeon_send_soft_command(octeon_dev, sc);
3614                 if (retval == IQ_SEND_FAILED) {
3615                         dev_err(&octeon_dev->pci_dev->dev,
3616                                 "iq/oq config failed status: %x\n",
3617                                 retval);
3618                         /* Soft instr is freed by driver in case of failure. */
3619                         goto setup_nic_dev_fail;
3620                 }
3621
3622                 /* Sleep on a wait queue until the cond flag indicates that
3623                  * the response has arrived or the request has timed out.
3624                  */
3625                 if (sleep_cond(&ctx->wc, &ctx->cond) == -EINTR) {
3626                         dev_err(&octeon_dev->pci_dev->dev, "Wait interrupted\n");
3627                         goto setup_nic_wait_intr;
3628                 }
3629
3630                 retval = resp->status;
3631                 if (retval) {
3632                         dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3633                         goto setup_nic_dev_fail;
3634                 }
3635
3636                 /* Verify f/w version (in case of 'auto' loading from flash) */
3637                 fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3638                 if (memcmp(LIQUIDIO_BASE_VERSION,
3639                            fw_ver,
3640                            strlen(LIQUIDIO_BASE_VERSION))) {
3641                         dev_err(&octeon_dev->pci_dev->dev,
3642                                 "Unmatched firmware version. Expected %s.x, got %s.\n",
3643                                 LIQUIDIO_BASE_VERSION, fw_ver);
3644                         goto setup_nic_dev_fail;
3645                 } else if (atomic_read(octeon_dev->adapter_fw_state) ==
3646                            FW_IS_PRELOADED) {
3647                         dev_info(&octeon_dev->pci_dev->dev,
3648                                  "Using auto-loaded firmware version %s.\n",
3649                                  fw_ver);
3650                 }
3651
3652                 octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3653                                     (sizeof(struct liquidio_if_cfg_info)) >> 3);
3654
3655                 num_iqueues = hweight64(resp->cfg_info.iqmask);
3656                 num_oqueues = hweight64(resp->cfg_info.oqmask);
3657
3658                 if (!(num_iqueues) || !(num_oqueues)) {
3659                         dev_err(&octeon_dev->pci_dev->dev,
3660                                 "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3661                                 resp->cfg_info.iqmask,
3662                                 resp->cfg_info.oqmask);
3663                         goto setup_nic_dev_fail;
3664                 }
3665                 dev_dbg(&octeon_dev->pci_dev->dev,
3666                         "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d\n",
3667                         i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3668                         num_iqueues, num_oqueues);
3669                 netdev = alloc_etherdev_mq(LIO_SIZE, num_iqueues);
3670
3671                 if (!netdev) {
3672                         dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3673                         goto setup_nic_dev_fail;
3674                 }
3675
3676                 SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3677
3678                 /* Associate the routines that will handle different
3679                  * netdev tasks.
3680                  */
3681                 netdev->netdev_ops = &lionetdevops;
3682                 SWITCHDEV_SET_OPS(netdev, &lio_pf_switchdev_ops);
3683
3684                 lio = GET_LIO(netdev);
3685
3686                 memset(lio, 0, sizeof(struct lio));
3687
3688                 lio->ifidx = ifidx_or_pfnum;
3689
3690                 props = &octeon_dev->props[i];
3691                 props->gmxport = resp->cfg_info.linfo.gmxport;
3692                 props->netdev = netdev;
3693
3694                 lio->linfo.num_rxpciq = num_oqueues;
3695                 lio->linfo.num_txpciq = num_iqueues;
3696                 for (j = 0; j < num_oqueues; j++) {
3697                         lio->linfo.rxpciq[j].u64 =
3698                                 resp->cfg_info.linfo.rxpciq[j].u64;
3699                 }
3700                 for (j = 0; j < num_iqueues; j++) {
3701                         lio->linfo.txpciq[j].u64 =
3702                                 resp->cfg_info.linfo.txpciq[j].u64;
3703                 }
3704                 lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3705                 lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3706                 lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3707
3708                 lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3709
3710                 if (OCTEON_CN23XX_PF(octeon_dev) ||
3711                     OCTEON_CN6XXX(octeon_dev)) {
3712                         lio->dev_capability = NETIF_F_HIGHDMA
3713                                               | NETIF_F_IP_CSUM
3714                                               | NETIF_F_IPV6_CSUM
3715                                               | NETIF_F_SG | NETIF_F_RXCSUM
3716                                               | NETIF_F_GRO
3717                                               | NETIF_F_TSO | NETIF_F_TSO6
3718                                               | NETIF_F_LRO;
3719                 }
3720                 netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3721
3722                 /* Copy of transmit encapsulation capabilities
3723                  * (TSO, TSO6, checksums) for this device
3724                  */
3725                 lio->enc_dev_capability = NETIF_F_IP_CSUM
3726                                           | NETIF_F_IPV6_CSUM
3727                                           | NETIF_F_GSO_UDP_TUNNEL
3728                                           | NETIF_F_HW_CSUM | NETIF_F_SG
3729                                           | NETIF_F_RXCSUM
3730                                           | NETIF_F_TSO | NETIF_F_TSO6
3731                                           | NETIF_F_LRO;
3732
3733                 netdev->hw_enc_features = (lio->enc_dev_capability &
3734                                            ~NETIF_F_LRO);
3735
3736                 lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3737
3738                 netdev->vlan_features = lio->dev_capability;
3739                 /* Add any unchangeable hw features */
3740                 lio->dev_capability |=  NETIF_F_HW_VLAN_CTAG_FILTER |
3741                                         NETIF_F_HW_VLAN_CTAG_RX |
3742                                         NETIF_F_HW_VLAN_CTAG_TX;
3743
3744                 netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3745
3746                 netdev->hw_features = lio->dev_capability;
3747                 /* NETIF_F_HW_VLAN_CTAG_RX is always on; remove it from hw_features so it cannot be toggled */
3748                 netdev->hw_features = netdev->hw_features &
3749                         ~NETIF_F_HW_VLAN_CTAG_RX;
3750
3751                 /* MTU range: 68 - 16000 */
3752                 netdev->min_mtu = LIO_MIN_MTU_SIZE;
3753                 netdev->max_mtu = LIO_MAX_MTU_SIZE;
3754
3755                 /* Point to the  properties for octeon device to which this
3756                  * interface belongs.
3757                  */
3758                 lio->oct_dev = octeon_dev;
3759                 lio->octprops = props;
3760                 lio->netdev = netdev;
3761
3762                 dev_dbg(&octeon_dev->pci_dev->dev,
3763                         "if%d gmx: %d hw_addr: 0x%llx\n", i,
3764                         lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3765
3766                 for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3767                         u8 vfmac[ETH_ALEN];
3768
3769                         random_ether_addr(&vfmac[0]);
3770                         if (__liquidio_set_vf_mac(netdev, j,
3771                                                   &vfmac[0], false)) {
3772                                 dev_err(&octeon_dev->pci_dev->dev,
3773                                         "Error setting VF%d MAC address\n",
3774                                         j);
3775                                 goto setup_nic_dev_fail;
3776                         }
3777                 }
3778
3779                 /* 64-bit swap required on LE machines */
3780                 octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
3781                 for (j = 0; j < 6; j++)
3782                         mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3783
3784                 /* Copy MAC Address to OS network device structure */
3785
3786                 ether_addr_copy(netdev->dev_addr, mac);
3787
3788                 /* By default, all interfaces on a single Octeon use the
3789                  * same tx and rx queues
3790                  */
3791                 lio->txq = lio->linfo.txpciq[0].s.q_no;
3792                 lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3793                 if (liquidio_setup_io_queues(octeon_dev, i,
3794                                              lio->linfo.num_txpciq,
3795                                              lio->linfo.num_rxpciq)) {
3796                         dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3797                         goto setup_nic_dev_fail;
3798                 }
3799
3800                 ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3801
3802                 lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3803                 lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3804
3805                 if (setup_glists(octeon_dev, lio, num_iqueues)) {
3806                         dev_err(&octeon_dev->pci_dev->dev,
3807                                 "Gather list allocation failed\n");
3808                         goto setup_nic_dev_fail;
3809                 }
3810
3811                 /* Register ethtool support */
3812                 liquidio_set_ethtool_ops(netdev);
3813                 if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3814                         octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3815                 else
3816                         octeon_dev->priv_flags = 0x0;
3817
3818                 if (netdev->features & NETIF_F_LRO)
3819                         liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3820                                              OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3821
3822                 liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3823                                      OCTNET_CMD_VLAN_FILTER_ENABLE);
3824
3825                 if ((debug != -1) && (debug & NETIF_MSG_HW))
3826                         liquidio_set_feature(netdev,
3827                                              OCTNET_CMD_VERBOSE_ENABLE, 0);
3828
3829                 if (setup_link_status_change_wq(netdev))
3830                         goto setup_nic_dev_fail;
3831
3832                 if ((octeon_dev->fw_info.app_cap_flags &
3833                      LIQUIDIO_TIME_SYNC_CAP) &&
3834                     setup_sync_octeon_time_wq(netdev))
3835                         goto setup_nic_dev_fail;
3836
3837                 if (setup_rx_oom_poll_fn(netdev))
3838                         goto setup_nic_dev_fail;
3839
3840                 /* Register the network device with the OS */
3841                 if (register_netdev(netdev)) {
3842                         dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3843                         goto setup_nic_dev_fail;
3844                 }
3845
3846                 dev_dbg(&octeon_dev->pci_dev->dev,
3847                         "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3848                         i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3849                 netif_carrier_off(netdev);
3850                 lio->link_changes++;
3851
3852                 ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3853
3854                 /* Send a command to the firmware to enable Rx checksum
3855                  * offload by default when the driver sets up this
3856                  * device
3857                  */
3858                 liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3859                                             OCTNET_CMD_RXCSUM_ENABLE);
3860                 liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3861                                      OCTNET_CMD_TXCSUM_ENABLE);
3862
3863                 dev_dbg(&octeon_dev->pci_dev->dev,
3864                         "NIC ifidx:%d Setup successful\n", i);
3865
3866                 octeon_free_soft_command(octeon_dev, sc);
3867         }
3868
3869         devlink = devlink_alloc(&liquidio_devlink_ops,
3870                                 sizeof(struct lio_devlink_priv));
3871         if (!devlink) {
3872                 dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3873                 goto setup_nic_wait_intr;
3874         }
3875
3876         lio_devlink = devlink_priv(devlink);
3877         lio_devlink->oct = octeon_dev;
3878
3879         if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
3880                 devlink_free(devlink);
3881                 dev_err(&octeon_dev->pci_dev->dev,
3882                         "devlink registration failed\n");
3883                 goto setup_nic_wait_intr;
3884         }
3885
3886         octeon_dev->devlink = devlink;
3887         octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3888
3889         return 0;
3890
3891 setup_nic_dev_fail:
3892
3893         octeon_free_soft_command(octeon_dev, sc);
3894
3895 setup_nic_wait_intr:
3896
3897         while (i--) {
3898                 dev_err(&octeon_dev->pci_dev->dev,
3899                         "NIC ifidx:%d Setup failed\n", i);
3900                 liquidio_destroy_nic_device(octeon_dev, i);
3901         }
3902         return -ENODEV;
3903 }
3904
3905 #ifdef CONFIG_PCI_IOV
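/**
 * \brief Enable PCI SR-IOV and build the lookup table that maps DPI ring
 * numbers to VF pci_dev pointers
 * @param oct  octeon device
 * @returns number of VFs allocated on success, negative error code otherwise
 */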
3906 static int octeon_enable_sriov(struct octeon_device *oct)
3907 {
3908         unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3909         struct pci_dev *vfdev;
3910         int err;
3911         u32 u;
3912
3913         if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3914                 err = pci_enable_sriov(oct->pci_dev,
3915                                        oct->sriov_info.num_vfs_alloced);
3916                 if (err) {
3917                         dev_err(&oct->pci_dev->dev,
3918                                 "OCTEON: Failed to enable PCI sriov: %d\n",
3919                                 err);
3920                         oct->sriov_info.num_vfs_alloced = 0;
3921                         return err;
3922                 }
3923                 oct->sriov_info.sriov_enabled = 1;
3924
3925                 /* init lookup table that maps DPI ring number to VF pci_dev
3926                  * struct pointer
3927                  */
3928                 u = 0;
3929                 vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3930                                        OCTEON_CN23XX_VF_VID, NULL);
3931                 while (vfdev) {
3932                         if (vfdev->is_virtfn &&
3933                             (vfdev->physfn == oct->pci_dev)) {
3934                                 oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3935                                         vfdev;
3936                                 u += oct->sriov_info.rings_per_vf;
3937                         }
3938                         vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3939                                                OCTEON_CN23XX_VF_VID, vfdev);
3940                 }
3941         }
3942
3943         return num_vfs_alloced;
3944 }
3945
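/**
 * \brief Disable PCI SR-IOV and clear the DPI-ring-to-VF lookup table
 * @param oct  octeon device
 * @returns 0 on success, -EPERM if VFs are still assigned to VMs
 */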
3946 static int lio_pci_sriov_disable(struct octeon_device *oct)
3947 {
3948         int u;
3949
3950         if (pci_vfs_assigned(oct->pci_dev)) {
3951                 dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3952                 return -EPERM;
3953         }
3954
3955         pci_disable_sriov(oct->pci_dev);
3956
3957         u = 0;
3958         while (u < MAX_POSSIBLE_VFS) {
3959                 oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3960                 u += oct->sriov_info.rings_per_vf;
3961         }
3962
3963         oct->sriov_info.num_vfs_alloced = 0;
3964         dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3965                  oct->pf_num);
3966
3967         return 0;
3968 }
3969
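/**
 * \brief PCI sriov_configure handler; enables SR-IOV and the VF
 * representors, or disables both when num_vfs is 0
 * @param dev      PCI device
 * @param num_vfs  number of VFs requested
 * @returns 0 on success, negative error code otherwise
 */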
3970 static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3971 {
3972         struct octeon_device *oct = pci_get_drvdata(dev);
3973         int ret = 0;
3974
3975         if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3976             (oct->sriov_info.sriov_enabled)) {
3977                 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3978                          oct->pf_num, num_vfs);
3979                 return 0;
3980         }
3981
3982         if (!num_vfs) {
3983                 lio_vf_rep_destroy(oct);
3984                 ret = lio_pci_sriov_disable(oct);
3985         } else if (num_vfs > oct->sriov_info.max_vfs) {
3986                 dev_err(&oct->pci_dev->dev,
3987                         "OCTEON: Max allowed VFs:%d user requested:%d\n",
3988                         oct->sriov_info.max_vfs, num_vfs);
3989                 ret = -EPERM;
3990         } else {
3991                 oct->sriov_info.num_vfs_alloced = num_vfs;
3992                 ret = octeon_enable_sriov(oct);
3993                 dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3994                          oct->pf_num, num_vfs);
3995                 ret = lio_vf_rep_create(oct);
3996                 if (ret)
3997                         dev_info(&oct->pci_dev->dev,
3998                                  "vf representor create failed\n");
3999         }
4000
4001         return ret;
4002 }
4003 #endif
4004
4005 /**
4006  * \brief initialize the NIC
4007  * @param oct octeon device
4008  *
4009  * This initialization routine is called once the Octeon device application is
4010  * up and running
4011  */
4012 static int liquidio_init_nic_module(struct octeon_device *oct)
4013 {
4014         int i, retval = 0;
4015         int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
4016
4017         dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
4018
4019         /* Only the default iq and oq were initialized earlier;
4020          * initialize the rest as well.
4021          */
4022         /* Run the port_config command for each port. */
4023         oct->ifcount = num_nic_ports;
4024
4025         memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
4026
4027         for (i = 0; i < MAX_OCTEON_LINKS; i++)
4028                 oct->props[i].gmxport = -1;
4029
4030         retval = setup_nic_devices(oct);
4031         if (retval) {
4032                 dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
4033                 goto octnet_init_failure;
4034         }
4035
4036         /* Call vf_rep_modinit if the firmware is switchdev capable
4037          * and do it from the first liquidio function probed.
4038          */
4039         if (!oct->octeon_id &&
4040             oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
4041                 retval = lio_vf_rep_modinit();
4042                 if (retval) {
4043                         liquidio_stop_nic_module(oct);
4044                         goto octnet_init_failure;
4045                 }
4046         }
4047
4048         liquidio_ptp_init(oct);
4049
4050         dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
4051
4052         return retval;
4053
4054 octnet_init_failure:
4055
4056         oct->ifcount = 0;
4057
4058         return retval;
4059 }
4060
4061 /**
4062  * \brief starter callback that invokes the remaining initialization work after
4063  * the NIC is up and running.
4064  * @param work  pointer to the work_struct for this callback
4065  */
4066 static void nic_starter(struct work_struct *work)
4067 {
4068         struct octeon_device *oct;
4069         struct cavium_wk *wk = (struct cavium_wk *)work;
4070
4071         oct = (struct octeon_device *)wk->ctxptr;
4072
4073         if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
4074                 return;
4075
4076         /* If the status of the device is CORE_OK, the core
4077          * application has reported its application type. Call
4078          * any registered handlers now and move to the RUNNING
4079          * state.
4080          */
4081         if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
4082                 schedule_delayed_work(&oct->nic_poll_work.work,
4083                                       LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4084                 return;
4085         }
4086
4087         atomic_set(&oct->status, OCT_DEV_RUNNING);
4088
4089         if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
4090                 dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
4091
4092                 if (liquidio_init_nic_module(oct))
4093                         dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
4094                 else
4095                         handshake[oct->octeon_id].started_ok = 1;
4096         } else {
4097                 dev_err(&oct->pci_dev->dev,
4098                         "Unexpected application running on NIC (%d). Check firmware.\n",
4099                         oct->app_mode);
4100         }
4101
4102         complete(&handshake[oct->octeon_id].started);
4103 }
4104
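/**
 * \brief Dispatch handler for VF driver notices (loaded, removed, or MAC
 * address changed); keeps vf_drv_loaded_mask and the module refcount in sync
 * @param recv_info  received packet and metadata
 * @param buf        octeon device
 * @returns 0
 */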
4105 static int
4106 octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
4107 {
4108         struct octeon_device *oct = (struct octeon_device *)buf;
4109         struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
4110         int i, notice, vf_idx;
4111         bool cores_crashed;
4112         u64 *data, vf_num;
4113
4114         notice = recv_pkt->rh.r.ossp;
4115         data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
4116
4117         /* the first 64-bit word of data is the vf_num */
4118         vf_num = data[0];
4119         octeon_swap_8B_data(&vf_num, 1);
4120         vf_idx = (int)vf_num - 1;
4121
4122         cores_crashed = READ_ONCE(oct->cores_crashed);
4123
4124         if (notice == VF_DRV_LOADED) {
4125                 if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4126                         oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4127                         dev_info(&oct->pci_dev->dev,
4128                                  "driver for VF%d was loaded\n", vf_idx);
4129                         if (!cores_crashed)
4130                                 try_module_get(THIS_MODULE);
4131                 }
4132         } else if (notice == VF_DRV_REMOVED) {
4133                 if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4134                         oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4135                         dev_info(&oct->pci_dev->dev,
4136                                  "driver for VF%d was removed\n", vf_idx);
4137                         if (!cores_crashed)
4138                                 module_put(THIS_MODULE);
4139                 }
4140         } else if (notice == VF_DRV_MACADDR_CHANGED) {
4141                 u8 *b = (u8 *)&data[1];
4142
4143                 oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4144                 dev_info(&oct->pci_dev->dev,
4145                          "VF driver changed VF%d's MAC address to %pM\n",
4146                          vf_idx, b + 2);
4147         }
4148
4149         for (i = 0; i < recv_pkt->buffer_count; i++)
4150                 recv_buffer_free(recv_pkt->buffer_ptr[i]);
4151         octeon_free_recv_info(recv_info);
4152
4153         return 0;
4154 }
4155
4156 /**
4157  * \brief Device initialization for each Octeon device that is probed
4158  * @param octeon_dev  octeon device
4159  */
4160 static int octeon_device_init(struct octeon_device *octeon_dev)
4161 {
4162         int j, ret;
4163         char bootcmd[] = "\n";
4164         char *dbg_enb = NULL;
4165         enum lio_fw_state fw_state;
4166         struct octeon_device_priv *oct_priv =
4167                 (struct octeon_device_priv *)octeon_dev->priv;
4168         atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4169
4170         /* Enable access to the octeon device and make its DMA capability
4171          * known to the OS.
4172          */
4173         if (octeon_pci_os_setup(octeon_dev))
4174                 return 1;
4175
4176         atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4177
4178         /* Identify the Octeon type and map the BAR address space. */
4179         if (octeon_chip_specific_setup(octeon_dev)) {
4180                 dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4181                 return 1;
4182         }
4183
4184         atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4185
4186         /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4187          * since that is what is required for the reference to be removed
4188          * during de-initialization (see 'octeon_destroy_resources').
4189          */
4190         octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4191                                PCI_SLOT(octeon_dev->pci_dev->devfn),
4192                                PCI_FUNC(octeon_dev->pci_dev->devfn),
4193                                true);
4194
4195         octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4196
4197         /* CN23XX supports preloaded firmware if the following is true:
4198          *
4199          * The adapter indicates that firmware is currently running AND
4200          * 'fw_type' is 'auto'.
4201          *
4202          * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4203          */
4204         if (OCTEON_CN23XX_PF(octeon_dev) &&
4205             cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4206                 atomic_cmpxchg(octeon_dev->adapter_fw_state,
4207                                FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4208         }
4209
4210         /* If loading firmware, only first device of adapter needs to do so. */
4211         fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4212                                   FW_NEEDS_TO_BE_LOADED,
4213                                   FW_IS_BEING_LOADED);
4214
4215         /* Here, [local variable] 'fw_state' is set to one of:
4216          *
4217          *   FW_IS_PRELOADED:       No firmware is to be loaded (see above)
4218          *   FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4219          *                          firmware to the adapter.
4220          *   FW_IS_BEING_LOADED:    The driver's second instance will not load
4221          *                          firmware to the adapter.
4222          */
4223
4224         /* Prior to f/w load, perform a soft reset of the Octeon device;
4225          * if error resetting, return w/error.
4226          */
4227         if (fw_state == FW_NEEDS_TO_BE_LOADED)
4228                 if (octeon_dev->fn_list.soft_reset(octeon_dev))
4229                         return 1;
4230
4231         /* Initialize the dispatch mechanism used to push packets arriving on
4232          * Octeon Output queues.
4233          */
4234         if (octeon_init_dispatch_list(octeon_dev))
4235                 return 1;
4236
4237         octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4238                                     OPCODE_NIC_CORE_DRV_ACTIVE,
4239                                     octeon_core_drv_init,
4240                                     octeon_dev);
4241
4242         octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4243                                     OPCODE_NIC_VF_DRV_NOTICE,
4244                                     octeon_recv_vf_drv_notice, octeon_dev);
4245         INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4246         octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4247         schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4248                               LIQUIDIO_STARTER_POLL_INTERVAL_MS);
4249
4250         atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4251
4252         if (octeon_set_io_queues_off(octeon_dev)) {
4253                 dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4254                 return 1;
4255         }
4256
4257         if (OCTEON_CN23XX_PF(octeon_dev)) {
4258                 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4259                 if (ret) {
4260                         dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4261                         return ret;
4262                 }
4263         }
4264
4265         /* Initialize soft command buffer pool
4266          */
4267         if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4268                 dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4269                 return 1;
4270         }
4271         atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4272
4273         /*  Setup the data structures that manage this Octeon's Input queues. */
4274         if (octeon_setup_instr_queues(octeon_dev)) {
4275                 dev_err(&octeon_dev->pci_dev->dev,
4276                         "instruction queue initialization failed\n");
4277                 return 1;
4278         }
4279         atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4280
4281         /* Initialize lists to manage the requests of different types that
4282          * arrive from user & kernel applications for this octeon device.
4283          */
4284         if (octeon_setup_response_list(octeon_dev)) {
4285                 dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4286                 return 1;
4287         }
4288         atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4289
4290         if (octeon_setup_output_queues(octeon_dev)) {
4291                 dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4292                 return 1;
4293         }
4294
4295         atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4296
4297         if (OCTEON_CN23XX_PF(octeon_dev)) {
4298                 if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4299                         dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4300                         return 1;
4301                 }
4302                 atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4303
4304                 if (octeon_allocate_ioq_vector(octeon_dev)) {
4305                         dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4306                         return 1;
4307                 }
4308                 atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4309
4310         } else {
4311                 /* The input and output queue registers were setup earlier (the
4312                  * queues were not enabled). Any additional registers
4313                  * that need to be programmed should be done now.
4314                  */
4315                 ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4316                 if (ret) {
4317                         dev_err(&octeon_dev->pci_dev->dev,
4318                                 "Failed to configure device registers\n");
4319                         return ret;
4320                 }
4321         }
4322
4323         /* Initialize the tasklet that handles output queue packet processing. */
4324         dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4325         tasklet_init(&oct_priv->droq_tasklet, octeon_droq_bh,
4326                      (unsigned long)octeon_dev);
4327
4328         /* Setup the interrupt handler and record the INT SUM register address
4329          */
4330         if (octeon_setup_interrupt(octeon_dev,
4331                                    octeon_dev->sriov_info.num_pf_rings))
4332                 return 1;
4333
4334         /* Enable Octeon device interrupts */
4335         octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4336
4337         atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4338
4339         /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4340          * the output queue is enabled.
4341          * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4342          * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4343          * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4344          * before any credits have been issued, causing the ring to be reset
4345          * (and the f/w appear to never have started).
4346          */
4347         for (j = 0; j < octeon_dev->num_oqs; j++)
4348                 writel(octeon_dev->droq[j]->max_count,
4349                        octeon_dev->droq[j]->pkts_credit_reg);
4350
4351         /* Enable the input and output queues for this Octeon device */
4352         ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4353         if (ret) {
4354                 dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues\n");
4355                 return ret;
4356         }
4357
4358         atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4359
4360         if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4361                 dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4362                 if (!ddr_timeout) {
4363                         dev_info(&octeon_dev->pci_dev->dev,
4364                                  "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4365                 }
4366
                schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);

                /* A ddr_timeout of 0 means: hold here until the user sets
                 * the module parameter to a non-zero value, then check
                 * whether DDR initialized after the soft reset.
                 */
                while (!ddr_timeout) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (schedule_timeout(HZ / 10)) {
                                /* user probably pressed Control-C */
                                return 1;
                        }
                }
                ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
                if (ret) {
                        dev_err(&octeon_dev->pci_dev->dev,
                                "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
                                ret);
                        return 1;
                }

                if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
                        dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
                        return 1;
                }

                /* Divert U-Boot to take commands from the host instead. */
                ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
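                /* Note: ret from the bootcmd send is not checked here; it is
                 * overwritten by the console-init calls below.
                 */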

                dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
                ret = octeon_init_consoles(octeon_dev);
                if (ret) {
                        dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
                        return 1;
                }
                /* If console debug is enabled, pass an empty string to select
                 * the default enablement; otherwise pass NULL for 'disabled'.
                 */
                dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
                ret = octeon_add_console(octeon_dev, 0, dbg_enb);
                if (ret) {
                        dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
                        return 1;
                } else if (octeon_console_debug_enabled(0)) {
                        /* If the console was added AND we're logging console
                         * output, then set our console print function.
                         */
                        octeon_dev->console[0].print = octeon_dbg_console_print;
                }

                atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);

                dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
                ret = load_firmware(octeon_dev);
                if (ret) {
                        dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
                        return 1;
                }

                atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
        }

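        /* Record success and wake anyone blocked on this device's handshake
         * completion.
         */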
        handshake[octeon_dev->octeon_id].init_ok = 1;
        complete(&handshake[octeon_dev->octeon_id].init);

        atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);

        return 0;
}
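
/* On success, the device status has advanced through OCT_DEV_INTR_SET_DONE
 * and OCT_DEV_IO_QUEUES_DONE (plus the mailbox/MSI-X and console/firmware
 * states on the paths that take them) and ends at OCT_DEV_HOST_OK.
 */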

/**
 * \brief Debug console print function
 * @param oct         octeon device
 * @param console_num console number
 * @param prefix      first portion of line to display
 * @param suffix      second portion of line to display
 *
 * The OCTEON debug console outputs entire lines (excluding '\n').
 * Normally, the line will be passed in the 'prefix' parameter.
 * However, due to buffering, it is possible for a line to be split into two
 * parts, in which case they will be passed as the 'prefix' and 'suffix'
 * parameters.
 */
static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
                                    char *prefix, char *suffix)
{
        if (prefix && suffix)
                dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
                         suffix);
        else if (prefix)
                dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
        else if (suffix)
                dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);

        return 0;
}
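
/* Example: a line split by buffering into prefix "boot " and suffix
 * "complete" on console 0 is logged as "0: boot complete".
 */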

/**
 * \brief Exits the module
 */
static void __exit liquidio_exit(void)
{
        liquidio_deinit_pci();

        pr_info("LiquidIO network module is now unloaded\n");
}

module_init(liquidio_init);
module_exit(liquidio_exit);