/**************************************************************************/
/* IBM System i and System p Virtual NIC Device Driver                    */
/* Copyright (C) 2014 IBM Corp.                                           */
/* Santiago Leon (santi_leon@yahoo.com)                                   */
/* Thomas Falcon (tlfalcon@linux.vnet.ibm.com)                            */
/* John Allen (jallen@linux.vnet.ibm.com)                                 */
/*                                                                        */
/* This program is free software; you can redistribute it and/or modify   */
/* it under the terms of the GNU General Public License as published by   */
/* the Free Software Foundation; either version 2 of the License, or      */
/* (at your option) any later version.                                    */
/*                                                                        */
/* This program is distributed in the hope that it will be useful,        */
/* but WITHOUT ANY WARRANTY; without even the implied warranty of         */
/* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the          */
/* GNU General Public License for more details.                           */
/*                                                                        */
/* You should have received a copy of the GNU General Public License      */
/* along with this program.                                               */
/*                                                                        */
/* This module contains the implementation of a virtual ethernet device   */
/* for use with IBM i/p Series LPAR Linux. It utilizes the logical LAN    */
/* option of the RS/6000 Platform Architecture to interface with virtual  */
/* ethernet NICs that are presented to the partition by the hypervisor.   */
/*                                                                        */
/* Messages are passed between the VNIC driver and the VNIC server using  */
/* Command/Response Queues (CRQs) and sub CRQs (sCRQs). CRQs are used to  */
/* issue and receive commands that initiate communication with the server */
/* on driver initialization. Sub CRQs (sCRQs) are similar to CRQs, but    */
/* are used by the driver to notify the server that a packet is           */
/* ready for transmission or that a buffer has been added to receive a    */
/* packet. Subsequently, sCRQs are used by the server to notify the       */
/* driver that a packet transmission has been completed or that a packet  */
/* has been received and placed in a waiting buffer.                      */
/*                                                                        */
/* In lieu of a more conventional "on-the-fly" DMA mapping strategy in    */
/* which skbs are DMA mapped and immediately unmapped when the transmit   */
/* or receive has been completed, the VNIC driver is required to use      */
/* "long term mapping". This entails that large, contiguous DMA mapped    */
/* buffers are allocated on driver initialization and these buffers are   */
/* then continuously reused to pass skbs to and from the VNIC server.     */
/**************************************************************************/
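
/* Long term buffer usage, for illustration (error handling omitted; the
 * helpers are defined later in this file):
 *
 *	struct ibmvnic_long_term_buff ltb;
 *
 *	alloc_long_term_buff(adapter, &ltb, size);
 *		- dma_alloc_coherent() plus a REQUEST_MAP CRQ, then wait
 *		  for the map response from the server
 *	- ltb.buff / ltb.addr are then reused for many skbs; rx_add and
 *	  tx descriptors reference offsets into this single mapping
 *	free_long_term_buff(adapter, &ltb);
 *		- REQUEST_UNMAP CRQ plus dma_free_coherent()
 */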
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/completion.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/kthread.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/interrupt.h>
#include <net/net_namespace.h>
#include <asm/hvcall.h>
#include <linux/atomic.h>
#include <asm/iommu.h>
#include <linux/uaccess.h>
#include <asm/firmware.h>
#include <linux/workqueue.h>

#include "ibmvnic.h"

static const char ibmvnic_driver_name[] = "ibmvnic";
static const char ibmvnic_driver_string[] = "IBM System i/p Virtual NIC Driver";

MODULE_AUTHOR("Santiago Leon <santi_leon@yahoo.com>");
MODULE_DESCRIPTION("IBM System i/p Virtual NIC Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(IBMVNIC_DRIVER_VERSION);

static int ibmvnic_version = IBMVNIC_INITIAL_VERSION;

static int ibmvnic_remove(struct vio_dev *);
static void release_sub_crqs(struct ibmvnic_adapter *);
static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *);
static int ibmvnic_reset_crq(struct ibmvnic_adapter *);
static int ibmvnic_send_crq_init(struct ibmvnic_adapter *);
static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *);
static int ibmvnic_send_crq(struct ibmvnic_adapter *, union ibmvnic_crq *);
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
		       union sub_crq *sub_crq);
static int send_subcrq_indirect(struct ibmvnic_adapter *, u64, u64, u64);
static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance);
static int enable_scrq_irq(struct ibmvnic_adapter *,
			   struct ibmvnic_sub_crq_queue *);
static int disable_scrq_irq(struct ibmvnic_adapter *,
			    struct ibmvnic_sub_crq_queue *);
static int pending_scrq(struct ibmvnic_adapter *,
			struct ibmvnic_sub_crq_queue *);
static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *,
					struct ibmvnic_sub_crq_queue *);
static int ibmvnic_poll(struct napi_struct *napi, int budget);
static void send_map_query(struct ibmvnic_adapter *adapter);
static void send_request_map(struct ibmvnic_adapter *, dma_addr_t, __be32, u8);
static void send_request_unmap(struct ibmvnic_adapter *, u8);

struct ibmvnic_stat {
	char name[ETH_GSTRING_LEN];
	int offset;
};

#define IBMVNIC_STAT_OFF(stat) (offsetof(struct ibmvnic_adapter, stats) + \
			     offsetof(struct ibmvnic_statistics, stat))
#define IBMVNIC_GET_STAT(a, off) (*((u64 *)(((unsigned long)(a)) + off)))
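
/* Each entry below pairs an ethtool string with the byte offset of the
 * matching u64 in the adapter's firmware-written statistics block;
 * ibmvnic_get_ethtool_stats() reads every value with
 * IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset).
 */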
static const struct ibmvnic_stat ibmvnic_stats[] = {
	{"rx_packets", IBMVNIC_STAT_OFF(rx_packets)},
	{"rx_bytes", IBMVNIC_STAT_OFF(rx_bytes)},
	{"tx_packets", IBMVNIC_STAT_OFF(tx_packets)},
	{"tx_bytes", IBMVNIC_STAT_OFF(tx_bytes)},
	{"ucast_tx_packets", IBMVNIC_STAT_OFF(ucast_tx_packets)},
	{"ucast_rx_packets", IBMVNIC_STAT_OFF(ucast_rx_packets)},
	{"mcast_tx_packets", IBMVNIC_STAT_OFF(mcast_tx_packets)},
	{"mcast_rx_packets", IBMVNIC_STAT_OFF(mcast_rx_packets)},
	{"bcast_tx_packets", IBMVNIC_STAT_OFF(bcast_tx_packets)},
	{"bcast_rx_packets", IBMVNIC_STAT_OFF(bcast_rx_packets)},
	{"align_errors", IBMVNIC_STAT_OFF(align_errors)},
	{"fcs_errors", IBMVNIC_STAT_OFF(fcs_errors)},
	{"single_collision_frames", IBMVNIC_STAT_OFF(single_collision_frames)},
	{"multi_collision_frames", IBMVNIC_STAT_OFF(multi_collision_frames)},
	{"sqe_test_errors", IBMVNIC_STAT_OFF(sqe_test_errors)},
	{"deferred_tx", IBMVNIC_STAT_OFF(deferred_tx)},
	{"late_collisions", IBMVNIC_STAT_OFF(late_collisions)},
	{"excess_collisions", IBMVNIC_STAT_OFF(excess_collisions)},
	{"internal_mac_tx_errors", IBMVNIC_STAT_OFF(internal_mac_tx_errors)},
	{"carrier_sense", IBMVNIC_STAT_OFF(carrier_sense)},
	{"too_long_frames", IBMVNIC_STAT_OFF(too_long_frames)},
	{"internal_mac_rx_errors", IBMVNIC_STAT_OFF(internal_mac_rx_errors)},
};
static long h_reg_sub_crq(unsigned long unit_address, unsigned long token,
			  unsigned long length, unsigned long *number,
			  unsigned long *irq)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_REG_SUB_CRQ, retbuf, unit_address, token, length);
	*number = retbuf[0];
	*irq = retbuf[1];

	return rc;
}

/* net_device_ops functions */

static void init_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *rx_pool, int num, int index,
			 int buff_size, int active)
{
	netdev_dbg(adapter->netdev,
		   "Initializing rx_pool %d, %d buffs, %d bytes each\n",
		   index, num, buff_size);
	rx_pool->size = num;
	rx_pool->index = index;
	rx_pool->buff_size = buff_size;
	rx_pool->active = active;
}

static int alloc_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb, int size)
{
	struct device *dev = &adapter->vdev->dev;

	ltb->size = size;
	ltb->buff = dma_alloc_coherent(dev, ltb->size, &ltb->addr,
				       GFP_KERNEL);
	if (!ltb->buff) {
		dev_err(dev, "Couldn't alloc long term buffer\n");
		return -ENOMEM;
	}

	ltb->map_id = adapter->map_id;
	adapter->map_id++;

	/* Initialize the completion before sending the request so the
	 * response handler cannot complete it first.
	 */
	init_completion(&adapter->fw_done);
	send_request_map(adapter, ltb->addr,
			 ltb->size, ltb->map_id);
	wait_for_completion(&adapter->fw_done);
	return 0;
}

static void free_long_term_buff(struct ibmvnic_adapter *adapter,
				struct ibmvnic_long_term_buff *ltb)
{
	struct device *dev = &adapter->vdev->dev;

	dma_free_coherent(dev, ltb->size, ltb->buff, ltb->addr);
	if (!adapter->failover)
		send_request_unmap(adapter, ltb->map_id);
}

static int alloc_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	struct device *dev = &adapter->vdev->dev;
	int i;

	pool->free_map = kcalloc(pool->size, sizeof(int), GFP_KERNEL);
	if (!pool->free_map)
		return -ENOMEM;

	pool->rx_buff = kcalloc(pool->size, sizeof(struct ibmvnic_rx_buff),
				GFP_KERNEL);
	if (!pool->rx_buff) {
		dev_err(dev, "Couldn't alloc rx buffers\n");
		kfree(pool->free_map);
		return -ENOMEM;
	}

	if (alloc_long_term_buff(adapter, &pool->long_term_buff,
				 pool->size * pool->buff_size)) {
		kfree(pool->free_map);
		kfree(pool->rx_buff);
		return -ENOMEM;
	}

	for (i = 0; i < pool->size; ++i)
		pool->free_map[i] = i;

	atomic_set(&pool->available, 0);
	pool->next_alloc = 0;
	pool->next_free = 0;

	return 0;
}
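
/* Refill every free slot in an rx pool: for each slot, allocate an skb,
 * reserve the matching offset in the pool's long term buffer, and post an
 * rx_add sub-CRQ so the server can place a received frame there. The
 * 'available' counter tracks how many buffers the server currently holds.
 */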
static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
			      struct ibmvnic_rx_pool *pool)
{
	int count = pool->size - atomic_read(&pool->available);
	struct device *dev = &adapter->vdev->dev;
	int buffers_added = 0;
	unsigned long lpar_rc;
	union sub_crq sub_crq;
	struct sk_buff *skb;
	unsigned int offset;
	dma_addr_t dma_addr;
	unsigned char *dst;
	u64 *handle_array;
	int shift = 0;
	int index;
	int i;

	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_rxadd_subcrqs));

	for (i = 0; i < count; ++i) {
		skb = alloc_skb(pool->buff_size, GFP_ATOMIC);
		if (!skb) {
			dev_err(dev, "Couldn't replenish rx buff\n");
			adapter->replenish_no_mem++;
			break;
		}

		index = pool->free_map[pool->next_free];

		if (pool->rx_buff[index].skb)
			dev_err(dev, "Inconsistent free_map!\n");

		/* Copy the skb to the long term mapped DMA buffer */
		offset = index * pool->buff_size;
		dst = pool->long_term_buff.buff + offset;
		memset(dst, 0, pool->buff_size);
		dma_addr = pool->long_term_buff.addr + offset;
		pool->rx_buff[index].data = dst;

		pool->free_map[pool->next_free] = IBMVNIC_INVALID_MAP;
		pool->rx_buff[index].dma = dma_addr;
		pool->rx_buff[index].skb = skb;
		pool->rx_buff[index].pool_index = pool->index;
		pool->rx_buff[index].size = pool->buff_size;

		memset(&sub_crq, 0, sizeof(sub_crq));
		sub_crq.rx_add.first = IBMVNIC_CRQ_CMD;
		sub_crq.rx_add.correlator =
		    cpu_to_be64((u64)&pool->rx_buff[index]);
		sub_crq.rx_add.ioba = cpu_to_be32(dma_addr);
		sub_crq.rx_add.map_id = pool->long_term_buff.map_id;

		/* The length field of the sCRQ is defined to be 24 bits so the
		 * buffer size needs to be left shifted by a byte before it is
		 * converted to big endian to prevent the last byte from being
		 * truncated.
		 */
#ifdef __LITTLE_ENDIAN__
		shift = 8;
#endif
		sub_crq.rx_add.len = cpu_to_be32(pool->buff_size << shift);

		lpar_rc = send_subcrq(adapter, handle_array[pool->index],
				      &sub_crq);
		if (lpar_rc != H_SUCCESS)
			goto failure;

		buffers_added++;
		adapter->replenish_add_buff_success++;
		pool->next_free = (pool->next_free + 1) % pool->size;
	}
	atomic_add(buffers_added, &pool->available);
	return;

failure:
	dev_info(dev, "replenish pools failure\n");
	pool->free_map[pool->next_free] = index;
	pool->rx_buff[index].skb = NULL;
	if (!dma_mapping_error(dev, dma_addr))
		dma_unmap_single(dev, dma_addr, pool->buff_size,
				 DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	adapter->replenish_add_buff_failure++;
	atomic_add(buffers_added, &pool->available);
}

static void replenish_pools(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->migrated)
		return;

	adapter->replenish_task_cycles++;
	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		if (adapter->rx_pool[i].active)
			replenish_rx_pool(adapter, &adapter->rx_pool[i]);
	}
}

static void free_rx_pool(struct ibmvnic_adapter *adapter,
			 struct ibmvnic_rx_pool *pool)
{
	int i;

	kfree(pool->free_map);
	pool->free_map = NULL;

	if (!pool->rx_buff)
		return;

	for (i = 0; i < pool->size; i++) {
		if (pool->rx_buff[i].skb) {
			dev_kfree_skb_any(pool->rx_buff[i].skb);
			pool->rx_buff[i].skb = NULL;
		}
	}
	kfree(pool->rx_buff);
	pool->rx_buff = NULL;
}
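
/* ndo_open: brings the interface up. The sequence is: allocate and enable
 * one NAPI context per rx queue, allocate the rx pools (sized from the
 * login response), allocate the tx pools and their long term buffers, map
 * the tx bounce buffer, replenish the rx pools, enable the sub-CRQ
 * interrupts, and finally ask the server for a logical link up.
 */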
static int ibmvnic_open(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_pool *tx_pool;
	union ibmvnic_crq crq;
	int rxadd_subcrqs;
	u64 *size_array;
	int tx_subcrqs;
	int i, j;

	rxadd_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	tx_subcrqs =
	    be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			     be32_to_cpu(adapter->login_rsp_buf->
					 off_rxadd_buff_size));

	adapter->napi = kcalloc(adapter->req_rx_queues,
				sizeof(struct napi_struct), GFP_KERNEL);
	if (!adapter->napi)
		goto alloc_napi_failed;
	for (i = 0; i < adapter->req_rx_queues; i++) {
		netif_napi_add(netdev, &adapter->napi[i], ibmvnic_poll,
			       NAPI_POLL_WEIGHT);
		napi_enable(&adapter->napi[i]);
	}

	adapter->rx_pool =
	    kcalloc(rxadd_subcrqs, sizeof(struct ibmvnic_rx_pool), GFP_KERNEL);
	if (!adapter->rx_pool)
		goto rx_pool_arr_alloc_failed;
	send_map_query(adapter);
	for (i = 0; i < rxadd_subcrqs; i++) {
		init_rx_pool(adapter, &adapter->rx_pool[i],
			     IBMVNIC_BUFFS_PER_POOL, i,
			     be64_to_cpu(size_array[i]), 1);
		if (alloc_rx_pool(adapter, &adapter->rx_pool[i])) {
			dev_err(dev, "Couldn't alloc rx pool\n");
			goto rx_pool_alloc_failed;
		}
	}

	adapter->tx_pool =
	    kcalloc(tx_subcrqs, sizeof(struct ibmvnic_tx_pool), GFP_KERNEL);
	if (!adapter->tx_pool)
		goto tx_pool_arr_alloc_failed;
	for (i = 0; i < tx_subcrqs; i++) {
		tx_pool = &adapter->tx_pool[i];
		tx_pool->tx_buff =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(struct ibmvnic_tx_buff), GFP_KERNEL);
		if (!tx_pool->tx_buff)
			goto tx_pool_alloc_failed;

		if (alloc_long_term_buff(adapter, &tx_pool->long_term_buff,
					 adapter->max_tx_entries_per_subcrq *
					 adapter->req_mtu))
			goto tx_ltb_alloc_failed;

		tx_pool->free_map =
		    kcalloc(adapter->max_tx_entries_per_subcrq,
			    sizeof(int), GFP_KERNEL);
		if (!tx_pool->free_map)
			goto tx_fm_alloc_failed;

		for (j = 0; j < adapter->max_tx_entries_per_subcrq; j++)
			tx_pool->free_map[j] = j;

		tx_pool->consumer_index = 0;
		tx_pool->producer_index = 0;
	}

	adapter->bounce_buffer_size =
	    (netdev->mtu + ETH_HLEN - 1) / PAGE_SIZE + 1;
	adapter->bounce_buffer = kmalloc(adapter->bounce_buffer_size,
					 GFP_KERNEL);
	if (!adapter->bounce_buffer)
		goto bounce_alloc_failed;

	adapter->bounce_buffer_dma = dma_map_single(dev, adapter->bounce_buffer,
						    adapter->bounce_buffer_size,
						    DMA_TO_DEVICE);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		dev_err(dev, "Couldn't map tx bounce buffer\n");
		goto bounce_map_failed;
	}
	replenish_pools(adapter);

	/* We're ready to receive frames, enable the sub-crq interrupts and
	 * set the logical link state to up
	 */
	for (i = 0; i < adapter->req_rx_queues; i++)
		enable_scrq_irq(adapter, adapter->rx_scrq[i]);

	for (i = 0; i < adapter->req_tx_queues; i++)
		enable_scrq_irq(adapter, adapter->tx_scrq[i]);

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_UP;
	ibmvnic_send_crq(adapter, &crq);

	netif_tx_start_all_queues(netdev);

	return 0;

bounce_map_failed:
	kfree(adapter->bounce_buffer);
bounce_alloc_failed:
	i = tx_subcrqs - 1;
	kfree(adapter->tx_pool[i].free_map);
tx_fm_alloc_failed:
	free_long_term_buff(adapter, &adapter->tx_pool[i].long_term_buff);
tx_ltb_alloc_failed:
	kfree(adapter->tx_pool[i].tx_buff);
tx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		kfree(adapter->tx_pool[j].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[j].long_term_buff);
		kfree(adapter->tx_pool[j].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;
tx_pool_arr_alloc_failed:
	i = rxadd_subcrqs;
rx_pool_alloc_failed:
	for (j = 0; j < i; j++) {
		free_rx_pool(adapter, &adapter->rx_pool[j]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[j].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;
rx_pool_arr_alloc_failed:
	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);
alloc_napi_failed:
	return -ENOMEM;
}

static int ibmvnic_close(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct device *dev = &adapter->vdev->dev;
	union ibmvnic_crq crq;
	int i;

	adapter->closing = true;

	for (i = 0; i < adapter->req_rx_queues; i++)
		napi_disable(&adapter->napi[i]);

	if (!adapter->failover)
		netif_tx_stop_all_queues(netdev);

	if (adapter->bounce_buffer) {
		if (!dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
			dma_unmap_single(&adapter->vdev->dev,
					 adapter->bounce_buffer_dma,
					 adapter->bounce_buffer_size,
					 DMA_TO_DEVICE);
			adapter->bounce_buffer_dma = DMA_ERROR_CODE;
		}
		kfree(adapter->bounce_buffer);
		adapter->bounce_buffer = NULL;
	}

	memset(&crq, 0, sizeof(crq));
	crq.logical_link_state.first = IBMVNIC_CRQ_CMD;
	crq.logical_link_state.cmd = LOGICAL_LINK_STATE;
	crq.logical_link_state.link_state = IBMVNIC_LOGICAL_LNK_DN;
	ibmvnic_send_crq(adapter, &crq);

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
	     i++) {
		kfree(adapter->tx_pool[i].tx_buff);
		free_long_term_buff(adapter,
				    &adapter->tx_pool[i].long_term_buff);
		kfree(adapter->tx_pool[i].free_map);
	}
	kfree(adapter->tx_pool);
	adapter->tx_pool = NULL;

	for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
	     i++) {
		free_rx_pool(adapter, &adapter->rx_pool[i]);
		free_long_term_buff(adapter,
				    &adapter->rx_pool[i].long_term_buff);
	}
	kfree(adapter->rx_pool);
	adapter->rx_pool = NULL;

	adapter->closing = false;

	return 0;
}

/**
 * build_hdr_data - creates L2/L3/L4 header data buffer
 * @hdr_field - bitfield determining needed headers
 * @skb - socket buffer
 * @hdr_len - array of header lengths to fill in
 * @hdr_data - buffer that receives the header data
 *
 * Reads hdr_field to determine which headers are needed by firmware.
 * Builds a buffer containing these headers. Saves individual header
 * lengths and returns the total buffer length, both of which are used
 * to build descriptors.
 */
static int build_hdr_data(u8 hdr_field, struct sk_buff *skb,
			  int *hdr_len, u8 *hdr_data)
{
	int len = 0;
	u8 *hdr;

	hdr_len[0] = sizeof(struct ethhdr);

	if (skb->protocol == htons(ETH_P_IP)) {
		hdr_len[1] = ip_hdr(skb)->ihl * 4;
		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		hdr_len[1] = sizeof(struct ipv6hdr);
		if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
			hdr_len[2] = tcp_hdrlen(skb);
		else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
			hdr_len[2] = sizeof(struct udphdr);
	}

	memset(hdr_data, 0, 120);
	if ((hdr_field >> 6) & 1) {
		hdr = skb_mac_header(skb);
		memcpy(hdr_data, hdr, hdr_len[0]);
		len += hdr_len[0];
	}

	if ((hdr_field >> 5) & 1) {
		hdr = skb_network_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[1]);
		len += hdr_len[1];
	}

	if ((hdr_field >> 4) & 1) {
		hdr = skb_transport_header(skb);
		memcpy(hdr_data + len, hdr, hdr_len[2]);
		len += hdr_len[2];
	}
	return len;
}

/**
 * create_hdr_descs - create header and header extension descriptors
 * @hdr_field - bitfield determining needed headers
 * @hdr_data - buffer containing header data
 * @len - length of data buffer
 * @hdr_len - array of individual header lengths
 * @scrq_arr - descriptor array
 *
 * Creates header and, if needed, header extension descriptors and
 * places them in a descriptor array, scrq_arr
 */
static void create_hdr_descs(u8 hdr_field, u8 *hdr_data, int len, int *hdr_len,
			     union sub_crq *scrq_arr)
{
	union sub_crq hdr_desc;
	int tmp_len = len;
	u8 *data, *cur;
	int tmp;

	while (tmp_len > 0) {
		cur = hdr_data + len - tmp_len;

		memset(&hdr_desc, 0, sizeof(hdr_desc));
		if (cur != hdr_data) {
			/* extension descriptors carry up to 29 bytes */
			data = hdr_desc.hdr_ext.data;
			tmp = tmp_len > 29 ? 29 : tmp_len;
			hdr_desc.hdr_ext.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr_ext.type = IBMVNIC_HDR_EXT_DESC;
			hdr_desc.hdr_ext.len = tmp;
		} else {
			/* the first header descriptor carries up to 24 bytes */
			data = hdr_desc.hdr.data;
			tmp = tmp_len > 24 ? 24 : tmp_len;
			hdr_desc.hdr.first = IBMVNIC_CRQ_CMD;
			hdr_desc.hdr.type = IBMVNIC_HDR_DESC;
			hdr_desc.hdr.len = tmp;
			hdr_desc.hdr.l2_len = (u8)hdr_len[0];
			hdr_desc.hdr.l3_len = cpu_to_be16((u16)hdr_len[1]);
			hdr_desc.hdr.l4_len = (u8)hdr_len[2];
			hdr_desc.hdr.flag = hdr_field << 1;
		}
		memcpy(data, cur, tmp);
		tmp_len -= tmp;
		*scrq_arr = hdr_desc;
		scrq_arr++;
	}
}

/**
 * build_hdr_descs_arr - build a header descriptor array
 * @txbuff - tx buffer holding the skb and the descriptor array
 * @num_entries - number of descriptors to be sent, updated in place
 * @hdr_field - bit field determining which headers will be sent
 *
 * This function will build a TX descriptor array with applicable
 * L2/L3/L4 packet header descriptors to be sent by send_subcrq_indirect.
 */
static void build_hdr_descs_arr(struct ibmvnic_tx_buff *txbuff,
				int *num_entries, u8 hdr_field)
{
	int hdr_len[3] = {0, 0, 0};
	int tot_len, len;
	u8 *hdr_data = txbuff->hdr_data;

	tot_len = build_hdr_data(hdr_field, txbuff->skb, hdr_len,
				 txbuff->hdr_data);
	/* the first descriptor holds 24 header bytes, each extension 29 */
	len = tot_len - 24;
	if (len > 0)
		*num_entries += len % 29 ? len / 29 + 1 : len / 29;
	create_hdr_descs(hdr_field, hdr_data, tot_len, hdr_len,
			 txbuff->indir_arr + 1);
}
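
/* ndo_start_xmit: the skb's linear data is copied into the next free slot
 * of the queue's long term buffer, so no per-packet DMA mapping is needed.
 * A v1 tx descriptor pointing at that slot is then posted with
 * H_SEND_SUB_CRQ, or with H_SEND_SUB_CRQ_INDIRECT when L2/L3/L4 header
 * descriptors must accompany it.
 */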
static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int queue_num = skb_get_queue_mapping(skb);
	u8 *hdrs = (u8 *)&adapter->tx_rx_desc_req;
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *tx_buff = NULL;
	struct ibmvnic_tx_pool *tx_pool;
	unsigned int tx_send_failed = 0;
	unsigned int tx_map_failed = 0;
	unsigned int tx_dropped = 0;
	unsigned int tx_packets = 0;
	unsigned int tx_bytes = 0;
	dma_addr_t data_dma_addr;
	struct netdev_queue *txq;
	bool used_bounce = false;
	unsigned long lpar_rc;
	union sub_crq tx_crq;
	unsigned int offset;
	int num_entries = 1;
	unsigned char *dst;
	u64 *handle_array;
	int index = 0;
	int ret = 0;

	tx_pool = &adapter->tx_pool[queue_num];
	txq = netdev_get_tx_queue(netdev, skb_get_queue_mapping(skb));
	handle_array = (u64 *)((u8 *)(adapter->login_rsp_buf) +
			       be32_to_cpu(adapter->login_rsp_buf->
					   off_txsubm_subcrqs));
	if (adapter->migrated) {
		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}

	index = tx_pool->free_map[tx_pool->consumer_index];
	offset = index * adapter->req_mtu;
	dst = tx_pool->long_term_buff.buff + offset;
	memset(dst, 0, adapter->req_mtu);
	skb_copy_from_linear_data(skb, dst, skb->len);
	data_dma_addr = tx_pool->long_term_buff.addr + offset;

	tx_pool->consumer_index =
	    (tx_pool->consumer_index + 1) %
	    adapter->max_tx_entries_per_subcrq;

	tx_buff = &tx_pool->tx_buff[index];
	tx_buff->skb = skb;
	tx_buff->data_dma[0] = data_dma_addr;
	tx_buff->data_len[0] = skb->len;
	tx_buff->index = index;
	tx_buff->pool_index = queue_num;
	tx_buff->last_frag = true;
	tx_buff->used_bounce = used_bounce;

	memset(&tx_crq, 0, sizeof(tx_crq));
	tx_crq.v1.first = IBMVNIC_CRQ_CMD;
	tx_crq.v1.type = IBMVNIC_TX_DESC;
	tx_crq.v1.n_crq_elem = 1;

	tx_crq.v1.flags1 = IBMVNIC_TX_COMP_NEEDED;
	tx_crq.v1.correlator = cpu_to_be32(index);
	tx_crq.v1.dma_reg = cpu_to_be16(tx_pool->long_term_buff.map_id);
	tx_crq.v1.sge_len = cpu_to_be32(skb->len);
	tx_crq.v1.ioba = cpu_to_be64(data_dma_addr);

	if (adapter->vlan_header_insertion) {
		tx_crq.v1.flags2 |= IBMVNIC_TX_VLAN_INSERT;
		tx_crq.v1.vlan_id = cpu_to_be16(skb->vlan_tci);
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		if (ip_hdr(skb)->version == 4)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV4;
		else if (ip_hdr(skb)->version == 6)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_IPV6;

		if (ip_hdr(skb)->protocol == IPPROTO_TCP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_TCP;
		else if (ip_hdr(skb)->protocol == IPPROTO_UDP)
			tx_crq.v1.flags1 |= IBMVNIC_TX_PROT_UDP;
	}

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_crq.v1.flags1 |= IBMVNIC_TX_CHKSUM_OFFLOAD;

	/* determine if l2/3/4 headers are sent to firmware */
	if ((*hdrs >> 7) & 1 &&
	    (skb->protocol == htons(ETH_P_IP) ||
	     skb->protocol == htons(ETH_P_IPV6))) {
		build_hdr_descs_arr(tx_buff, &num_entries, *hdrs);
		tx_crq.v1.n_crq_elem = num_entries;
		tx_buff->indir_arr[0] = tx_crq;
		tx_buff->indir_dma = dma_map_single(dev, tx_buff->indir_arr,
						    sizeof(tx_buff->indir_arr),
						    DMA_TO_DEVICE);
		if (dma_mapping_error(dev, tx_buff->indir_dma)) {
			if (!firmware_has_feature(FW_FEATURE_CMO))
				dev_err(dev, "tx: unable to map descriptor array\n");
			tx_map_failed++;
			tx_dropped++;
			ret = NETDEV_TX_BUSY;
			goto out;
		}
		lpar_rc = send_subcrq_indirect(adapter, handle_array[queue_num],
					       (u64)tx_buff->indir_dma,
					       (u64)num_entries);
	} else {
		lpar_rc = send_subcrq(adapter, handle_array[queue_num],
				      &tx_crq);
	}
	if (lpar_rc != H_SUCCESS) {
		dev_err(dev, "tx failed with code %ld\n", lpar_rc);

		if (tx_pool->consumer_index == 0)
			tx_pool->consumer_index =
				adapter->max_tx_entries_per_subcrq - 1;
		else
			tx_pool->consumer_index--;

		tx_send_failed++;
		tx_dropped++;
		ret = NETDEV_TX_BUSY;
		goto out;
	}
	tx_packets++;
	tx_bytes += skb->len;
	txq->trans_start = jiffies;
	ret = NETDEV_TX_OK;

out:
	netdev->stats.tx_dropped += tx_dropped;
	netdev->stats.tx_bytes += tx_bytes;
	netdev->stats.tx_packets += tx_packets;
	adapter->tx_send_failed += tx_send_failed;
	adapter->tx_map_failed += tx_map_failed;

	return ret;
}

static void ibmvnic_set_multi(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct netdev_hw_addr *ha;
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	if (netdev->flags & IFF_PROMISC) {
		if (!adapter->promisc_supported)
			return;
	} else {
		if (netdev->flags & IFF_ALLMULTI) {
			/* Accept all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_ENABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else if (netdev_mc_empty(netdev)) {
			/* Reject all multicast */
			memset(&crq, 0, sizeof(crq));
			crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
			crq.multicast_ctrl.cmd = MULTICAST_CTRL;
			crq.multicast_ctrl.flags = IBMVNIC_DISABLE_ALL;
			ibmvnic_send_crq(adapter, &crq);
		} else {
			/* Accept one or more multicast(s) */
			netdev_for_each_mc_addr(ha, netdev) {
				memset(&crq, 0, sizeof(crq));
				crq.multicast_ctrl.first = IBMVNIC_CRQ_CMD;
				crq.multicast_ctrl.cmd = MULTICAST_CTRL;
				crq.multicast_ctrl.flags = IBMVNIC_ENABLE_MC;
				ether_addr_copy(&crq.multicast_ctrl.mac_addr[0],
						ha->addr);
				ibmvnic_send_crq(adapter, &crq);
			}
		}
	}
}

static int ibmvnic_set_mac(struct net_device *netdev, void *p)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *addr = p;
	union ibmvnic_crq crq;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&crq, 0, sizeof(crq));
	crq.change_mac_addr.first = IBMVNIC_CRQ_CMD;
	crq.change_mac_addr.cmd = CHANGE_MAC_ADDR;
	ether_addr_copy(&crq.change_mac_addr.mac_addr[0], addr->sa_data);
	ibmvnic_send_crq(adapter, &crq);
	/* netdev->dev_addr is changed in handle_change_mac_rsp function */
	return 0;
}

static void ibmvnic_tx_timeout(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int rc;

	/* Adapter timed out, resetting it */
	release_sub_crqs(adapter);
	rc = ibmvnic_reset_crq(adapter);
	if (rc)
		dev_err(&adapter->vdev->dev, "Adapter timeout, reset failed\n");
	else
		ibmvnic_send_crq_init(adapter);
}

static void remove_buff_from_pool(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_rx_buff *rx_buff)
{
	struct ibmvnic_rx_pool *pool = &adapter->rx_pool[rx_buff->pool_index];

	rx_buff->skb = NULL;

	pool->free_map[pool->next_alloc] = (int)(rx_buff - pool->rx_buff);
	pool->next_alloc = (pool->next_alloc + 1) % pool->size;

	atomic_dec(&pool->available);
}
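
/* NAPI poll callback: consumes rx completion sub-CRQs for this queue,
 * copies each frame out of the long term buffer into its waiting skb,
 * hands it to the stack with napi_gro_receive(), and returns the buffer
 * slot to the pool.
 */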
static int ibmvnic_poll(struct napi_struct *napi, int budget)
{
	struct net_device *netdev = napi->dev;
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);
	int scrq_num = (int)(napi - adapter->napi);
	int frames_processed = 0;

restart_poll:
	while (frames_processed < budget) {
		struct sk_buff *skb;
		struct ibmvnic_rx_buff *rx_buff;
		union sub_crq *next;
		int length;
		int offset;
		u8 flags = 0;

		if (!pending_scrq(adapter, adapter->rx_scrq[scrq_num]))
			break;
		next = ibmvnic_next_scrq(adapter, adapter->rx_scrq[scrq_num]);
		rx_buff =
		    (struct ibmvnic_rx_buff *)be64_to_cpu(next->
							  rx_comp.correlator);
		/* do error checking */
		if (next->rx_comp.rc) {
			netdev_err(netdev, "rx error %x\n", next->rx_comp.rc);
			/* free the entry */
			next->rx_comp.first = 0;
			remove_buff_from_pool(adapter, rx_buff);
			break;
		}

		length = be32_to_cpu(next->rx_comp.len);
		offset = be16_to_cpu(next->rx_comp.off_frame_data);
		flags = next->rx_comp.flags;
		skb = rx_buff->skb;
		skb_copy_to_linear_data(skb, rx_buff->data + offset,
					length);
		skb->vlan_tci = be16_to_cpu(next->rx_comp.vlan_tci);
		/* free the entry */
		next->rx_comp.first = 0;
		remove_buff_from_pool(adapter, rx_buff);

		skb_put(skb, length);
		skb->protocol = eth_type_trans(skb, netdev);

		if (flags & IBMVNIC_IP_CHKSUM_GOOD &&
		    flags & IBMVNIC_TCP_UDP_CHKSUM_GOOD) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		napi_gro_receive(napi, skb); /* send it up */
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += length;
		frames_processed++;
	}
	replenish_rx_pool(adapter, &adapter->rx_pool[scrq_num]);

	if (frames_processed < budget) {
		enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
		napi_complete(napi);
		if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
		    napi_reschedule(napi)) {
			disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
			goto restart_poll;
		}
	}
	return frames_processed;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void ibmvnic_netpoll_controller(struct net_device *dev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	int i;

	replenish_pools(netdev_priv(dev));
	for (i = 0; i < adapter->req_rx_queues; i++)
		ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
				     adapter->rx_scrq[i]);
}
#endif

static const struct net_device_ops ibmvnic_netdev_ops = {
	.ndo_open		= ibmvnic_open,
	.ndo_stop		= ibmvnic_close,
	.ndo_start_xmit		= ibmvnic_xmit,
	.ndo_set_rx_mode	= ibmvnic_set_multi,
	.ndo_set_mac_address	= ibmvnic_set_mac,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_tx_timeout		= ibmvnic_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= ibmvnic_netpoll_controller,
#endif
};

/* ethtool functions */

static int ibmvnic_get_settings(struct net_device *netdev,
				struct ethtool_cmd *cmd)
{
	cmd->supported = (SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg |
			  SUPPORTED_FIBRE);
	cmd->advertising = (ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg |
			    ADVERTISED_FIBRE);
	ethtool_cmd_speed_set(cmd, SPEED_1000);
	cmd->duplex = DUPLEX_FULL;
	cmd->port = PORT_FIBRE;
	cmd->phy_address = 0;
	cmd->transceiver = XCVR_INTERNAL;
	cmd->autoneg = AUTONEG_ENABLE;
	return 0;
}

static void ibmvnic_get_drvinfo(struct net_device *dev,
				struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, ibmvnic_driver_name, sizeof(info->driver));
	strlcpy(info->version, IBMVNIC_DRIVER_VERSION, sizeof(info->version));
}

static u32 ibmvnic_get_msglevel(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ibmvnic_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

static u32 ibmvnic_get_link(struct net_device *netdev)
{
	struct ibmvnic_adapter *adapter = netdev_priv(netdev);

	/* Don't need to send a query because we request a logical link up at
	 * init and then we wait for link state indications
	 */
	return adapter->logical_link_state;
}

static void ibmvnic_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	ring->rx_max_pending = 0;
	ring->tx_max_pending = 0;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = 0;
	ring->tx_pending = 0;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}

static void ibmvnic_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++, data += ETH_GSTRING_LEN)
		memcpy(data, ibmvnic_stats[i].name, ETH_GSTRING_LEN);
}

static int ibmvnic_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ibmvnic_stats);
	default:
		return -EOPNOTSUPP;
	}
}

static void ibmvnic_get_ethtool_stats(struct net_device *dev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ibmvnic_adapter *adapter = netdev_priv(dev);
	union ibmvnic_crq crq;
	int i;

	memset(&crq, 0, sizeof(crq));
	crq.request_statistics.first = IBMVNIC_CRQ_CMD;
	crq.request_statistics.cmd = REQUEST_STATISTICS;
	crq.request_statistics.ioba = cpu_to_be32(adapter->stats_token);
	crq.request_statistics.len =
	    cpu_to_be32(sizeof(struct ibmvnic_statistics));

	/* Initialize the completion before sending the request, then wait
	 * for the data to be written by the response handler.
	 */
	init_completion(&adapter->stats_done);
	ibmvnic_send_crq(adapter, &crq);
	wait_for_completion(&adapter->stats_done);

	for (i = 0; i < ARRAY_SIZE(ibmvnic_stats); i++)
		data[i] = IBMVNIC_GET_STAT(adapter, ibmvnic_stats[i].offset);
}

static const struct ethtool_ops ibmvnic_ethtool_ops = {
	.get_settings		= ibmvnic_get_settings,
	.get_drvinfo		= ibmvnic_get_drvinfo,
	.get_msglevel		= ibmvnic_get_msglevel,
	.set_msglevel		= ibmvnic_set_msglevel,
	.get_link		= ibmvnic_get_link,
	.get_ringparam		= ibmvnic_get_ringparam,
	.get_strings		= ibmvnic_get_strings,
	.get_sset_count		= ibmvnic_get_sset_count,
	.get_ethtool_stats	= ibmvnic_get_ethtool_stats,
};

/* Routines for managing CRQs/sCRQs */

static void release_sub_crq_queue(struct ibmvnic_adapter *adapter,
				  struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	long rc;

	netdev_dbg(adapter->netdev, "Releasing sub-CRQ\n");

	/* Close the sub-crqs */
	do {
		rc = plpar_hcall_norets(H_FREE_SUB_CRQ,
					adapter->vdev->unit_address,
					scrq->crq_num);
	} while (rc == H_BUSY || H_IS_LONG_BUSY(rc));

	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
	free_pages((unsigned long)scrq->msgs, 2);
	kfree(scrq);
}
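
/* Allocate and register one sub-CRQ: four pages are used as the message
 * ring, DMA-mapped, then registered with the hypervisor through
 * h_reg_sub_crq(), which hands back the queue number and the interrupt
 * source later mapped to a Linux irq by init_sub_crq_irqs().
 */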
static struct ibmvnic_sub_crq_queue *init_sub_crq_queue(struct ibmvnic_adapter
							*adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int rc;

	scrq = kmalloc(sizeof(*scrq), GFP_ATOMIC);
	if (!scrq)
		return NULL;

	scrq->msgs = (union sub_crq *)__get_free_pages(GFP_ATOMIC, 2);
	if (!scrq->msgs) {
		dev_warn(dev, "Couldn't allocate crq queue messages page\n");
		goto zero_page_failed;
	}
	memset(scrq->msgs, 0, 4 * PAGE_SIZE);

	scrq->msg_token = dma_map_single(dev, scrq->msgs, 4 * PAGE_SIZE,
					 DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, scrq->msg_token)) {
		dev_warn(dev, "Couldn't map crq queue messages page\n");
		goto map_failed;
	}

	rc = h_reg_sub_crq(adapter->vdev->unit_address, scrq->msg_token,
			   4 * PAGE_SIZE, &scrq->crq_num, &scrq->hw_irq);

	if (rc == H_RESOURCE)
		rc = ibmvnic_reset_crq(adapter);

	if (rc == H_CLOSED) {
		dev_warn(dev, "Partner adapter not ready, waiting.\n");
	} else if (rc) {
		dev_warn(dev, "Error %d registering sub-crq\n", rc);
		goto reg_failed;
	}

	scrq->adapter = adapter;
	scrq->size = 4 * PAGE_SIZE / sizeof(*scrq->msgs);
	scrq->cur = 0;
	scrq->rx_skb_top = NULL;
	spin_lock_init(&scrq->lock);

	netdev_dbg(adapter->netdev,
		   "sub-crq initialized, num %lx, hw_irq=%lx, irq=%x\n",
		   scrq->crq_num, scrq->hw_irq, scrq->irq);

	return scrq;

reg_failed:
	dma_unmap_single(dev, scrq->msg_token, 4 * PAGE_SIZE,
			 DMA_BIDIRECTIONAL);
map_failed:
	free_pages((unsigned long)scrq->msgs, 2);
zero_page_failed:
	kfree(scrq);

	return NULL;
}

static void release_sub_crqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i]) {
				free_irq(adapter->tx_scrq[i]->irq,
					 adapter->tx_scrq[i]);
				irq_dispose_mapping(adapter->tx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
			}
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i]) {
				free_irq(adapter->rx_scrq[i]->irq,
					 adapter->rx_scrq[i]);
				irq_dispose_mapping(adapter->rx_scrq[i]->irq);
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
			}
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}

static void release_sub_crqs_no_irqs(struct ibmvnic_adapter *adapter)
{
	int i;

	if (adapter->tx_scrq) {
		for (i = 0; i < adapter->req_tx_queues; i++)
			if (adapter->tx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->tx_scrq[i]);
		adapter->tx_scrq = NULL;
	}

	if (adapter->rx_scrq) {
		for (i = 0; i < adapter->req_rx_queues; i++)
			if (adapter->rx_scrq[i])
				release_sub_crq_queue(adapter,
						      adapter->rx_scrq[i]);
		adapter->rx_scrq = NULL;
	}

	adapter->requested_caps = 0;
}

static int disable_scrq_irq(struct ibmvnic_adapter *adapter,
			    struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_DISABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't disable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}

static int enable_scrq_irq(struct ibmvnic_adapter *adapter,
			   struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	unsigned long rc;

	if (scrq->hw_irq > 0x100000000ULL) {
		dev_err(dev, "bad hw_irq = %lx\n", scrq->hw_irq);
		return 1;
	}

	rc = plpar_hcall_norets(H_VIOCTL, adapter->vdev->unit_address,
				H_ENABLE_VIO_INTERRUPT, scrq->hw_irq, 0, 0);
	if (rc)
		dev_err(dev, "Couldn't enable scrq irq 0x%lx. rc=%ld\n",
			scrq->hw_irq, rc);
	return rc;
}
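
/* Process tx completion sub-CRQs: for each completed correlator, unmap
 * the indirect descriptor array if one was used, free the skb on the last
 * fragment, and return the tx_buff index to the pool's free_map.
 */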
static int ibmvnic_complete_tx(struct ibmvnic_adapter *adapter,
			       struct ibmvnic_sub_crq_queue *scrq)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_tx_buff *txbuff;
	union sub_crq *next;
	int index;
	int i, j;
	u8 first;

restart_loop:
	while (pending_scrq(adapter, scrq)) {
		unsigned int pool = scrq->pool_index;

		next = ibmvnic_next_scrq(adapter, scrq);
		for (i = 0; i < next->tx_comp.num_comps; i++) {
			if (next->tx_comp.rcs[i]) {
				dev_err(dev, "tx error %x\n",
					next->tx_comp.rcs[i]);
				continue;
			}
			index = be32_to_cpu(next->tx_comp.correlators[i]);
			txbuff = &adapter->tx_pool[pool].tx_buff[index];

			for (j = 0; j < IBMVNIC_MAX_FRAGS_PER_CRQ; j++) {
				if (!txbuff->data_dma[j])
					continue;

				txbuff->data_dma[j] = 0;
				txbuff->used_bounce = false;
			}
			/* if sub_crq was sent indirectly */
			first = txbuff->indir_arr[0].generic.first;
			if (first == IBMVNIC_CRQ_CMD) {
				dma_unmap_single(dev, txbuff->indir_dma,
						 sizeof(txbuff->indir_arr),
						 DMA_TO_DEVICE);
			}

			if (txbuff->last_frag)
				dev_kfree_skb_any(txbuff->skb);

			adapter->tx_pool[pool].free_map[adapter->tx_pool[pool].
						     producer_index] = index;
			adapter->tx_pool[pool].producer_index =
			    (adapter->tx_pool[pool].producer_index + 1) %
			    adapter->max_tx_entries_per_subcrq;
		}
		/* remove tx_comp scrq */
		next->tx_comp.first = 0;
	}

	enable_scrq_irq(adapter, scrq);

	if (pending_scrq(adapter, scrq)) {
		disable_scrq_irq(adapter, scrq);
		goto restart_loop;
	}

	return 0;
}

static irqreturn_t ibmvnic_interrupt_tx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	disable_scrq_irq(adapter, scrq);
	ibmvnic_complete_tx(adapter, scrq);

	return IRQ_HANDLED;
}

static irqreturn_t ibmvnic_interrupt_rx(int irq, void *instance)
{
	struct ibmvnic_sub_crq_queue *scrq = instance;
	struct ibmvnic_adapter *adapter = scrq->adapter;

	if (napi_schedule_prep(&adapter->napi[scrq->scrq_num])) {
		disable_scrq_irq(adapter, scrq);
		__napi_schedule(&adapter->napi[scrq->scrq_num]);
	}

	return IRQ_HANDLED;
}

static int init_sub_crq_irqs(struct ibmvnic_adapter *adapter)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue *scrq;
	int i = 0, j = 0;
	int rc = 0;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		scrq = adapter->tx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_tx_irq_failed;
		}

		rc = request_irq(scrq->irq, ibmvnic_interrupt_tx,
				 0, "ibmvnic_tx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register tx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			/* only the tx irqs registered so far need unwinding */
			goto req_tx_irq_failed;
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		scrq = adapter->rx_scrq[i];
		scrq->irq = irq_create_mapping(NULL, scrq->hw_irq);
		if (!scrq->irq) {
			rc = -EINVAL;
			dev_err(dev, "Error mapping irq\n");
			goto req_rx_irq_failed;
		}
		rc = request_irq(scrq->irq, ibmvnic_interrupt_rx,
				 0, "ibmvnic_rx", scrq);
		if (rc) {
			dev_err(dev, "Couldn't register rx irq 0x%x. rc=%d\n",
				scrq->irq, rc);
			irq_dispose_mapping(scrq->irq);
			goto req_rx_irq_failed;
		}
	}
	return rc;

req_rx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->rx_scrq[j]->irq, adapter->rx_scrq[j]);
		irq_dispose_mapping(adapter->rx_scrq[j]->irq);
	}
	i = adapter->req_tx_queues;
req_tx_irq_failed:
	for (j = 0; j < i; j++) {
		free_irq(adapter->tx_scrq[j]->irq, adapter->tx_scrq[j]);
		irq_dispose_mapping(adapter->tx_scrq[j]->irq);
	}
	release_sub_crqs_no_irqs(adapter);
	return rc;
}
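
/* First half of sub-CRQ setup: clamp the requested ring sizes to what
 * fits in the 4-page message ring, try to register the maximum number of
 * queues, fall back toward the advertised minimums if some registrations
 * fail, and then tell the server what was actually obtained via
 * REQUEST_CAPABILITY CRQs.
 */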
static void init_sub_crqs(struct ibmvnic_adapter *adapter, int retry)
{
	struct device *dev = &adapter->vdev->dev;
	struct ibmvnic_sub_crq_queue **allqueues;
	int registered_queues = 0;
	union ibmvnic_crq crq;
	int total_queues;
	int more = 0;
	int i, j;

	if (!retry) {
		/* Sub-CRQ entries are 32 bytes long */
		int entries_page = 4 * PAGE_SIZE / (sizeof(u64) * 4);

		if (adapter->min_tx_entries_per_subcrq > entries_page ||
		    adapter->min_rx_add_entries_per_subcrq > entries_page) {
			dev_err(dev, "Fatal, invalid entries per sub-crq\n");
			goto allqueues_failed;
		}

		/* Get the minimum between the queried max and the entries
		 * that fit in our PAGE_SIZE
		 */
		adapter->req_tx_entries_per_subcrq =
		    adapter->max_tx_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_tx_entries_per_subcrq;
		adapter->req_rx_add_entries_per_subcrq =
		    adapter->max_rx_add_entries_per_subcrq > entries_page ?
		    entries_page : adapter->max_rx_add_entries_per_subcrq;

		/* Choosing the maximum number of queues supported by firmware */
		adapter->req_tx_queues = adapter->max_tx_queues;
		adapter->req_rx_queues = adapter->max_rx_queues;
		adapter->req_rx_add_queues = adapter->max_rx_add_queues;

		adapter->req_mtu = adapter->max_mtu;
	}

	total_queues = adapter->req_tx_queues + adapter->req_rx_queues;

	allqueues = kcalloc(total_queues, sizeof(*allqueues), GFP_ATOMIC);
	if (!allqueues)
		goto allqueues_failed;

	for (i = 0; i < total_queues; i++) {
		allqueues[i] = init_sub_crq_queue(adapter);
		if (!allqueues[i]) {
			dev_warn(dev, "Couldn't allocate all sub-crqs\n");
			break;
		}
		registered_queues++;
	}

	/* Make sure we were able to register the minimum number of queues */
	if (registered_queues <
	    adapter->min_tx_queues + adapter->min_rx_queues) {
		dev_err(dev, "Fatal: Couldn't init min number of sub-crqs\n");
		goto tx_failed;
	}

	/* Distribute the shortfall over the requested rx/tx queue counts */
	for (i = 0; i < total_queues - registered_queues + more; i++) {
		netdev_dbg(adapter->netdev, "Reducing number of queues\n");
		switch (i % 3) {
		case 0:
			if (adapter->req_rx_queues > adapter->min_rx_queues)
				adapter->req_rx_queues--;
			else
				more++;
			break;
		case 1:
			if (adapter->req_tx_queues > adapter->min_tx_queues)
				adapter->req_tx_queues--;
			else
				more++;
			break;
		}
	}

	adapter->tx_scrq = kcalloc(adapter->req_tx_queues,
				   sizeof(*adapter->tx_scrq), GFP_ATOMIC);
	if (!adapter->tx_scrq)
		goto tx_failed;

	for (i = 0; i < adapter->req_tx_queues; i++) {
		adapter->tx_scrq[i] = allqueues[i];
		adapter->tx_scrq[i]->pool_index = i;
	}

	adapter->rx_scrq = kcalloc(adapter->req_rx_queues,
				   sizeof(*adapter->rx_scrq), GFP_ATOMIC);
	if (!adapter->rx_scrq)
		goto rx_failed;

	for (i = 0; i < adapter->req_rx_queues; i++) {
		adapter->rx_scrq[i] = allqueues[i + adapter->req_tx_queues];
		adapter->rx_scrq[i]->scrq_num = i;
	}

	memset(&crq, 0, sizeof(crq));
	crq.request_capability.first = IBMVNIC_CRQ_CMD;
	crq.request_capability.cmd = REQUEST_CAPABILITY;

	crq.request_capability.capability = cpu_to_be16(REQ_TX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_tx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_RX_ADD_QUEUES);
	crq.request_capability.number = cpu_to_be64(adapter->req_rx_add_queues);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_TX_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_tx_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability =
	    cpu_to_be16(REQ_RX_ADD_ENTRIES_PER_SUBCRQ);
	crq.request_capability.number =
	    cpu_to_be64(adapter->req_rx_add_entries_per_subcrq);
	ibmvnic_send_crq(adapter, &crq);

	crq.request_capability.capability = cpu_to_be16(REQ_MTU);
	crq.request_capability.number = cpu_to_be64(adapter->req_mtu);
	ibmvnic_send_crq(adapter, &crq);

	if (adapter->netdev->flags & IFF_PROMISC) {
		if (adapter->promisc_supported) {
			crq.request_capability.capability =
			    cpu_to_be16(PROMISC_REQUESTED);
			crq.request_capability.number = cpu_to_be64(1);
			ibmvnic_send_crq(adapter, &crq);
		}
	} else {
		crq.request_capability.capability =
		    cpu_to_be16(PROMISC_REQUESTED);
		crq.request_capability.number = cpu_to_be64(0);
		ibmvnic_send_crq(adapter, &crq);
	}

	kfree(allqueues);

	return;

rx_failed:
	kfree(adapter->tx_scrq);
	adapter->tx_scrq = NULL;
tx_failed:
	for (i = 0; i < registered_queues; i++)
		release_sub_crq_queue(adapter, allqueues[i]);
	kfree(allqueues);
allqueues_failed:
	ibmvnic_remove(adapter->vdev);
}
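
/* Ring consumption protocol shared by pending_scrq()/ibmvnic_next_scrq()
 * below: an entry is valid while its first byte has IBMVNIC_CRQ_CMD_RSP
 * set; the consumer advances 'cur' with wraparound, and the handler
 * clears 'first' (the "next->...first = 0" statements above) once an
 * entry has been processed so the slot can be reused.
 */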
static int pending_scrq(struct ibmvnic_adapter *adapter,
			struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry = &scrq->msgs[scrq->cur];

	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP || adapter->closing)
		return 1;
	else
		return 0;
}

static union sub_crq *ibmvnic_next_scrq(struct ibmvnic_adapter *adapter,
					struct ibmvnic_sub_crq_queue *scrq)
{
	union sub_crq *entry;
	unsigned long flags;

	spin_lock_irqsave(&scrq->lock, flags);
	entry = &scrq->msgs[scrq->cur];
	if (entry->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++scrq->cur == scrq->size)
			scrq->cur = 0;
	} else {
		entry = NULL;
	}
	spin_unlock_irqrestore(&scrq->lock, flags);

	return entry;
}

static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_crq_queue *queue = &adapter->crq;
	union ibmvnic_crq *crq;

	crq = &queue->msgs[queue->cur];
	if (crq->generic.first & IBMVNIC_CRQ_CMD_RSP) {
		if (++queue->cur == queue->size)
			queue->cur = 0;
	} else {
		crq = NULL;
	}

	return crq;
}
1673 union sub_crq *sub_crq)
1675 unsigned int ua = adapter->vdev->unit_address;
1676 struct device *dev = &adapter->vdev->dev;
1677 u64 *u64_crq = (u64 *)sub_crq;
1680 netdev_dbg(adapter->netdev,
1681 "Sending sCRQ %016lx: %016lx %016lx %016lx %016lx\n",
1682 (unsigned long int)cpu_to_be64(remote_handle),
1683 (unsigned long int)cpu_to_be64(u64_crq[0]),
1684 (unsigned long int)cpu_to_be64(u64_crq[1]),
1685 (unsigned long int)cpu_to_be64(u64_crq[2]),
1686 (unsigned long int)cpu_to_be64(u64_crq[3]));
1688 /* Make sure the hypervisor sees the complete request */
1691 rc = plpar_hcall_norets(H_SEND_SUB_CRQ, ua,
1692 cpu_to_be64(remote_handle),
1693 cpu_to_be64(u64_crq[0]),
1694 cpu_to_be64(u64_crq[1]),
1695 cpu_to_be64(u64_crq[2]),
1696 cpu_to_be64(u64_crq[3]));
1700 dev_warn(dev, "CRQ Queue closed\n");
1701 dev_err(dev, "Send error (rc=%d)\n", rc);

static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
				u64 remote_handle, u64 ioba, u64 num_entries)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	int rc;

	/* Make sure the hypervisor sees the complete request */
	mb();
	rc = plpar_hcall_norets(H_SEND_SUB_CRQ_INDIRECT, ua,
				cpu_to_be64(remote_handle),
				ioba, num_entries);

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq(struct ibmvnic_adapter *adapter,
			    union ibmvnic_crq *crq)
{
	unsigned int ua = adapter->vdev->unit_address;
	struct device *dev = &adapter->vdev->dev;
	u64 *u64_crq = (u64 *)crq;
	int rc;

	netdev_dbg(adapter->netdev, "Sending CRQ: %016lx %016lx\n",
		   (unsigned long int)cpu_to_be64(u64_crq[0]),
		   (unsigned long int)cpu_to_be64(u64_crq[1]));

	/* Make sure the hypervisor sees the complete request */
	mb();

	rc = plpar_hcall_norets(H_SEND_CRQ, ua,
				cpu_to_be64(u64_crq[0]),
				cpu_to_be64(u64_crq[1]));

	if (rc) {
		if (rc == H_CLOSED)
			dev_warn(dev, "CRQ Queue closed\n");
		dev_warn(dev, "Send error (rc=%d)\n", rc);
	}

	return rc;
}

static int ibmvnic_send_crq_init(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT;
	netdev_dbg(adapter->netdev, "Sending CRQ init\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int ibmvnic_send_crq_init_complete(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.generic.first = IBMVNIC_CRQ_INIT_CMD;
	crq.generic.cmd = IBMVNIC_CRQ_INIT_COMPLETE;
	netdev_dbg(adapter->netdev, "Sending CRQ init complete\n");

	return ibmvnic_send_crq(adapter, &crq);
}

static int send_version_xchg(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.version_exchange.first = IBMVNIC_CRQ_CMD;
	crq.version_exchange.cmd = VERSION_EXCHANGE;
	crq.version_exchange.version = cpu_to_be16(ibmvnic_version);

	return ibmvnic_send_crq(adapter, &crq);
}
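
/* Build and send the LOGIN CRQ. The login buffer carries the counts and
 * offsets of two variable-length arrays holding the tx and rx sub-CRQ
 * handles; the response buffer is mapped DMA_FROM_DEVICE so the server
 * can write the negotiated results back. The command is also queued on
 * adapter->inflight so it can be cleaned up if the adapter resets.
 */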
static void send_login(struct ibmvnic_adapter *adapter)
{
	struct ibmvnic_login_rsp_buffer *login_rsp_buffer;
	struct ibmvnic_login_buffer *login_buffer;
	struct ibmvnic_inflight_cmd *inflight_cmd;
	struct device *dev = &adapter->vdev->dev;
	dma_addr_t rsp_buffer_token;
	dma_addr_t buffer_token;
	size_t rsp_buffer_size;
	union ibmvnic_crq crq;
	unsigned long flags;
	size_t buffer_size;
	__be64 *tx_list_p;
	__be64 *rx_list_p;
	int i;

	buffer_size =
	    sizeof(struct ibmvnic_login_buffer) +
	    sizeof(u64) * (adapter->req_tx_queues + adapter->req_rx_queues);

	login_buffer = kmalloc(buffer_size, GFP_ATOMIC);
	if (!login_buffer)
		goto buf_alloc_failed;

	buffer_token = dma_map_single(dev, login_buffer, buffer_size,
				      DMA_TO_DEVICE);
	if (dma_mapping_error(dev, buffer_token)) {
		dev_err(dev, "Couldn't map login buffer\n");
		goto buf_map_failed;
	}

	rsp_buffer_size = sizeof(struct ibmvnic_login_rsp_buffer) +
			  sizeof(u64) * adapter->req_tx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u64) * adapter->req_rx_queues +
			  sizeof(u8) * IBMVNIC_TX_DESC_VERSIONS;

	login_rsp_buffer = kmalloc(rsp_buffer_size, GFP_ATOMIC);
	if (!login_rsp_buffer)
		goto buf_rsp_alloc_failed;

	rsp_buffer_token = dma_map_single(dev, login_rsp_buffer,
					  rsp_buffer_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, rsp_buffer_token)) {
		dev_err(dev, "Couldn't map login rsp buffer\n");
		goto buf_rsp_map_failed;
	}
	inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
	if (!inflight_cmd) {
		dev_err(dev, "Couldn't allocate inflight_cmd\n");
		goto inflight_alloc_failed;
	}
	adapter->login_buf = login_buffer;
	adapter->login_buf_token = buffer_token;
	adapter->login_buf_sz = buffer_size;
	adapter->login_rsp_buf = login_rsp_buffer;
	adapter->login_rsp_buf_token = rsp_buffer_token;
	adapter->login_rsp_buf_sz = rsp_buffer_size;

	login_buffer->len = cpu_to_be32(buffer_size);
	login_buffer->version = cpu_to_be32(INITIAL_VERSION_LB);
	login_buffer->num_txcomp_subcrqs = cpu_to_be32(adapter->req_tx_queues);
	login_buffer->off_txcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer));
	login_buffer->num_rxcomp_subcrqs = cpu_to_be32(adapter->req_rx_queues);
	login_buffer->off_rxcomp_subcrqs =
	    cpu_to_be32(sizeof(struct ibmvnic_login_buffer) +
			sizeof(u64) * adapter->req_tx_queues);
	login_buffer->login_rsp_ioba = cpu_to_be32(rsp_buffer_token);
	login_buffer->login_rsp_len = cpu_to_be32(rsp_buffer_size);

	tx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer));
	rx_list_p = (__be64 *)((char *)login_buffer +
			       sizeof(struct ibmvnic_login_buffer) +
			       sizeof(u64) * adapter->req_tx_queues);

	for (i = 0; i < adapter->req_tx_queues; i++) {
		if (adapter->tx_scrq[i]) {
			tx_list_p[i] = cpu_to_be64(adapter->tx_scrq[i]->
						   crq_num);
		}
	}

	for (i = 0; i < adapter->req_rx_queues; i++) {
		if (adapter->rx_scrq[i]) {
			rx_list_p[i] = cpu_to_be64(adapter->rx_scrq[i]->
						   crq_num);
		}
	}

	netdev_dbg(adapter->netdev, "Login Buffer:\n");
	for (i = 0; i < (adapter->login_buf_sz - 1) / 8 + 1; i++) {
		netdev_dbg(adapter->netdev, "%016lx\n",
			   ((unsigned long int *)(adapter->login_buf))[i]);
	}

	memset(&crq, 0, sizeof(crq));
	crq.login.first = IBMVNIC_CRQ_CMD;
	crq.login.cmd = LOGIN;
	crq.login.ioba = cpu_to_be32(buffer_token);
	crq.login.len = cpu_to_be32(buffer_size);

	memcpy(&inflight_cmd->crq, &crq, sizeof(crq));

	spin_lock_irqsave(&adapter->inflight_lock, flags);
	list_add_tail(&inflight_cmd->list, &adapter->inflight);
	spin_unlock_irqrestore(&adapter->inflight_lock, flags);

	ibmvnic_send_crq(adapter, &crq);

	return;

inflight_alloc_failed:
	dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size,
			 DMA_FROM_DEVICE);
buf_rsp_map_failed:
	kfree(login_rsp_buffer);
buf_rsp_alloc_failed:
	dma_unmap_single(dev, buffer_token, buffer_size, DMA_TO_DEVICE);
buf_map_failed:
	kfree(login_buffer);
buf_alloc_failed:
	return;
}

static void send_request_map(struct ibmvnic_adapter *adapter, dma_addr_t addr,
			     __be32 len, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_map.first = IBMVNIC_CRQ_CMD;
	crq.request_map.cmd = REQUEST_MAP;
	crq.request_map.map_id = map_id;
	crq.request_map.ioba = cpu_to_be32(addr);
	crq.request_map.len = cpu_to_be32(len);
	ibmvnic_send_crq(adapter, &crq);
}

static void send_request_unmap(struct ibmvnic_adapter *adapter, u8 map_id)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.request_unmap.first = IBMVNIC_CRQ_CMD;
	crq.request_unmap.cmd = REQUEST_UNMAP;
	crq.request_unmap.map_id = map_id;
	ibmvnic_send_crq(adapter, &crq);
}

static void send_map_query(struct ibmvnic_adapter *adapter)
{
	union ibmvnic_crq crq;

	memset(&crq, 0, sizeof(crq));
	crq.query_map.first = IBMVNIC_CRQ_CMD;
	crq.query_map.cmd = QUERY_MAP;
	ibmvnic_send_crq(adapter, &crq);
}

/* Send a series of CRQs requesting various capabilities of the VNIC server */
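/* Each query bumps the running_cap_queries counter; the response handler
 * (not shown in this excerpt) decrements it and kicks off the next
 * initialization step once all outstanding queries have been answered.
 */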
1955 static void send_cap_queries(struct ibmvnic_adapter *adapter)
1957 union ibmvnic_crq crq;
1959 atomic_set(&adapter->running_cap_queries, 0);
1960 memset(&crq, 0, sizeof(crq));
1961 crq.query_capability.first = IBMVNIC_CRQ_CMD;
1962 crq.query_capability.cmd = QUERY_CAPABILITY;
1964 crq.query_capability.capability = cpu_to_be16(MIN_TX_QUEUES);
1965 atomic_inc(&adapter->running_cap_queries);
1966 ibmvnic_send_crq(adapter, &crq);
1968 crq.query_capability.capability = cpu_to_be16(MIN_RX_QUEUES);
1969 atomic_inc(&adapter->running_cap_queries);
1970 ibmvnic_send_crq(adapter, &crq);
1972 crq.query_capability.capability = cpu_to_be16(MIN_RX_ADD_QUEUES);
1973 atomic_inc(&adapter->running_cap_queries);
1974 ibmvnic_send_crq(adapter, &crq);
1976 crq.query_capability.capability = cpu_to_be16(MAX_TX_QUEUES);
1977 atomic_inc(&adapter->running_cap_queries);
1978 ibmvnic_send_crq(adapter, &crq);
1980 crq.query_capability.capability = cpu_to_be16(MAX_RX_QUEUES);
1981 atomic_inc(&adapter->running_cap_queries);
1982 ibmvnic_send_crq(adapter, &crq);
1984 crq.query_capability.capability = cpu_to_be16(MAX_RX_ADD_QUEUES);
1985 atomic_inc(&adapter->running_cap_queries);
1986 ibmvnic_send_crq(adapter, &crq);
1988 crq.query_capability.capability =
1989 cpu_to_be16(MIN_TX_ENTRIES_PER_SUBCRQ);
1990 atomic_inc(&adapter->running_cap_queries);
1991 ibmvnic_send_crq(adapter, &crq);
1993 crq.query_capability.capability =
1994 cpu_to_be16(MIN_RX_ADD_ENTRIES_PER_SUBCRQ);
1995 atomic_inc(&adapter->running_cap_queries);
1996 ibmvnic_send_crq(adapter, &crq);
1998 crq.query_capability.capability =
1999 cpu_to_be16(MAX_TX_ENTRIES_PER_SUBCRQ);
2000 atomic_inc(&adapter->running_cap_queries);
2001 ibmvnic_send_crq(adapter, &crq);
2003 crq.query_capability.capability =
2004 cpu_to_be16(MAX_RX_ADD_ENTRIES_PER_SUBCRQ);
2005 atomic_inc(&adapter->running_cap_queries);
2006 ibmvnic_send_crq(adapter, &crq);
2008 crq.query_capability.capability = cpu_to_be16(TCP_IP_OFFLOAD);
2009 atomic_inc(&adapter->running_cap_queries);
2010 ibmvnic_send_crq(adapter, &crq);
2012 crq.query_capability.capability = cpu_to_be16(PROMISC_SUPPORTED);
2013 atomic_inc(&adapter->running_cap_queries);
2014 ibmvnic_send_crq(adapter, &crq);
2016 crq.query_capability.capability = cpu_to_be16(MIN_MTU);
2017 atomic_inc(&adapter->running_cap_queries);
2018 ibmvnic_send_crq(adapter, &crq);
2020 crq.query_capability.capability = cpu_to_be16(MAX_MTU);
2021 atomic_inc(&adapter->running_cap_queries);
2022 ibmvnic_send_crq(adapter, &crq);
2024 crq.query_capability.capability = cpu_to_be16(MAX_MULTICAST_FILTERS);
2025 atomic_inc(&adapter->running_cap_queries);
2026 ibmvnic_send_crq(adapter, &crq);
2028 crq.query_capability.capability = cpu_to_be16(VLAN_HEADER_INSERTION);
2029 atomic_inc(&adapter->running_cap_queries);
2030 ibmvnic_send_crq(adapter, &crq);
2032 crq.query_capability.capability = cpu_to_be16(MAX_TX_SG_ENTRIES);
2033 atomic_inc(&adapter->running_cap_queries);
2034 ibmvnic_send_crq(adapter, &crq);
2036 crq.query_capability.capability = cpu_to_be16(RX_SG_SUPPORTED);
2037 atomic_inc(&adapter->running_cap_queries);
2038 ibmvnic_send_crq(adapter, &crq);
2040 crq.query_capability.capability = cpu_to_be16(OPT_TX_COMP_SUB_QUEUES);
2041 atomic_inc(&adapter->running_cap_queries);
2042 ibmvnic_send_crq(adapter, &crq);
2044 crq.query_capability.capability = cpu_to_be16(OPT_RX_COMP_QUEUES);
2045 atomic_inc(&adapter->running_cap_queries);
2046 ibmvnic_send_crq(adapter, &crq);
2048 crq.query_capability.capability =
2049 cpu_to_be16(OPT_RX_BUFADD_Q_PER_RX_COMP_Q);
2050 atomic_inc(&adapter->running_cap_queries);
2051 ibmvnic_send_crq(adapter, &crq);
2053 crq.query_capability.capability =
2054 cpu_to_be16(OPT_TX_ENTRIES_PER_SUBCRQ);
2055 atomic_inc(&adapter->running_cap_queries);
2056 ibmvnic_send_crq(adapter, &crq);
2058 crq.query_capability.capability =
2059 cpu_to_be16(OPT_RXBA_ENTRIES_PER_SUBCRQ);
2060 atomic_inc(&adapter->running_cap_queries);
2061 ibmvnic_send_crq(adapter, &crq);
2063 crq.query_capability.capability = cpu_to_be16(TX_RX_DESC_REQ);
2064 atomic_inc(&adapter->running_cap_queries);
2065 ibmvnic_send_crq(adapter, &crq);
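2066 /* Parse the QUERY_IP_OFFLOAD response, derive netdev->features from the
2067 checksum bits, and push the chosen settings back via CONTROL_IP_OFFLOAD. */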
2068 static void handle_query_ip_offload_rsp(struct ibmvnic_adapter *adapter)
2070 struct device *dev = &adapter->vdev->dev;
2071 struct ibmvnic_query_ip_offload_buffer *buf = &adapter->ip_offload_buf;
2072 union ibmvnic_crq crq;
2075 dma_unmap_single(dev, adapter->ip_offload_tok,
2076 sizeof(adapter->ip_offload_buf), DMA_FROM_DEVICE);
2078 netdev_dbg(adapter->netdev, "Query IP Offload Buffer:\n");
2079 for (i = 0; i < (sizeof(adapter->ip_offload_buf) - 1) / 8 + 1; i++)
2080 netdev_dbg(adapter->netdev, "%016lx\n",
2081 ((unsigned long int *)(buf))[i]);
2083 netdev_dbg(adapter->netdev, "ipv4_chksum = %d\n", buf->ipv4_chksum);
2084 netdev_dbg(adapter->netdev, "ipv6_chksum = %d\n", buf->ipv6_chksum);
2085 netdev_dbg(adapter->netdev, "tcp_ipv4_chksum = %d\n",
2086 buf->tcp_ipv4_chksum);
2087 netdev_dbg(adapter->netdev, "tcp_ipv6_chksum = %d\n",
2088 buf->tcp_ipv6_chksum);
2089 netdev_dbg(adapter->netdev, "udp_ipv4_chksum = %d\n",
2090 buf->udp_ipv4_chksum);
2091 netdev_dbg(adapter->netdev, "udp_ipv6_chksum = %d\n",
2092 buf->udp_ipv6_chksum);
2093 netdev_dbg(adapter->netdev, "large_tx_ipv4 = %d\n",
2094 buf->large_tx_ipv4);
2095 netdev_dbg(adapter->netdev, "large_tx_ipv6 = %d\n",
2096 buf->large_tx_ipv6);
2097 netdev_dbg(adapter->netdev, "large_rx_ipv4 = %d\n",
2098 buf->large_rx_ipv4);
2099 netdev_dbg(adapter->netdev, "large_rx_ipv6 = %d\n",
2100 buf->large_rx_ipv6);
2101 netdev_dbg(adapter->netdev, "max_ipv4_hdr_sz = %d\n",
2102 buf->max_ipv4_header_size);
2103 netdev_dbg(adapter->netdev, "max_ipv6_hdr_sz = %d\n",
2104 buf->max_ipv6_header_size);
2105 netdev_dbg(adapter->netdev, "max_tcp_hdr_size = %d\n",
2106 buf->max_tcp_header_size);
2107 netdev_dbg(adapter->netdev, "max_udp_hdr_size = %d\n",
2108 buf->max_udp_header_size);
2109 netdev_dbg(adapter->netdev, "max_large_tx_size = %d\n",
2110 buf->max_large_tx_size);
2111 netdev_dbg(adapter->netdev, "max_large_rx_size = %d\n",
2112 buf->max_large_rx_size);
2113 netdev_dbg(adapter->netdev, "ipv6_ext_hdr = %d\n",
2114 buf->ipv6_extension_header);
2115 netdev_dbg(adapter->netdev, "tcp_pseudosum_req = %d\n",
2116 buf->tcp_pseudosum_req);
2117 netdev_dbg(adapter->netdev, "num_ipv6_ext_hd = %d\n",
2118 buf->num_ipv6_ext_headers);
2119 netdev_dbg(adapter->netdev, "off_ipv6_ext_hd = %d\n",
2120 buf->off_ipv6_ext_headers);
2122 adapter->ip_offload_ctrl_tok =
2123 dma_map_single(dev, &adapter->ip_offload_ctrl,
2124 sizeof(adapter->ip_offload_ctrl), DMA_TO_DEVICE);
2126 if (dma_mapping_error(dev, adapter->ip_offload_ctrl_tok)) {
2127 dev_err(dev, "Couldn't map ip offload control buffer\n");
2131 adapter->ip_offload_ctrl.version = cpu_to_be32(INITIAL_VERSION_IOB);
2132 adapter->ip_offload_ctrl.tcp_ipv4_chksum = buf->tcp_ipv4_chksum;
2133 adapter->ip_offload_ctrl.udp_ipv4_chksum = buf->udp_ipv4_chksum;
2134 adapter->ip_offload_ctrl.tcp_ipv6_chksum = buf->tcp_ipv6_chksum;
2135 adapter->ip_offload_ctrl.udp_ipv6_chksum = buf->udp_ipv6_chksum;
2137 /* large_tx/rx disabled for now, additional features needed */
2138 adapter->ip_offload_ctrl.large_tx_ipv4 = 0;
2139 adapter->ip_offload_ctrl.large_tx_ipv6 = 0;
2140 adapter->ip_offload_ctrl.large_rx_ipv4 = 0;
2141 adapter->ip_offload_ctrl.large_rx_ipv6 = 0;
2143 adapter->netdev->features = NETIF_F_GSO;
2145 if (buf->tcp_ipv4_chksum || buf->udp_ipv4_chksum)
2146 adapter->netdev->features |= NETIF_F_IP_CSUM;
2148 if (buf->tcp_ipv6_chksum || buf->udp_ipv6_chksum)
2149 adapter->netdev->features |= NETIF_F_IPV6_CSUM;
2151 if ((adapter->netdev->features &
2152 (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)))
2153 adapter->netdev->features |= NETIF_F_RXCSUM;
2155 memset(&crq, 0, sizeof(crq));
2156 crq.control_ip_offload.first = IBMVNIC_CRQ_CMD;
2157 crq.control_ip_offload.cmd = CONTROL_IP_OFFLOAD;
2158 crq.control_ip_offload.len =
2159 cpu_to_be32(sizeof(adapter->ip_offload_ctrl));
2160 crq.control_ip_offload.ioba = cpu_to_be32(adapter->ip_offload_ctrl_tok);
2161 ibmvnic_send_crq(adapter, &crq);
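2162 /* A REQUEST_ERROR_INFO response arrived: print the detail bytes for the
2163 matching error id and release its buffer. */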
2164 static void handle_error_info_rsp(union ibmvnic_crq *crq,
2165 struct ibmvnic_adapter *adapter)
2167 struct device *dev = &adapter->vdev->dev;
2168 struct ibmvnic_error_buff *error_buff, *tmp;
2169 unsigned long flags;
2173 if (crq->request_error_rsp.rc.code) {
2174 dev_info(dev, "Request Error Rsp returned with rc=%x\n",
2175 crq->request_error_rsp.rc.code);
2179 spin_lock_irqsave(&adapter->error_list_lock, flags);
2180 list_for_each_entry_safe(error_buff, tmp, &adapter->errors, list)
2181 if (error_buff->error_id == crq->request_error_rsp.error_id) {
2183 list_del(&error_buff->list);
2186 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2189 dev_err(dev, "Couldn't find error id %x\n",
2190 crq->request_error_rsp.error_id);
2194 dev_err(dev, "Detailed info for error id %x:",
2195 crq->request_error_rsp.error_id);
2197 for (i = 0; i < error_buff->len; i++) {
2198 pr_cont("%02x", (int)error_buff->buff[i]);
2204 dma_unmap_single(dev, error_buff->dma, error_buff->len,
2205 DMA_FROM_DEVICE);
2206 kfree(error_buff->buff);
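2208 /* The server reported the firmware dump size: allocate and map a buffer,
2209 then fetch the dump itself with REQUEST_DUMP. */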
2210 static void handle_dump_size_rsp(union ibmvnic_crq *crq,
2211 struct ibmvnic_adapter *adapter)
2213 int len = be32_to_cpu(crq->request_dump_size_rsp.len);
2214 struct ibmvnic_inflight_cmd *inflight_cmd;
2215 struct device *dev = &adapter->vdev->dev;
2216 union ibmvnic_crq newcrq;
2217 unsigned long flags;
2219 /* allocate and map buffer */
2220 adapter->dump_data = kmalloc(len, GFP_KERNEL);
2221 if (!adapter->dump_data) {
2222 complete(&adapter->fw_done);
2226 adapter->dump_data_token = dma_map_single(dev, adapter->dump_data, len,
2227 DMA_FROM_DEVICE); /* direction assumed: the device writes the dump */
2229 if (dma_mapping_error(dev, adapter->dump_data_token)) {
2230 if (!firmware_has_feature(FW_FEATURE_CMO))
2231 dev_err(dev, "Couldn't map dump data\n");
2232 kfree(adapter->dump_data);
2233 complete(&adapter->fw_done);
2237 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2238 if (!inflight_cmd) {
2239 dma_unmap_single(dev, adapter->dump_data_token, len,
2240 DMA_FROM_DEVICE);
2241 kfree(adapter->dump_data);
2242 complete(&adapter->fw_done);
2246 memset(&newcrq, 0, sizeof(newcrq));
2247 newcrq.request_dump.first = IBMVNIC_CRQ_CMD;
2248 newcrq.request_dump.cmd = REQUEST_DUMP;
2249 newcrq.request_dump.ioba = cpu_to_be32(adapter->dump_data_token);
2250 newcrq.request_dump.len = cpu_to_be32(adapter->dump_data_size);
2252 memcpy(&inflight_cmd->crq, &newcrq, sizeof(newcrq));
2254 spin_lock_irqsave(&adapter->inflight_lock, flags);
2255 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2256 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2258 ibmvnic_send_crq(adapter, &newcrq);
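2259 /* Firmware flagged an error: log it, then queue a REQUEST_ERROR_INFO
2260 command to fetch the detailed error data. */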
2261 static void handle_error_indication(union ibmvnic_crq *crq,
2262 struct ibmvnic_adapter *adapter)
2264 int detail_len = be32_to_cpu(crq->error_indication.detail_error_sz);
2265 struct ibmvnic_inflight_cmd *inflight_cmd;
2266 struct device *dev = &adapter->vdev->dev;
2267 struct ibmvnic_error_buff *error_buff;
2268 union ibmvnic_crq new_crq;
2269 unsigned long flags;
2271 dev_err(dev, "Firmware reports %serror id %x, cause %d\n",
2272 crq->error_indication.flags &
2273 IBMVNIC_FATAL_ERROR ? "FATAL " : "",
2274 crq->error_indication.error_id,
2275 crq->error_indication.error_cause);
2277 error_buff = kmalloc(sizeof(*error_buff), GFP_ATOMIC);
2278 if (!error_buff)
2279 return;
2281 error_buff->buff = kmalloc(detail_len, GFP_ATOMIC);
2282 if (!error_buff->buff) {
2283 kfree(error_buff);
2284 return;
2285 }
2287 error_buff->dma = dma_map_single(dev, error_buff->buff, detail_len,
2288 DMA_FROM_DEVICE); /* direction assumed: the server writes the detail */
2289 if (dma_mapping_error(dev, error_buff->dma)) {
2290 if (!firmware_has_feature(FW_FEATURE_CMO))
2291 dev_err(dev, "Couldn't map error buffer\n");
2292 kfree(error_buff->buff);
2293 kfree(error_buff);
2294 return;
2297 inflight_cmd = kmalloc(sizeof(*inflight_cmd), GFP_ATOMIC);
2298 if (!inflight_cmd) {
2299 dma_unmap_single(dev, error_buff->dma, detail_len,
2300 DMA_FROM_DEVICE);
2301 kfree(error_buff->buff);
2302 kfree(error_buff);
2303 return;
2306 error_buff->len = detail_len;
2307 error_buff->error_id = crq->error_indication.error_id;
2309 spin_lock_irqsave(&adapter->error_list_lock, flags);
2310 list_add_tail(&error_buff->list, &adapter->errors);
2311 spin_unlock_irqrestore(&adapter->error_list_lock, flags);
2313 memset(&new_crq, 0, sizeof(new_crq));
2314 new_crq.request_error_info.first = IBMVNIC_CRQ_CMD;
2315 new_crq.request_error_info.cmd = REQUEST_ERROR_INFO;
2316 new_crq.request_error_info.ioba = cpu_to_be32(error_buff->dma);
2317 new_crq.request_error_info.len = cpu_to_be32(detail_len);
2318 new_crq.request_error_info.error_id = crq->error_indication.error_id;
2320 memcpy(&inflight_cmd->crq, &new_crq, sizeof(new_crq));
2322 spin_lock_irqsave(&adapter->inflight_lock, flags);
2323 list_add_tail(&inflight_cmd->list, &adapter->inflight);
2324 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
2326 ibmvnic_send_crq(adapter, &new_crq);
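2328 /* Adopt the MAC address confirmed in CHANGE_MAC_ADDR_RSP */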
2329 static void handle_change_mac_rsp(union ibmvnic_crq *crq,
2330 struct ibmvnic_adapter *adapter)
2332 struct net_device *netdev = adapter->netdev;
2333 struct device *dev = &adapter->vdev->dev;
2336 rc = crq->change_mac_addr_rsp.rc.code;
2337 if (rc) {
2338 dev_err(dev, "Error %ld in CHANGE_MAC_ADDR_RSP\n", rc);
2339 return;
2340 }
2341 memcpy(netdev->dev_addr, &crq->change_mac_addr_rsp.mac_addr[0],
2342 ETH_ALEN);
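2343 /* Each REQUEST_CAPABILITY response lands here; on PARTIALSUCCESS adopt
2344 the server's counter-offer and rebuild the sub-CRQs before retrying. */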
2345 static void handle_request_cap_rsp(union ibmvnic_crq *crq,
2346 struct ibmvnic_adapter *adapter)
2348 struct device *dev = &adapter->vdev->dev;
2352 switch (be16_to_cpu(crq->request_capability_rsp.capability)) {
2353 case REQ_TX_QUEUES:
2354 req_value = &adapter->req_tx_queues;
2355 name = "tx";
2357 case REQ_RX_QUEUES:
2358 req_value = &adapter->req_rx_queues;
2359 name = "rx";
2361 case REQ_RX_ADD_QUEUES:
2362 req_value = &adapter->req_rx_add_queues;
2365 case REQ_TX_ENTRIES_PER_SUBCRQ:
2366 req_value = &adapter->req_tx_entries_per_subcrq;
2367 name = "tx_entries_per_subcrq";
2369 case REQ_RX_ADD_ENTRIES_PER_SUBCRQ:
2370 req_value = &adapter->req_rx_add_entries_per_subcrq;
2371 name = "rx_add_entries_per_subcrq";
2373 case REQ_MTU:
2374 req_value = &adapter->req_mtu;
2375 name = "mtu";
2377 case PROMISC_REQUESTED:
2378 req_value = &adapter->promisc;
2382 dev_err(dev, "Got invalid cap request rsp %d\n",
2383 be16_to_cpu(crq->request_capability.capability));
2387 switch (crq->request_capability_rsp.rc.code) {
2390 case PARTIALSUCCESS:
2391 dev_info(dev, "req=%lld, rsp=%ld in %s queue, retrying.\n",
2392 *req_value,
2393 (long int)be32_to_cpu(crq->request_capability_rsp.number),
2394 name);
2395 release_sub_crqs_no_irqs(adapter);
2396 *req_value = be32_to_cpu(crq->request_capability_rsp.number);
2397 init_sub_crqs(adapter, 1);
2400 dev_err(dev, "Error %d in request cap rsp\n",
2401 crq->request_capability_rsp.rc.code);
2405 /* Done receiving requested capabilities, query IP offload support */
2406 if (++adapter->requested_caps == 7) { /* all 7 REQUEST_CAPABILITY responses are in */
2407 union ibmvnic_crq newcrq;
2408 int buf_sz = sizeof(struct ibmvnic_query_ip_offload_buffer);
2409 struct ibmvnic_query_ip_offload_buffer *ip_offload_buf =
2410 &adapter->ip_offload_buf;
2412 adapter->ip_offload_tok = dma_map_single(dev, ip_offload_buf,
2413 buf_sz,
2414 DMA_FROM_DEVICE);
2416 if (dma_mapping_error(dev, adapter->ip_offload_tok)) {
2417 if (!firmware_has_feature(FW_FEATURE_CMO))
2418 dev_err(dev, "Couldn't map offload buffer\n");
2422 memset(&newcrq, 0, sizeof(newcrq));
2423 newcrq.query_ip_offload.first = IBMVNIC_CRQ_CMD;
2424 newcrq.query_ip_offload.cmd = QUERY_IP_OFFLOAD;
2425 newcrq.query_ip_offload.len = cpu_to_be32(buf_sz);
2426 newcrq.query_ip_offload.ioba =
2427 cpu_to_be32(adapter->ip_offload_tok);
2429 ibmvnic_send_crq(adapter, &newcrq);
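2431 /* Check the login response against the request; a non-zero rc means the
2432 server wants the driver to renegotiate with fewer resources. */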
2433 static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq,
2434 struct ibmvnic_adapter *adapter)
2436 struct device *dev = &adapter->vdev->dev;
2437 struct ibmvnic_login_rsp_buffer *login_rsp = adapter->login_rsp_buf;
2438 struct ibmvnic_login_buffer *login = adapter->login_buf;
2439 union ibmvnic_crq crq;
2442 dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz,
2444 dma_unmap_single(dev, adapter->login_rsp_buf_token,
2445 adapter->login_rsp_buf_sz, DMA_BIDIRECTIONAL);
2447 /* If the number of queues requested can't be allocated by the
2448 * server, the login response will return with code 1. We will need
2449 * to resend the login buffer with fewer queues requested.
2451 if (login_rsp_crq->generic.rc.code) {
2452 adapter->renegotiate = true;
2453 complete(&adapter->init_done);
2457 netdev_dbg(adapter->netdev, "Login Response Buffer:\n");
2458 for (i = 0; i < (adapter->login_rsp_buf_sz - 1) / 8 + 1; i++) {
2459 netdev_dbg(adapter->netdev, "%016lx\n",
2460 ((unsigned long int *)(adapter->login_rsp_buf))[i]);
2464 if (login->num_txcomp_subcrqs != login_rsp->num_txsubm_subcrqs ||
2465 (be32_to_cpu(login->num_rxcomp_subcrqs) *
2466 adapter->req_rx_add_queues !=
2467 be32_to_cpu(login_rsp->num_rxadd_subcrqs))) {
2468 dev_err(dev, "FATAL: Inconsistent login and login rsp\n");
2469 ibmvnic_remove(adapter->vdev);
2472 complete(&adapter->init_done);
2474 memset(&crq, 0, sizeof(crq));
2475 crq.request_ras_comp_num.first = IBMVNIC_CRQ_CMD;
2476 crq.request_ras_comp_num.cmd = REQUEST_RAS_COMP_NUM;
2477 ibmvnic_send_crq(adapter, &crq);
2482 static void handle_request_map_rsp(union ibmvnic_crq *crq,
2483 struct ibmvnic_adapter *adapter)
2485 struct device *dev = &adapter->vdev->dev;
2486 u8 map_id = crq->request_map_rsp.map_id;
2492 tx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_txsubm_subcrqs);
2493 rx_subcrqs = be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
2495 rc = crq->request_map_rsp.rc.code;
2496 if (rc)
2497 dev_err(dev, "Error %ld in REQUEST_MAP_RSP\n", rc);
2499 /* need to find and zero tx/rx_pool map_id */
2500 for (i = 0; i < tx_subcrqs; i++) {
2501 if (adapter->tx_pool[i].long_term_buff.map_id == map_id)
2502 adapter->tx_pool[i].long_term_buff.map_id = 0;
2504 for (i = 0; i < rx_subcrqs; i++) {
2505 if (adapter->rx_pool[i].long_term_buff.map_id == map_id)
2506 adapter->rx_pool[i].long_term_buff.map_id = 0;
2509 complete(&adapter->fw_done);
2512 static void handle_request_unmap_rsp(union ibmvnic_crq *crq,
2513 struct ibmvnic_adapter *adapter)
2515 struct device *dev = &adapter->vdev->dev;
2518 rc = crq->request_unmap_rsp.rc.code;
2519 if (rc)
2520 dev_err(dev, "Error %ld in REQUEST_UNMAP_RSP\n", rc);
2523 static void handle_query_map_rsp(union ibmvnic_crq *crq,
2524 struct ibmvnic_adapter *adapter)
2526 struct net_device *netdev = adapter->netdev;
2527 struct device *dev = &adapter->vdev->dev;
2530 rc = crq->query_map_rsp.rc.code;
2531 if (rc)
2532 dev_err(dev, "Error %ld in QUERY_MAP_RSP\n", rc);
2535 netdev_dbg(netdev, "page_size = %d\ntot_pages = %d\nfree_pages = %d\n",
2536 crq->query_map_rsp.page_size, crq->query_map_rsp.tot_pages,
2537 crq->query_map_rsp.free_pages);
2540 static void handle_query_cap_rsp(union ibmvnic_crq *crq,
2541 struct ibmvnic_adapter *adapter)
2543 struct net_device *netdev = adapter->netdev;
2544 struct device *dev = &adapter->vdev->dev;
2547 atomic_dec(&adapter->running_cap_queries);
2548 netdev_dbg(netdev, "Outstanding queries: %d\n",
2549 atomic_read(&adapter->running_cap_queries));
2550 rc = crq->query_capability.rc.code;
2551 if (rc) {
2552 dev_err(dev, "Error %ld in QUERY_CAP_RSP\n", rc);
2553 goto out;
2554 }
2556 switch (be16_to_cpu(crq->query_capability.capability)) {
2557 case MIN_TX_QUEUES:
2558 adapter->min_tx_queues =
2559 be64_to_cpu(crq->query_capability.number);
2560 netdev_dbg(netdev, "min_tx_queues = %lld\n",
2561 adapter->min_tx_queues);
2563 case MIN_RX_QUEUES:
2564 adapter->min_rx_queues =
2565 be64_to_cpu(crq->query_capability.number);
2566 netdev_dbg(netdev, "min_rx_queues = %lld\n",
2567 adapter->min_rx_queues);
2569 case MIN_RX_ADD_QUEUES:
2570 adapter->min_rx_add_queues =
2571 be64_to_cpu(crq->query_capability.number);
2572 netdev_dbg(netdev, "min_rx_add_queues = %lld\n",
2573 adapter->min_rx_add_queues);
2575 case MAX_TX_QUEUES:
2576 adapter->max_tx_queues =
2577 be64_to_cpu(crq->query_capability.number);
2578 netdev_dbg(netdev, "max_tx_queues = %lld\n",
2579 adapter->max_tx_queues);
2581 case MAX_RX_QUEUES:
2582 adapter->max_rx_queues =
2583 be64_to_cpu(crq->query_capability.number);
2584 netdev_dbg(netdev, "max_rx_queues = %lld\n",
2585 adapter->max_rx_queues);
2587 case MAX_RX_ADD_QUEUES:
2588 adapter->max_rx_add_queues =
2589 be64_to_cpu(crq->query_capability.number);
2590 netdev_dbg(netdev, "max_rx_add_queues = %lld\n",
2591 adapter->max_rx_add_queues);
2593 case MIN_TX_ENTRIES_PER_SUBCRQ:
2594 adapter->min_tx_entries_per_subcrq =
2595 be64_to_cpu(crq->query_capability.number);
2596 netdev_dbg(netdev, "min_tx_entries_per_subcrq = %lld\n",
2597 adapter->min_tx_entries_per_subcrq);
2599 case MIN_RX_ADD_ENTRIES_PER_SUBCRQ:
2600 adapter->min_rx_add_entries_per_subcrq =
2601 be64_to_cpu(crq->query_capability.number);
2602 netdev_dbg(netdev, "min_rx_add_entrs_per_subcrq = %lld\n",
2603 adapter->min_rx_add_entries_per_subcrq);
2605 case MAX_TX_ENTRIES_PER_SUBCRQ:
2606 adapter->max_tx_entries_per_subcrq =
2607 be64_to_cpu(crq->query_capability.number);
2608 netdev_dbg(netdev, "max_tx_entries_per_subcrq = %lld\n",
2609 adapter->max_tx_entries_per_subcrq);
2611 case MAX_RX_ADD_ENTRIES_PER_SUBCRQ:
2612 adapter->max_rx_add_entries_per_subcrq =
2613 be64_to_cpu(crq->query_capability.number);
2614 netdev_dbg(netdev, "max_rx_add_entrs_per_subcrq = %lld\n",
2615 adapter->max_rx_add_entries_per_subcrq);
2617 case TCP_IP_OFFLOAD:
2618 adapter->tcp_ip_offload =
2619 be64_to_cpu(crq->query_capability.number);
2620 netdev_dbg(netdev, "tcp_ip_offload = %lld\n",
2621 adapter->tcp_ip_offload);
2623 case PROMISC_SUPPORTED:
2624 adapter->promisc_supported =
2625 be64_to_cpu(crq->query_capability.number);
2626 netdev_dbg(netdev, "promisc_supported = %lld\n",
2627 adapter->promisc_supported);
2629 case MIN_MTU:
2630 adapter->min_mtu = be64_to_cpu(crq->query_capability.number);
2631 netdev->min_mtu = adapter->min_mtu;
2632 netdev_dbg(netdev, "min_mtu = %lld\n", adapter->min_mtu);
2634 case MAX_MTU:
2635 adapter->max_mtu = be64_to_cpu(crq->query_capability.number);
2636 netdev->max_mtu = adapter->max_mtu;
2637 netdev_dbg(netdev, "max_mtu = %lld\n", adapter->max_mtu);
2639 case MAX_MULTICAST_FILTERS:
2640 adapter->max_multicast_filters =
2641 be64_to_cpu(crq->query_capability.number);
2642 netdev_dbg(netdev, "max_multicast_filters = %lld\n",
2643 adapter->max_multicast_filters);
2645 case VLAN_HEADER_INSERTION:
2646 adapter->vlan_header_insertion =
2647 be64_to_cpu(crq->query_capability.number);
2648 if (adapter->vlan_header_insertion)
2649 netdev->features |= NETIF_F_HW_VLAN_STAG_TX;
2650 netdev_dbg(netdev, "vlan_header_insertion = %lld\n",
2651 adapter->vlan_header_insertion);
2653 case MAX_TX_SG_ENTRIES:
2654 adapter->max_tx_sg_entries =
2655 be64_to_cpu(crq->query_capability.number);
2656 netdev_dbg(netdev, "max_tx_sg_entries = %lld\n",
2657 adapter->max_tx_sg_entries);
2659 case RX_SG_SUPPORTED:
2660 adapter->rx_sg_supported =
2661 be64_to_cpu(crq->query_capability.number);
2662 netdev_dbg(netdev, "rx_sg_supported = %lld\n",
2663 adapter->rx_sg_supported);
2665 case OPT_TX_COMP_SUB_QUEUES:
2666 adapter->opt_tx_comp_sub_queues =
2667 be64_to_cpu(crq->query_capability.number);
2668 netdev_dbg(netdev, "opt_tx_comp_sub_queues = %lld\n",
2669 adapter->opt_tx_comp_sub_queues);
2671 case OPT_RX_COMP_QUEUES:
2672 adapter->opt_rx_comp_queues =
2673 be64_to_cpu(crq->query_capability.number);
2674 netdev_dbg(netdev, "opt_rx_comp_queues = %lld\n",
2675 adapter->opt_rx_comp_queues);
2677 case OPT_RX_BUFADD_Q_PER_RX_COMP_Q:
2678 adapter->opt_rx_bufadd_q_per_rx_comp_q =
2679 be64_to_cpu(crq->query_capability.number);
2680 netdev_dbg(netdev, "opt_rx_bufadd_q_per_rx_comp_q = %lld\n",
2681 adapter->opt_rx_bufadd_q_per_rx_comp_q);
2683 case OPT_TX_ENTRIES_PER_SUBCRQ:
2684 adapter->opt_tx_entries_per_subcrq =
2685 be64_to_cpu(crq->query_capability.number);
2686 netdev_dbg(netdev, "opt_tx_entries_per_subcrq = %lld\n",
2687 adapter->opt_tx_entries_per_subcrq);
2689 case OPT_RXBA_ENTRIES_PER_SUBCRQ:
2690 adapter->opt_rxba_entries_per_subcrq =
2691 be64_to_cpu(crq->query_capability.number);
2692 netdev_dbg(netdev, "opt_rxba_entries_per_subcrq = %lld\n",
2693 adapter->opt_rxba_entries_per_subcrq);
2695 case TX_RX_DESC_REQ:
2696 adapter->tx_rx_desc_req = crq->query_capability.number;
2697 netdev_dbg(netdev, "tx_rx_desc_req = %llx\n",
2698 adapter->tx_rx_desc_req);
2702 netdev_err(netdev, "Got invalid cap rsp %d\n",
2703 be16_to_cpu(crq->query_capability.capability));
2705 out:
2706 /* We're done querying the capabilities, initialize sub-crqs */
2707 if (atomic_read(&adapter->running_cap_queries) == 0)
2708 init_sub_crqs(adapter, 0);
2712 static void handle_control_ras_rsp(union ibmvnic_crq *crq,
2713 struct ibmvnic_adapter *adapter)
2715 u8 correlator = crq->control_ras_rsp.correlator;
2716 struct device *dev = &adapter->vdev->dev;
2720 if (crq->control_ras_rsp.rc.code) {
2721 dev_warn(dev, "Control ras failed rc=%d\n",
2722 crq->control_ras_rsp.rc.code);
2726 for (i = 0; i < adapter->ras_comp_num; i++)
2727 if (adapter->ras_comps[i].correlator == correlator)
2728 break;
2730 if (i == adapter->ras_comp_num) {
2731 dev_warn(dev, "Correlator not found on control_ras_rsp\n");
2732 return;
2733 }
2738 switch (crq->control_ras_rsp.op) {
2739 case IBMVNIC_TRACE_LEVEL:
2740 adapter->ras_comps[i].trace_level = crq->control_ras.level;
2742 case IBMVNIC_ERROR_LEVEL:
2743 adapter->ras_comps[i].error_check_level =
2744 crq->control_ras.level;
2746 case IBMVNIC_TRACE_PAUSE:
2747 adapter->ras_comp_int[i].paused = 1;
2749 case IBMVNIC_TRACE_RESUME:
2750 adapter->ras_comp_int[i].paused = 0;
2752 case IBMVNIC_TRACE_ON:
2753 adapter->ras_comps[i].trace_on = 1;
2755 case IBMVNIC_TRACE_OFF:
2756 adapter->ras_comps[i].trace_on = 0;
2758 case IBMVNIC_CHG_TRACE_BUFF_SZ:
2759 /* trace_buff_sz is 3 bytes, stuff it into an int */
2760 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[0] = 0;
2761 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[1] =
2762 crq->control_ras_rsp.trace_buff_sz[0];
2763 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[2] =
2764 crq->control_ras_rsp.trace_buff_sz[1];
2765 ((u8 *)(&adapter->ras_comps[i].trace_buff_size))[3] =
2766 crq->control_ras_rsp.trace_buff_sz[2];
2769 dev_err(dev, "invalid op %d on control_ras_rsp",
2770 crq->control_ras_rsp.op);
2774 static ssize_t trace_read(struct file *file, char __user *user_buf, size_t len,
2775 loff_t *ppos)
2777 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2778 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2779 struct device *dev = &adapter->vdev->dev;
2780 struct ibmvnic_fw_trace_entry *trace;
2781 int num = ras_comp_int->num;
2782 union ibmvnic_crq crq;
2783 dma_addr_t trace_tok;
2785 if (*ppos >= be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2786 return 0;
2788 trace = dma_alloc_coherent(dev,
2789 be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
2790 &trace_tok, GFP_KERNEL);
2792 if (!trace) {
2793 dev_err(dev, "Couldn't alloc trace buffer\n");
2794 return -ENOMEM;
2795 }
2798 memset(&crq, 0, sizeof(crq));
2799 crq.collect_fw_trace.first = IBMVNIC_CRQ_CMD;
2800 crq.collect_fw_trace.cmd = COLLECT_FW_TRACE;
2801 crq.collect_fw_trace.correlator = adapter->ras_comps[num].correlator;
2802 crq.collect_fw_trace.ioba = cpu_to_be32(trace_tok);
2803 crq.collect_fw_trace.len = adapter->ras_comps[num].trace_buff_size;
2804 init_completion(&adapter->fw_done); /* before the send, so the response cannot complete first */
2805 ibmvnic_send_crq(adapter, &crq);
2807 wait_for_completion(&adapter->fw_done);
2809 if (*ppos + len > be32_to_cpu(adapter->ras_comps[num].trace_buff_size))
2810 len = be32_to_cpu(adapter->ras_comps[num].trace_buff_size) -
2811 *ppos;
2813 if (copy_to_user(user_buf, &((u8 *)trace)[*ppos], len))
2814 len = 0; /* report nothing copied; still free the DMA buffer below */
2816 dma_free_coherent(dev,
2817 be32_to_cpu(adapter->ras_comps[num].trace_buff_size),
2823 static const struct file_operations trace_ops = {
2824 .owner = THIS_MODULE,
2825 .open = simple_open,
2826 .read = trace_read,
2829 static ssize_t paused_read(struct file *file, char __user *user_buf, size_t len,
2830 loff_t *ppos)
2832 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2833 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2834 int num = ras_comp_int->num;
2835 char buff[5]; /* 1 or 0 plus \n and \0 */
2838 size = sprintf(buff, "%d\n", adapter->ras_comp_int[num].paused);
2843 if (copy_to_user(user_buf, buff, size))
2844 return -EFAULT;
2848 static ssize_t paused_write(struct file *file, const char __user *user_buf,
2849 size_t len, loff_t *ppos)
2851 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2852 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2853 int num = ras_comp_int->num;
2854 union ibmvnic_crq crq;
2856 char buff[9] = {0}; /* decimal max int plus \n and \0 */
2857 if (copy_from_user(buff, user_buf, min_t(size_t, len, sizeof(buff) - 1)))
2858 return -EFAULT;
2859 if (kstrtoul(buff, 10, &val))
2860 return -EINVAL;
2861 adapter->ras_comp_int[num].paused = val ? 1 : 0;
2863 memset(&crq, 0, sizeof(crq));
2864 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2865 crq.control_ras.cmd = CONTROL_RAS;
2866 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2867 crq.control_ras.op = val ? IBMVNIC_TRACE_PAUSE : IBMVNIC_TRACE_RESUME;
2868 ibmvnic_send_crq(adapter, &crq);
2873 static const struct file_operations paused_ops = {
2874 .owner = THIS_MODULE,
2875 .open = simple_open,
2876 .read = paused_read,
2877 .write = paused_write,
2880 static ssize_t tracing_read(struct file *file, char __user *user_buf,
2881 size_t len, loff_t *ppos)
2883 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2884 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2885 int num = ras_comp_int->num;
2886 char buff[5]; /* 1 or 0 plus \n and \0 */
2889 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_on);
2894 if (copy_to_user(user_buf, buff, size))
2895 return -EFAULT;
2899 static ssize_t tracing_write(struct file *file, const char __user *user_buf,
2900 size_t len, loff_t *ppos)
2902 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2903 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2904 int num = ras_comp_int->num;
2905 union ibmvnic_crq crq;
2907 char buff[9] = {0}; /* decimal max int plus \n and \0 */
2908 if (copy_from_user(buff, user_buf, min_t(size_t, len, sizeof(buff) - 1)))
2909 return -EFAULT;
2910 if (kstrtoul(buff, 10, &val))
2911 return -EINVAL;
2912 memset(&crq, 0, sizeof(crq));
2913 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2914 crq.control_ras.cmd = CONTROL_RAS;
2915 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2916 crq.control_ras.op = val ? IBMVNIC_TRACE_ON : IBMVNIC_TRACE_OFF;
2917 ibmvnic_send_crq(adapter, &crq);
2919 return len;
2921 static const struct file_operations tracing_ops = {
2922 .owner = THIS_MODULE,
2923 .open = simple_open,
2924 .read = tracing_read,
2925 .write = tracing_write,
2928 static ssize_t error_level_read(struct file *file, char __user *user_buf,
2929 size_t len, loff_t *ppos)
2931 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2932 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2933 int num = ras_comp_int->num;
2934 char buff[5]; /* decimal max char plus \n and \0 */
2937 size = sprintf(buff, "%d\n", adapter->ras_comps[num].error_check_level);
2942 if (copy_to_user(user_buf, buff, size))
2943 return -EFAULT;
2947 static ssize_t error_level_write(struct file *file, const char __user *user_buf,
2948 size_t len, loff_t *ppos)
2950 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2951 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2952 int num = ras_comp_int->num;
2953 union ibmvnic_crq crq;
2955 char buff[9] = {0}; /* decimal max int plus \n and \0 */
2956 if (copy_from_user(buff, user_buf, min_t(size_t, len, sizeof(buff) - 1)))
2957 return -EFAULT;
2958 if (kstrtoul(buff, 10, &val))
2959 return -EINVAL;
2963 memset(&crq, 0, sizeof(crq));
2964 crq.control_ras.first = IBMVNIC_CRQ_CMD;
2965 crq.control_ras.cmd = CONTROL_RAS;
2966 crq.control_ras.correlator = adapter->ras_comps[num].correlator;
2967 crq.control_ras.op = IBMVNIC_ERROR_LEVEL;
2968 crq.control_ras.level = val;
2969 ibmvnic_send_crq(adapter, &crq);
2974 static const struct file_operations error_level_ops = {
2975 .owner = THIS_MODULE,
2976 .open = simple_open,
2977 .read = error_level_read,
2978 .write = error_level_write,
2981 static ssize_t trace_level_read(struct file *file, char __user *user_buf,
2982 size_t len, loff_t *ppos)
2984 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
2985 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
2986 int num = ras_comp_int->num;
2987 char buff[5]; /* decimal max char plus \n and \0 */
2990 size = sprintf(buff, "%d\n", adapter->ras_comps[num].trace_level);
2994 if (copy_to_user(user_buf, buff, size))
2995 return -EFAULT;
2999 static ssize_t trace_level_write(struct file *file, const char __user *user_buf,
3000 size_t len, loff_t *ppos)
3002 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3003 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3004 union ibmvnic_crq crq;
3006 char buff[9] = {0}; /* decimal max int plus \n and \0 */
3007 if (copy_from_user(buff, user_buf, min_t(size_t, len, sizeof(buff) - 1)))
3008 return -EFAULT;
3009 if (kstrtoul(buff, 10, &val))
3010 return -EINVAL;
3013 memset(&crq, 0, sizeof(crq));
3014 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3015 crq.control_ras.cmd = CONTROL_RAS;
3016 crq.control_ras.correlator =
3017 adapter->ras_comps[ras_comp_int->num].correlator;
3018 crq.control_ras.op = IBMVNIC_TRACE_LEVEL;
3019 crq.control_ras.level = val;
3020 ibmvnic_send_crq(adapter, &crq);
3025 static const struct file_operations trace_level_ops = {
3026 .owner = THIS_MODULE,
3027 .open = simple_open,
3028 .read = trace_level_read,
3029 .write = trace_level_write,
3032 static ssize_t trace_buff_size_read(struct file *file, char __user *user_buf,
3033 size_t len, loff_t *ppos)
3035 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3036 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3037 int num = ras_comp_int->num;
3038 char buff[12]; /* decimal max u32 plus \n and \0 */
3041 size = scnprintf(buff, sizeof(buff), "%u\n",
3042 be32_to_cpu(adapter->ras_comps[num].trace_buff_size));
3045 if (copy_to_user(user_buf, buff, size))
3046 return -EFAULT;
3050 static ssize_t trace_buff_size_write(struct file *file,
3051 const char __user *user_buf, size_t len,
3052 loff_t *ppos)
3054 struct ibmvnic_fw_comp_internal *ras_comp_int = file->private_data;
3055 struct ibmvnic_adapter *adapter = ras_comp_int->adapter;
3056 union ibmvnic_crq crq;
3058 char buff[9] = {0}; /* decimal max int plus \n and \0 */
3059 if (copy_from_user(buff, user_buf, min_t(size_t, len, sizeof(buff) - 1)))
3060 return -EFAULT;
3061 if (kstrtoul(buff, 10, &val))
3062 return -EINVAL;
3063 memset(&crq, 0, sizeof(crq));
3064 crq.control_ras.first = IBMVNIC_CRQ_CMD;
3065 crq.control_ras.cmd = CONTROL_RAS;
3066 crq.control_ras.correlator =
3067 adapter->ras_comps[ras_comp_int->num].correlator;
3068 crq.control_ras.op = IBMVNIC_CHG_TRACE_BUFF_SZ;
3069 /* trace_buff_sz is 3 bytes: copy the 3 low-order bytes of val (big-endian) */
3070 crq.control_ras.trace_buff_sz[0] = ((u8 *)(&val))[5];
3071 crq.control_ras.trace_buff_sz[1] = ((u8 *)(&val))[6];
3072 crq.control_ras.trace_buff_sz[2] = ((u8 *)(&val))[7];
3073 ibmvnic_send_crq(adapter, &crq);
3078 static const struct file_operations trace_size_ops = {
3079 .owner = THIS_MODULE,
3080 .open = simple_open,
3081 .read = trace_buff_size_read,
3082 .write = trace_buff_size_write,
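3083 /* Build the debugfs tree: one directory per firmware RAS component,
3084 exposing the trace/pause/level knobs defined above. */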
3085 static void handle_request_ras_comps_rsp(union ibmvnic_crq *crq,
3086 struct ibmvnic_adapter *adapter)
3088 struct device *dev = &adapter->vdev->dev;
3089 struct dentry *dir_ent;
3093 debugfs_remove_recursive(adapter->ras_comps_ent);
3095 adapter->ras_comps_ent = debugfs_create_dir("ras_comps",
3096 adapter->debugfs_dir);
3097 if (!adapter->ras_comps_ent || IS_ERR(adapter->ras_comps_ent)) {
3098 dev_info(dev, "debugfs create ras_comps dir failed\n");
3102 for (i = 0; i < adapter->ras_comp_num; i++) {
3103 dir_ent = debugfs_create_dir(adapter->ras_comps[i].name,
3104 adapter->ras_comps_ent);
3105 if (!dir_ent || IS_ERR(dir_ent)) {
3106 dev_info(dev, "debugfs create %s dir failed\n",
3107 adapter->ras_comps[i].name);
3111 adapter->ras_comp_int[i].adapter = adapter;
3112 adapter->ras_comp_int[i].num = i;
3113 adapter->ras_comp_int[i].desc_blob.data =
3114 &adapter->ras_comps[i].description;
3115 adapter->ras_comp_int[i].desc_blob.size =
3116 sizeof(adapter->ras_comps[i].description);
3118 /* Don't need to remember the dentries because the debugfs dir
3119 * gets removed recursively
3121 ent = debugfs_create_blob("description", S_IRUGO, dir_ent,
3122 &adapter->ras_comp_int[i].desc_blob);
3123 ent = debugfs_create_file("trace_buf_size", S_IRUGO | S_IWUSR,
3124 dir_ent, &adapter->ras_comp_int[i],
3125 &trace_size_ops);
3126 ent = debugfs_create_file("trace_level",
3127 S_IRUGO |
3128 (adapter->ras_comps[i].trace_level !=
3129 0xFF ? S_IWUSR : 0),
3130 dir_ent, &adapter->ras_comp_int[i],
3131 &trace_level_ops);
3132 ent = debugfs_create_file("error_level",
3133 S_IRUGO |
3134 (adapter->
3135 ras_comps[i].error_check_level !=
3136 0xFF ? S_IWUSR : 0),
3137 dir_ent, &adapter->ras_comp_int[i],
3138 &error_level_ops);
3139 ent = debugfs_create_file("tracing", S_IRUGO | S_IWUSR,
3140 dir_ent, &adapter->ras_comp_int[i],
3141 &tracing_ops);
3142 ent = debugfs_create_file("paused", S_IRUGO | S_IWUSR,
3143 dir_ent, &adapter->ras_comp_int[i],
3144 &paused_ops);
3145 ent = debugfs_create_file("trace", S_IRUGO, dir_ent,
3146 &adapter->ras_comp_int[i],
3147 &trace_ops);
3151 static void handle_request_ras_comp_num_rsp(union ibmvnic_crq *crq,
3152 struct ibmvnic_adapter *adapter)
3154 int len = adapter->ras_comp_num * sizeof(struct ibmvnic_fw_component);
3155 struct device *dev = &adapter->vdev->dev;
3156 union ibmvnic_crq newcrq;
3158 adapter->ras_comps = dma_alloc_coherent(dev, len,
3159 &adapter->ras_comps_tok,
3160 GFP_KERNEL);
3161 if (!adapter->ras_comps) {
3162 if (!firmware_has_feature(FW_FEATURE_CMO))
3163 dev_err(dev, "Couldn't alloc fw comps buffer\n");
3167 adapter->ras_comp_int = kmalloc(adapter->ras_comp_num *
3168 sizeof(struct ibmvnic_fw_comp_internal),
3169 GFP_KERNEL);
3170 if (!adapter->ras_comp_int) {
3171 dma_free_coherent(dev, len, adapter->ras_comps,
3172 adapter->ras_comps_tok);
3173 return; /* don't DMA into a buffer we just freed */
3174 memset(&newcrq, 0, sizeof(newcrq));
3175 newcrq.request_ras_comps.first = IBMVNIC_CRQ_CMD;
3176 newcrq.request_ras_comps.cmd = REQUEST_RAS_COMPS;
3177 newcrq.request_ras_comps.ioba = cpu_to_be32(adapter->ras_comps_tok);
3178 newcrq.request_ras_comps.len = cpu_to_be32(len);
3179 ibmvnic_send_crq(adapter, &newcrq);
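3181 /* Cancel every command still awaiting a response and free its buffers */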
3182 static void ibmvnic_free_inflight(struct ibmvnic_adapter *adapter)
3184 struct ibmvnic_inflight_cmd *inflight_cmd, *tmp1;
3185 struct device *dev = &adapter->vdev->dev;
3186 struct ibmvnic_error_buff *error_buff, *tmp2;
3187 unsigned long flags;
3188 unsigned long flags2;
3190 spin_lock_irqsave(&adapter->inflight_lock, flags);
3191 list_for_each_entry_safe(inflight_cmd, tmp1, &adapter->inflight, list) {
3192 switch (inflight_cmd->crq.generic.cmd) {
3193 case LOGIN:
3194 dma_unmap_single(dev, adapter->login_buf_token,
3195 adapter->login_buf_sz,
3196 DMA_BIDIRECTIONAL);
3197 dma_unmap_single(dev, adapter->login_rsp_buf_token,
3198 adapter->login_rsp_buf_sz,
3199 DMA_BIDIRECTIONAL);
3200 kfree(adapter->login_rsp_buf);
3201 kfree(adapter->login_buf);
3202 case REQUEST_DUMP_SIZE:
3203 case REQUEST_DUMP:
3204 complete(&adapter->fw_done);
3205 break;
3206 case REQUEST_ERROR_INFO:
3207 spin_lock_irqsave(&adapter->error_list_lock, flags2);
3208 list_for_each_entry_safe(error_buff, tmp2,
3209 &adapter->errors, list) {
3210 dma_unmap_single(dev, error_buff->dma,
3211 error_buff->len,
3212 DMA_FROM_DEVICE);
3213 kfree(error_buff->buff);
3214 list_del(&error_buff->list);
3215 kfree(error_buff);
3217 spin_unlock_irqrestore(&adapter->error_list_lock,
3218 flags2);
3221 list_del(&inflight_cmd->list);
3222 kfree(inflight_cmd);
3224 spin_unlock_irqrestore(&adapter->inflight_lock, flags);
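3225 /* Transport-event worker: drop in-flight commands and sub-CRQs, and after
3226 a partition migration re-enable the CRQ and restart the init handshake. */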
3227 static void ibmvnic_xport_event(struct work_struct *work)
3229 struct ibmvnic_adapter *adapter = container_of(work,
3230 struct ibmvnic_adapter,
3231 ibmvnic_xport);
3232 struct device *dev = &adapter->vdev->dev;
3235 ibmvnic_free_inflight(adapter);
3236 release_sub_crqs(adapter);
3237 if (adapter->migrated) {
3238 rc = ibmvnic_reenable_crq_queue(adapter);
3239 if (rc)
3240 dev_err(dev, "Error after enable rc=%ld\n", rc);
3241 adapter->migrated = false;
3242 rc = ibmvnic_send_crq_init(adapter);
3243 if (rc)
3244 dev_err(dev, "Error sending init rc=%ld\n", rc);
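3246 /* Dispatch one CRQ message to the handler for its type and command */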
3248 static void ibmvnic_handle_crq(union ibmvnic_crq *crq,
3249 struct ibmvnic_adapter *adapter)
3251 struct ibmvnic_generic_crq *gen_crq = &crq->generic;
3252 struct net_device *netdev = adapter->netdev;
3253 struct device *dev = &adapter->vdev->dev;
3256 netdev_dbg(netdev, "Handling CRQ: %016lx %016lx\n",
3257 ((unsigned long int *)crq)[0],
3258 ((unsigned long int *)crq)[1]);
3259 switch (gen_crq->first) {
3260 case IBMVNIC_CRQ_INIT_RSP:
3261 switch (gen_crq->cmd) {
3262 case IBMVNIC_CRQ_INIT:
3263 dev_info(dev, "Partner initialized\n");
3264 /* Send back a response */
3265 rc = ibmvnic_send_crq_init_complete(adapter);
3266 if (!rc)
3267 schedule_work(&adapter->vnic_crq_init);
3268 else
3269 dev_err(dev, "Can't send initrsp rc=%ld\n", rc);
3271 case IBMVNIC_CRQ_INIT_COMPLETE:
3272 dev_info(dev, "Partner initialization complete\n");
3273 send_version_xchg(adapter);
3276 dev_err(dev, "Unknown crq cmd: %d\n", gen_crq->cmd);
3279 case IBMVNIC_CRQ_XPORT_EVENT:
3280 if (gen_crq->cmd == IBMVNIC_PARTITION_MIGRATED) {
3281 dev_info(dev, "Re-enabling adapter\n");
3282 adapter->migrated = true;
3283 schedule_work(&adapter->ibmvnic_xport);
3284 } else if (gen_crq->cmd == IBMVNIC_DEVICE_FAILOVER) {
3285 dev_info(dev, "Backing device failover detected\n");
3286 netif_carrier_off(netdev);
3287 adapter->failover = true;
3289 /* The adapter lost the connection */
3290 dev_err(dev, "Virtual Adapter failed (rc=%d)\n",
3291 gen_crq->cmd);
3292 schedule_work(&adapter->ibmvnic_xport);
3295 case IBMVNIC_CRQ_CMD_RSP:
3298 dev_err(dev, "Got an invalid msg type 0x%02x\n",
3299 gen_crq->first);
3300 return;
3303 switch (gen_crq->cmd) {
3304 case VERSION_EXCHANGE_RSP:
3305 rc = crq->version_exchange_rsp.rc.code;
3306 if (rc) {
3307 dev_err(dev, "Error %ld in VERSION_EXCHG_RSP\n", rc);
3308 break;
3309 }
3310 dev_info(dev, "Partner protocol version is %d\n",
3311 crq->version_exchange_rsp.version);
3312 if (be16_to_cpu(crq->version_exchange_rsp.version) <
3313 ibmvnic_version)
3314 ibmvnic_version =
3315 be16_to_cpu(crq->version_exchange_rsp.version);
3316 send_cap_queries(adapter);
3318 case QUERY_CAPABILITY_RSP:
3319 handle_query_cap_rsp(crq, adapter);
3322 handle_query_map_rsp(crq, adapter);
3324 case REQUEST_MAP_RSP:
3325 handle_request_map_rsp(crq, adapter);
3327 case REQUEST_UNMAP_RSP:
3328 handle_request_unmap_rsp(crq, adapter);
3330 case REQUEST_CAPABILITY_RSP:
3331 handle_request_cap_rsp(crq, adapter);
3334 netdev_dbg(netdev, "Got Login Response\n");
3335 handle_login_rsp(crq, adapter);
3337 case LOGICAL_LINK_STATE_RSP:
3338 netdev_dbg(netdev, "Got Logical Link State Response\n");
3339 adapter->logical_link_state =
3340 crq->logical_link_state_rsp.link_state;
3342 case LINK_STATE_INDICATION:
3343 netdev_dbg(netdev, "Got Logical Link State Indication\n");
3344 adapter->phys_link_state =
3345 crq->link_state_indication.phys_link_state;
3346 adapter->logical_link_state =
3347 crq->link_state_indication.logical_link_state;
3349 case CHANGE_MAC_ADDR_RSP:
3350 netdev_dbg(netdev, "Got MAC address change Response\n");
3351 handle_change_mac_rsp(crq, adapter);
3353 case ERROR_INDICATION:
3354 netdev_dbg(netdev, "Got Error Indication\n");
3355 handle_error_indication(crq, adapter);
3357 case REQUEST_ERROR_RSP:
3358 netdev_dbg(netdev, "Got Error Detail Response\n");
3359 handle_error_info_rsp(crq, adapter);
3361 case REQUEST_STATISTICS_RSP:
3362 netdev_dbg(netdev, "Got Statistics Response\n");
3363 complete(&adapter->stats_done);
3365 case REQUEST_DUMP_SIZE_RSP:
3366 netdev_dbg(netdev, "Got Request Dump Size Response\n");
3367 handle_dump_size_rsp(crq, adapter);
3369 case REQUEST_DUMP_RSP:
3370 netdev_dbg(netdev, "Got Request Dump Response\n");
3371 complete(&adapter->fw_done);
3373 case QUERY_IP_OFFLOAD_RSP:
3374 netdev_dbg(netdev, "Got Query IP offload Response\n");
3375 handle_query_ip_offload_rsp(adapter);
3377 case MULTICAST_CTRL_RSP:
3378 netdev_dbg(netdev, "Got multicast control Response\n");
3380 case CONTROL_IP_OFFLOAD_RSP:
3381 netdev_dbg(netdev, "Got Control IP offload Response\n");
3382 dma_unmap_single(dev, adapter->ip_offload_ctrl_tok,
3383 sizeof(adapter->ip_offload_ctrl),
3385 /* We're done with the queries, perform the login */
3386 send_login(adapter);
3388 case REQUEST_RAS_COMP_NUM_RSP:
3389 netdev_dbg(netdev, "Got Request RAS Comp Num Response\n");
3390 if (crq->request_ras_comp_num_rsp.rc.code == 10) { /* 10: not supported */
3391 netdev_dbg(netdev, "Request RAS Comp Num not supported\n");
3394 adapter->ras_comp_num =
3395 be32_to_cpu(crq->request_ras_comp_num_rsp.num_components);
3396 handle_request_ras_comp_num_rsp(crq, adapter);
3398 case REQUEST_RAS_COMPS_RSP:
3399 netdev_dbg(netdev, "Got Request RAS Comps Response\n");
3400 handle_request_ras_comps_rsp(crq, adapter);
3402 case CONTROL_RAS_RSP:
3403 netdev_dbg(netdev, "Got Control RAS Response\n");
3404 handle_control_ras_rsp(crq, adapter);
3406 case COLLECT_FW_TRACE_RSP:
3407 netdev_dbg(netdev, "Got Collect firmware trace Response\n");
3408 complete(&adapter->fw_done);
3411 netdev_err(netdev, "Got an invalid cmd type 0x%02x\n",
3412 gen_crq->cmd);
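3414 /* CRQ interrupt: drain all pending messages, then re-check once after
3415 re-enabling interrupts to close the race with a late arrival. */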
3416 static irqreturn_t ibmvnic_interrupt(int irq, void *instance)
3418 struct ibmvnic_adapter *adapter = instance;
3419 struct ibmvnic_crq_queue *queue = &adapter->crq;
3420 struct vio_dev *vdev = adapter->vdev;
3421 union ibmvnic_crq *crq;
3422 unsigned long flags;
3425 spin_lock_irqsave(&queue->lock, flags);
3426 vio_disable_interrupts(vdev);
3428 /* Pull all the valid messages off the CRQ */
3429 while ((crq = ibmvnic_next_crq(adapter)) != NULL) {
3430 ibmvnic_handle_crq(crq, adapter);
3431 crq->generic.first = 0;
3433 vio_enable_interrupts(vdev);
3434 crq = ibmvnic_next_crq(adapter);
3435 if (crq) {
3436 vio_disable_interrupts(vdev);
3437 ibmvnic_handle_crq(crq, adapter);
3438 crq->generic.first = 0;
3443 spin_unlock_irqrestore(&queue->lock, flags);
3447 static int ibmvnic_reenable_crq_queue(struct ibmvnic_adapter *adapter)
3449 struct vio_dev *vdev = adapter->vdev;
3453 rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
3454 } while (rc == H_IN_PROGRESS || rc == H_BUSY || H_IS_LONG_BUSY(rc));
3456 if (rc)
3457 dev_err(&vdev->dev, "Error enabling adapter (rc=%d)\n", rc);
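3459 /* Free and re-register the CRQ with the hypervisor (e.g. after a kexec
3460 leaves stale state), then clear the message page for reuse. */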
3462 static int ibmvnic_reset_crq(struct ibmvnic_adapter *adapter)
3464 struct ibmvnic_crq_queue *crq = &adapter->crq;
3465 struct device *dev = &adapter->vdev->dev;
3466 struct vio_dev *vdev = adapter->vdev;
3471 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3472 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3474 /* Clean out the queue */
3475 memset(crq->msgs, 0, PAGE_SIZE);
3478 /* And re-open it again */
3479 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3480 crq->msg_token, PAGE_SIZE);
3482 if (rc == H_CLOSED)
3483 /* Adapter is good, but other end is not ready */
3484 dev_warn(dev, "Partner adapter not ready\n");
3485 else if (rc != 0)
3486 dev_warn(dev, "Couldn't register crq (rc=%d)\n", rc);
3491 static void ibmvnic_release_crq_queue(struct ibmvnic_adapter *adapter)
3493 struct ibmvnic_crq_queue *crq = &adapter->crq;
3494 struct vio_dev *vdev = adapter->vdev;
3497 netdev_dbg(adapter->netdev, "Releasing CRQ\n");
3498 free_irq(vdev->irq, adapter);
3500 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3501 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3503 dma_unmap_single(&vdev->dev, crq->msg_token, PAGE_SIZE,
3505 free_page((unsigned long)crq->msgs);
3508 static int ibmvnic_init_crq_queue(struct ibmvnic_adapter *adapter)
3510 struct ibmvnic_crq_queue *crq = &adapter->crq;
3511 struct device *dev = &adapter->vdev->dev;
3512 struct vio_dev *vdev = adapter->vdev;
3513 int rc, retrc = -ENOMEM;
3515 crq->msgs = (union ibmvnic_crq *)get_zeroed_page(GFP_KERNEL);
3516 /* Should we allocate more than one page? */
3518 if (!crq->msgs)
3519 return -ENOMEM;
3521 crq->size = PAGE_SIZE / sizeof(*crq->msgs);
3522 crq->msg_token = dma_map_single(dev, crq->msgs, PAGE_SIZE,
3524 if (dma_mapping_error(dev, crq->msg_token))
3527 rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
3528 crq->msg_token, PAGE_SIZE);
3530 if (rc == H_RESOURCE)
3531 /* maybe kexecing and resource is busy. try a reset */
3532 rc = ibmvnic_reset_crq(adapter);
3535 if (rc == H_CLOSED) {
3536 dev_warn(dev, "Partner adapter not ready\n");
3537 } else if (rc) {
3538 dev_warn(dev, "Error %d opening adapter\n", rc);
3539 goto reg_crq_failed;
3544 netdev_dbg(adapter->netdev, "registering irq 0x%x\n", vdev->irq);
3545 rc = request_irq(vdev->irq, ibmvnic_interrupt, 0, IBMVNIC_NAME,
3546 adapter);
3547 if (rc) {
3548 dev_err(dev, "Couldn't register irq 0x%x. rc=%d\n",
3549 vdev->irq, rc);
3550 goto req_irq_failed;
3553 rc = vio_enable_interrupts(vdev);
3554 if (rc) {
3555 dev_err(dev, "Error %d enabling interrupts\n", rc);
3556 goto req_irq_failed;
3560 spin_lock_init(&crq->lock);
3566 rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
3567 } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
3569 dma_unmap_single(dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
3571 free_page((unsigned long)crq->msgs);
3575 /* debugfs for dump */
3576 static int ibmvnic_dump_show(struct seq_file *seq, void *v)
3578 struct net_device *netdev = seq->private;
3579 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3580 struct device *dev = &adapter->vdev->dev;
3581 union ibmvnic_crq crq;
3583 memset(&crq, 0, sizeof(crq));
3584 crq.request_dump_size.first = IBMVNIC_CRQ_CMD;
3585 crq.request_dump_size.cmd = REQUEST_DUMP_SIZE;
3586 init_completion(&adapter->fw_done); /* before the send, so the response cannot complete first */
3587 ibmvnic_send_crq(adapter, &crq);
3589 wait_for_completion(&adapter->fw_done);
3591 seq_write(seq, adapter->dump_data, adapter->dump_data_size);
3593 dma_unmap_single(dev, adapter->dump_data_token, adapter->dump_data_size,
3596 kfree(adapter->dump_data);
3601 static int ibmvnic_dump_open(struct inode *inode, struct file *file)
3603 return single_open(file, ibmvnic_dump_show, inode->i_private);
3606 static const struct file_operations ibmvnic_dump_ops = {
3607 .owner = THIS_MODULE,
3608 .open = ibmvnic_dump_open,
3609 .read = seq_read,
3610 .llseek = seq_lseek,
3611 .release = single_release,
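3612 /* Worker for passive (server-initiated) init: redo the version exchange,
3613 renegotiate capabilities, then restart or register the net device. */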
3614 static void handle_crq_init_rsp(struct work_struct *work)
3616 struct ibmvnic_adapter *adapter = container_of(work,
3617 struct ibmvnic_adapter,
3618 vnic_crq_init);
3619 struct device *dev = &adapter->vdev->dev;
3620 struct net_device *netdev = adapter->netdev;
3621 unsigned long timeout = msecs_to_jiffies(30000);
3622 bool restart = false;
3625 if (adapter->failover) {
3626 release_sub_crqs(adapter);
3627 if (netif_running(netdev)) {
3628 netif_tx_disable(netdev);
3629 ibmvnic_close(netdev);
3634 send_version_xchg(adapter);
3635 reinit_completion(&adapter->init_done);
3636 if (!wait_for_completion_timeout(&adapter->init_done, timeout)) {
3637 dev_err(dev, "Passive init timeout\n");
3642 if (adapter->renegotiate) {
3643 adapter->renegotiate = false;
3644 release_sub_crqs_no_irqs(adapter);
3645 send_cap_queries(adapter);
3647 reinit_completion(&adapter->init_done);
3648 if (!wait_for_completion_timeout(&adapter->init_done,
3649 timeout)) {
3650 dev_err(dev, "Passive init timeout\n");
3654 } while (adapter->renegotiate);
3655 rc = init_sub_crq_irqs(adapter);
3660 netdev->real_num_tx_queues = adapter->req_tx_queues;
3661 netdev->mtu = adapter->req_mtu;
3662 netdev->min_mtu = adapter->min_mtu;
3663 netdev->max_mtu = adapter->max_mtu;
3665 if (adapter->failover) {
3666 adapter->failover = false;
3668 rc = ibmvnic_open(netdev);
3670 goto restart_failed;
3672 netif_carrier_on(netdev);
3676 rc = register_netdev(netdev);
3679 "failed to register netdev rc=%d\n", rc);
3680 goto register_failed;
3682 dev_info(dev, "ibmvnic registered\n");
3687 dev_err(dev, "Failed to restart ibmvnic, rc=%d\n", rc);
3689 release_sub_crqs(adapter);
3691 dev_err(dev, "Passive initialization was not successful\n");
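3692 /* VIO probe: allocate the netdev, bring up the CRQ, negotiate capabilities
3693 with the server, then register with the network stack. */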
3694 static int ibmvnic_probe(struct vio_dev *dev, const struct vio_device_id *id)
3696 unsigned long timeout = msecs_to_jiffies(30000);
3697 struct ibmvnic_adapter *adapter;
3698 struct net_device *netdev;
3699 unsigned char *mac_addr_p;
3701 char buf[16]; /* debugfs name buf */
3704 dev_dbg(&dev->dev, "entering ibmvnic_probe for UA 0x%x\n",
3705 dev->unit_address);
3707 mac_addr_p = (unsigned char *)vio_get_attribute(dev,
3708 VETH_MAC_ADDR, NULL);
3711 "(%s:%3.3d) ERROR: Can't find MAC_ADDR attribute\n",
3712 __FILE__, __LINE__);
3716 netdev = alloc_etherdev_mq(sizeof(struct ibmvnic_adapter),
3717 IBMVNIC_MAX_TX_QUEUES);
3721 adapter = netdev_priv(netdev);
3722 dev_set_drvdata(&dev->dev, netdev);
3723 adapter->vdev = dev;
3724 adapter->netdev = netdev;
3725 adapter->failover = false;
3727 ether_addr_copy(adapter->mac_addr, mac_addr_p);
3728 ether_addr_copy(netdev->dev_addr, adapter->mac_addr);
3729 netdev->irq = dev->irq;
3730 netdev->netdev_ops = &ibmvnic_netdev_ops;
3731 netdev->ethtool_ops = &ibmvnic_ethtool_ops;
3732 SET_NETDEV_DEV(netdev, &dev->dev);
3734 INIT_WORK(&adapter->vnic_crq_init, handle_crq_init_rsp);
3735 INIT_WORK(&adapter->ibmvnic_xport, ibmvnic_xport_event);
3737 spin_lock_init(&adapter->stats_lock);
3739 rc = ibmvnic_init_crq_queue(adapter);
3741 dev_err(&dev->dev, "Couldn't initialize crq. rc=%d\n", rc);
3745 INIT_LIST_HEAD(&adapter->errors);
3746 INIT_LIST_HEAD(&adapter->inflight);
3747 spin_lock_init(&adapter->error_list_lock);
3748 spin_lock_init(&adapter->inflight_lock);
3750 adapter->stats_token = dma_map_single(&dev->dev, &adapter->stats,
3751 sizeof(struct ibmvnic_statistics),
3752 DMA_FROM_DEVICE); /* direction assumed: the device writes the stats */
3753 if (dma_mapping_error(&dev->dev, adapter->stats_token)) {
3754 if (!firmware_has_feature(FW_FEATURE_CMO))
3755 dev_err(&dev->dev, "Couldn't map stats buffer\n");
3760 snprintf(buf, sizeof(buf), "ibmvnic_%x", dev->unit_address);
3761 ent = debugfs_create_dir(buf, NULL);
3762 if (!ent || IS_ERR(ent)) {
3763 dev_info(&dev->dev, "debugfs create directory failed\n");
3764 adapter->debugfs_dir = NULL;
3766 adapter->debugfs_dir = ent;
3767 ent = debugfs_create_file("dump", S_IRUGO, adapter->debugfs_dir,
3768 netdev, &ibmvnic_dump_ops);
3769 if (!ent || IS_ERR(ent)) {
3771 "debugfs create dump file failed\n");
3772 adapter->debugfs_dump = NULL;
3774 adapter->debugfs_dump = ent;
3777 init_completion(&adapter->init_done); /* before the send, so the response cannot complete first */
3778 ibmvnic_send_crq_init(adapter);
3780 if (!wait_for_completion_timeout(&adapter->init_done, timeout))
3784 if (adapter->renegotiate) {
3785 adapter->renegotiate = false;
3786 release_sub_crqs_no_irqs(adapter);
3787 send_cap_queries(adapter);
3789 reinit_completion(&adapter->init_done);
3790 if (!wait_for_completion_timeout(&adapter->init_done,
3791 timeout))
3794 } while (adapter->renegotiate);
3796 rc = init_sub_crq_irqs(adapter);
3798 dev_err(&dev->dev, "failed to initialize sub crq irqs\n");
3802 netdev->real_num_tx_queues = adapter->req_tx_queues;
3803 netdev->mtu = adapter->req_mtu;
3805 rc = register_netdev(netdev);
3806 if (rc) {
3807 dev_err(&dev->dev, "failed to register netdev rc=%d\n", rc);
3810 dev_info(&dev->dev, "ibmvnic registered\n");
3815 release_sub_crqs(adapter);
3817 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3818 debugfs_remove_recursive(adapter->debugfs_dir);
3820 ibmvnic_release_crq_queue(adapter);
3822 free_netdev(netdev);
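3823 /* Remove: unwind probe in reverse order, including the RAS buffers that
3824 only exist once the server has reported its component count. */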
3826 static int ibmvnic_remove(struct vio_dev *dev)
3828 struct net_device *netdev = dev_get_drvdata(&dev->dev);
3829 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3831 unregister_netdev(netdev);
3833 release_sub_crqs(adapter);
3835 ibmvnic_release_crq_queue(adapter);
3837 if (adapter->debugfs_dir && !IS_ERR(adapter->debugfs_dir))
3838 debugfs_remove_recursive(adapter->debugfs_dir);
3840 if (adapter->ras_comps)
3841 dma_free_coherent(&dev->dev,
3842 adapter->ras_comp_num *
3843 sizeof(struct ibmvnic_fw_component),
3844 adapter->ras_comps, adapter->ras_comps_tok);
3846 kfree(adapter->ras_comp_int);
3848 free_netdev(netdev);
3849 dev_set_drvdata(&dev->dev, NULL);
3854 static unsigned long ibmvnic_get_desired_dma(struct vio_dev *vdev)
3856 struct net_device *netdev = dev_get_drvdata(&vdev->dev);
3857 struct ibmvnic_adapter *adapter;
3858 struct iommu_table *tbl;
3859 unsigned long ret = 0;
3862 tbl = get_iommu_table_base(&vdev->dev);
3864 /* netdev inits at probe time along with the structures we need below */
3865 if (!netdev)
3866 return IOMMU_PAGE_ALIGN(IBMVNIC_IO_ENTITLEMENT_DEFAULT, tbl);
3868 adapter = netdev_priv(netdev);
3870 ret += PAGE_SIZE; /* the crq message queue */
3871 ret += adapter->bounce_buffer_size;
3872 ret += IOMMU_PAGE_ALIGN(sizeof(struct ibmvnic_statistics), tbl);
3874 for (i = 0; i < adapter->req_tx_queues + adapter->req_rx_queues; i++)
3875 ret += 4 * PAGE_SIZE; /* the scrq message queue */
3877 for (i = 0; i < be32_to_cpu(adapter->login_rsp_buf->num_rxadd_subcrqs);
3878 i++)
3879 ret += adapter->rx_pool[i].size *
3880 IOMMU_PAGE_ALIGN(adapter->rx_pool[i].buff_size, tbl);
3885 static int ibmvnic_resume(struct device *dev)
3887 struct net_device *netdev = dev_get_drvdata(dev);
3888 struct ibmvnic_adapter *adapter = netdev_priv(netdev);
3891 /* kick the interrupt handlers just in case we lost an interrupt */
3892 for (i = 0; i < adapter->req_rx_queues; i++)
3893 ibmvnic_interrupt_rx(adapter->rx_scrq[i]->irq,
3894 adapter->rx_scrq[i]);
3899 static struct vio_device_id ibmvnic_device_table[] = {
3900 {"network", "IBM,vnic"},
3903 MODULE_DEVICE_TABLE(vio, ibmvnic_device_table);
3905 static const struct dev_pm_ops ibmvnic_pm_ops = {
3906 .resume = ibmvnic_resume
3909 static struct vio_driver ibmvnic_driver = {
3910 .id_table = ibmvnic_device_table,
3911 .probe = ibmvnic_probe,
3912 .remove = ibmvnic_remove,
3913 .get_desired_dma = ibmvnic_get_desired_dma,
3914 .name = ibmvnic_driver_name,
3915 .pm = &ibmvnic_pm_ops,
3918 /* module functions */
3919 static int __init ibmvnic_module_init(void)
3921 pr_info("%s: %s %s\n", ibmvnic_driver_name, ibmvnic_driver_string,
3922 IBMVNIC_DRIVER_VERSION);
3924 return vio_register_driver(&ibmvnic_driver);
3927 static void __exit ibmvnic_module_exit(void)
3929 vio_unregister_driver(&ibmvnic_driver);
3932 module_init(ibmvnic_module_init);
3933 module_exit(ibmvnic_module_exit);