// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include <linux/module.h>
#include "efx_channels.h"
#include "efx.h"
#include "efx_common.h"
#include "tx_common.h"
#include "rx_common.h"
#include "nic.h"
#include "sriov.h"
#include "workarounds.h"

/* This is the first interrupt mode to try out of:
 * 0 => MSI-X
 * 1 => MSI
 * 2 => legacy
 */
unsigned int efx_interrupt_mode = EFX_INT_MODE_MSIX;

/* This is the requested number of CPUs to use for Receive-Side Scaling (RSS),
 * i.e. the number of CPUs among which we may distribute simultaneous
 * interrupt handling.
 *
 * Cards without MSI-X will only target one CPU via legacy or MSI interrupt.
 * The default (0) means to assign an interrupt to each core.
 */
unsigned int rss_cpus;

static unsigned int irq_adapt_low_thresh = 8000;
module_param(irq_adapt_low_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_low_thresh,
		 "Threshold score for reducing IRQ moderation");

static unsigned int irq_adapt_high_thresh = 16000;
module_param(irq_adapt_high_thresh, uint, 0644);
MODULE_PARM_DESC(irq_adapt_high_thresh,
		 "Threshold score for increasing IRQ moderation");

/* This is the weight assigned to each of the (per-channel) virtual
 * NAPI devices.
 */
static int napi_weight = 64;

int efx_channel_dummy_op_int(struct efx_channel *channel)
{
	return 0;
}

void efx_channel_dummy_op_void(struct efx_channel *channel)
{
}

static const struct efx_channel_type efx_default_channel_type = {
	.pre_probe		= efx_channel_dummy_op_int,
	.post_remove		= efx_channel_dummy_op_void,
	.get_name		= efx_get_channel_name,
	.copy			= efx_copy_channel,
	.want_txqs		= efx_default_channel_want_txqs,
	.keep_eventq		= false,
	.want_pio		= true,
};

static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
{
	cpumask_var_t thread_mask;
	unsigned int count;
	int cpu;

	if (rss_cpus) {
		count = rss_cpus;
	} else {
		if (unlikely(!zalloc_cpumask_var(&thread_mask, GFP_KERNEL))) {
			netif_warn(efx, probe, efx->net_dev,
				   "RSS disabled due to allocation failure\n");
			return 1;
		}

		count = 0;
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, thread_mask)) {
				++count;
				cpumask_or(thread_mask, thread_mask,
					   topology_sibling_cpumask(cpu));
			}
		}

		free_cpumask_var(thread_mask);
	}

	if (count > EFX_MAX_RX_QUEUES) {
		netif_cond_dbg(efx, probe, efx->net_dev, !rss_cpus, warn,
			       "Reducing number of rx queues from %u to %u.\n",
			       count, EFX_MAX_RX_QUEUES);
		count = EFX_MAX_RX_QUEUES;
	}

	/* If RSS is requested for the PF *and* VFs then we can't write RSS
	 * table entries that are inaccessible to VFs
	 */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		if (efx->type->sriov_wanted(efx) && efx_vf_size(efx) > 1 &&
		    count > efx_vf_size(efx)) {
			netif_warn(efx, probe, efx->net_dev,
				   "Reducing number of RSS channels from %u to %u for "
				   "VF support. Increase vf-msix-limit to use more "
				   "channels on the PF.\n",
				   count, efx_vf_size(efx));
			count = efx_vf_size(efx);
		}
	}
#endif

	return count;
}
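
/* Work out the channel and queue layout for MSI-X, including the per-CPU
 * TX queues and shared event queues needed for XDP_TX/XDP_REDIRECT, and
 * record the resulting counts in the efx_nic structure.
 */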
static int efx_allocate_msix_channels(struct efx_nic *efx,
				      unsigned int max_channels,
				      unsigned int extra_channels,
				      unsigned int parallelism)
{
	unsigned int n_channels = parallelism;
	int vec_count;
	int tx_per_ev;
	int n_xdp_tx;
	int n_xdp_ev;

	if (efx_separate_tx_channels)
		n_channels *= 2;
	n_channels += extra_channels;

	/* To allow XDP transmit to happen from arbitrary NAPI contexts
	 * we allocate a TX queue per CPU. We share event queues across
	 * multiple tx queues, assuming tx and ev queues are both
	 * maximum size.
	 */
	tx_per_ev = EFX_MAX_EVQ_SIZE / EFX_TXQ_MAX_ENT(efx);
	tx_per_ev = min(tx_per_ev, EFX_MAX_TXQ_PER_CHANNEL);
	n_xdp_tx = num_possible_cpus();
	n_xdp_ev = DIV_ROUND_UP(n_xdp_tx, tx_per_ev);

	vec_count = pci_msix_vec_count(efx->pci_dev);
	if (vec_count < 0)
		return vec_count;

	max_channels = min_t(unsigned int, vec_count, max_channels);

	/* Check resources.
	 * We need a channel per event queue, plus a VI per tx queue.
	 * This may be more pessimistic than it needs to be.
	 */
	if (n_channels + n_xdp_ev > max_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "Insufficient resources for %d XDP event queues (%d other channels, max %d)\n",
			  n_xdp_ev, n_channels, max_channels);
		netif_err(efx, drv, efx->net_dev,
			  "XDP_TX and XDP_REDIRECT will not work on this interface");
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = 0;
	} else if (n_channels + n_xdp_tx > efx->max_vis) {
		netif_err(efx, drv, efx->net_dev,
			  "Insufficient resources for %d XDP TX queues (%d other channels, max VIs %d)\n",
			  n_xdp_tx, n_channels, efx->max_vis);
		netif_err(efx, drv, efx->net_dev,
			  "XDP_TX and XDP_REDIRECT will not work on this interface");
		efx->n_xdp_channels = 0;
		efx->xdp_tx_per_channel = 0;
		efx->xdp_tx_queue_count = 0;
	} else {
		efx->n_xdp_channels = n_xdp_ev;
		efx->xdp_tx_per_channel = tx_per_ev;
		efx->xdp_tx_queue_count = n_xdp_tx;
		n_channels += n_xdp_ev;
		netif_dbg(efx, drv, efx->net_dev,
			  "Allocating %d TX and %d event queues for XDP\n",
			  n_xdp_tx, n_xdp_ev);
	}

	if (vec_count < n_channels) {
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Insufficient MSI-X vectors available (%d < %u).\n",
			  vec_count, n_channels);
		netif_err(efx, drv, efx->net_dev,
			  "WARNING: Performance may be reduced.\n");
		n_channels = vec_count;
	}

	n_channels = min(n_channels, max_channels);

	efx->n_channels = n_channels;

	/* Ignore XDP tx channels when creating rx channels. */
	n_channels -= efx->n_xdp_channels;

	if (efx_separate_tx_channels) {
		efx->n_tx_channels =
			min(max(n_channels / 2, 1U),
			    efx->max_tx_channels);
		efx->tx_channel_offset =
			n_channels - efx->n_tx_channels;
		efx->n_rx_channels =
			max(n_channels -
			    efx->n_tx_channels, 1U);
	} else {
		efx->n_tx_channels = min(n_channels, efx->max_tx_channels);
		efx->tx_channel_offset = 0;
		efx->n_rx_channels = n_channels;
	}

	efx->n_rx_channels = min(efx->n_rx_channels, parallelism);
	efx->n_tx_channels = min(efx->n_tx_channels, parallelism);

	efx->xdp_channel_offset = n_channels;

	netif_dbg(efx, drv, efx->net_dev,
		  "Allocating %u RX channels\n",
		  efx->n_rx_channels);

	return efx->n_channels;
}

/* Probe the number and type of interrupts we are able to obtain, and
 * the resulting numbers of channels and RX queues.
 */
int efx_probe_interrupts(struct efx_nic *efx)
{
	unsigned int extra_channels = 0;
	unsigned int rss_spread;
	unsigned int i, j;
	int rc;

	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++)
		if (efx->extra_channel_type[i])
			++extra_channels;

	if (efx->interrupt_mode == EFX_INT_MODE_MSIX) {
		unsigned int parallelism = efx_wanted_parallelism(efx);
		struct msix_entry xentries[EFX_MAX_CHANNELS];
		unsigned int n_channels;

		rc = efx_allocate_msix_channels(efx, efx->max_channels,
						extra_channels, parallelism);
		if (rc >= 0) {
			n_channels = rc;
			for (i = 0; i < n_channels; i++)
				xentries[i].entry = i;
			rc = pci_enable_msix_range(efx->pci_dev, xentries, 1,
						   n_channels);
		}
		if (rc < 0) {
			/* Fall back to single channel MSI */
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI-X\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_MSI)
				efx->interrupt_mode = EFX_INT_MODE_MSI;
			else
				return rc;
		} else if (rc < n_channels) {
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Insufficient MSI-X vectors"
				  " available (%d < %u).\n", rc, n_channels);
			netif_err(efx, drv, efx->net_dev,
				  "WARNING: Performance may be reduced.\n");
			n_channels = rc;
		}

		if (rc > 0) {
			for (i = 0; i < efx->n_channels; i++)
				efx_get_channel(efx, i)->irq =
					xentries[i].vector;
		}
	}

	/* Try single interrupt MSI */
	if (efx->interrupt_mode == EFX_INT_MODE_MSI) {
		efx->n_channels = 1;
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		rc = pci_enable_msi(efx->pci_dev);
		if (rc == 0) {
			efx_get_channel(efx, 0)->irq = efx->pci_dev->irq;
		} else {
			netif_err(efx, drv, efx->net_dev,
				  "could not enable MSI\n");
			if (efx->type->min_interrupt_mode >= EFX_INT_MODE_LEGACY)
				efx->interrupt_mode = EFX_INT_MODE_LEGACY;
			else
				return rc;
		}
	}

	/* Assume legacy interrupts */
	if (efx->interrupt_mode == EFX_INT_MODE_LEGACY) {
		efx->n_channels = 1 + (efx_separate_tx_channels ? 1 : 0);
		efx->n_rx_channels = 1;
		efx->n_tx_channels = 1;
		efx->n_xdp_channels = 0;
		efx->xdp_channel_offset = efx->n_channels;
		efx->legacy_irq = efx->pci_dev->irq;
	}

	/* Assign extra channels if possible, before XDP channels */
	efx->n_extra_tx_channels = 0;
	j = efx->xdp_channel_offset;
	for (i = 0; i < EFX_MAX_EXTRA_CHANNELS; i++) {
		if (!efx->extra_channel_type[i])
			continue;
		if (j <= efx->tx_channel_offset + efx->n_tx_channels) {
			efx->extra_channel_type[i]->handle_no_channel(efx);
		} else {
			--j;
			efx_get_channel(efx, j)->type =
				efx->extra_channel_type[i];
			if (efx_channel_has_tx_queues(efx_get_channel(efx, j)))
				efx->n_extra_tx_channels++;
		}
	}

	rss_spread = efx->n_rx_channels;
	/* RSS might be usable on VFs even if it is disabled on the PF */
#ifdef CONFIG_SFC_SRIOV
	if (efx->type->sriov_wanted) {
		efx->rss_spread = ((rss_spread > 1 ||
				    !efx->type->sriov_wanted(efx)) ?
				   rss_spread : efx_vf_size(efx));
		return 0;
	}
#endif
	efx->rss_spread = rss_spread;

	return 0;
}

#if defined(CONFIG_SMP)
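/* Hint the IRQ core to spread channel interrupts over CPUs local to the
 * device's NUMA node, one channel per CPU.
 */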
void efx_set_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;
	unsigned int cpu;

	efx_for_each_channel(channel, efx) {
		cpu = cpumask_local_spread(channel->channel,
					   pcibus_to_node(efx->pci_dev->bus));
		irq_set_affinity_hint(channel->irq, cpumask_of(cpu));
	}
}

void efx_clear_interrupt_affinity(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		irq_set_affinity_hint(channel->irq, NULL);
}
#else
void
efx_set_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}

void
efx_clear_interrupt_affinity(struct efx_nic *efx __attribute__ ((unused)))
{
}
#endif /* CONFIG_SMP */

void efx_remove_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	/* Remove MSI/MSI-X interrupts */
	efx_for_each_channel(channel, efx)
		channel->irq = 0;
	pci_disable_msi(efx->pci_dev);
	pci_disable_msix(efx->pci_dev);

	/* Remove legacy interrupt */
	efx->legacy_irq = 0;
}

/* Create event queue
 * Event queue memory allocations are done only once. If the channel
 * is reset, the memory buffer will be reused; this guards against
 * errors during channel reset and also simplifies interrupt handling.
 */
int efx_probe_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	unsigned long entries;

	netif_dbg(efx, probe, efx->net_dev,
		  "chan %d create event queue\n", channel->channel);

	/* Build an event queue with room for one event per tx and rx buffer,
	 * plus some extra for link state events and MCDI completions.
	 */
	entries = roundup_pow_of_two(efx->rxq_entries + efx->txq_entries + 128);
	EFX_WARN_ON_PARANOID(entries > EFX_MAX_EVQ_SIZE);
	channel->eventq_mask = max(entries, EFX_MIN_EVQ_SIZE) - 1;

	return efx_nic_probe_eventq(channel);
}

/* Prepare channel's event queue */
int efx_init_eventq(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;
	int rc;

	EFX_WARN_ON_PARANOID(channel->eventq_init);

	netif_dbg(efx, drv, efx->net_dev,
		  "chan %d init event queue\n", channel->channel);

	rc = efx_nic_init_eventq(channel);
	if (rc == 0) {
		efx->type->push_irq_moderation(channel);
		channel->eventq_read_ptr = 0;
		channel->eventq_init = true;
	}
	return rc;
}

/* Enable event queue processing and NAPI */
void efx_start_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, ifup, channel->efx->net_dev,
		  "chan %d start event queue\n", channel->channel);

	/* Make sure the NAPI handler sees the enabled flag set */
	channel->enabled = true;
	smp_wmb();

	napi_enable(&channel->napi_str);
	efx_nic_eventq_read_ack(channel);
}

/* Disable event queue processing and NAPI */
void efx_stop_eventq(struct efx_channel *channel)
{
	if (!channel->enabled)
		return;

	napi_disable(&channel->napi_str);
	channel->enabled = false;
}

void efx_fini_eventq(struct efx_channel *channel)
{
	if (!channel->eventq_init)
		return;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d fini event queue\n", channel->channel);

	efx_nic_fini_eventq(channel);
	channel->eventq_init = false;
}

void efx_remove_eventq(struct efx_channel *channel)
{
	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "chan %d remove event queue\n", channel->channel);

	efx_nic_remove_eventq(channel);
}

/**************************************************************************
 *
 * Channel handling
 *
 *************************************************************************/

#ifdef CONFIG_RFS_ACCEL
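/* Periodic work item that ages out accelerated RFS (ARFS) filters. The scan
 * only runs once the computed quota is large enough to be worthwhile, and the
 * work always reschedules itself so expiry happens even when NAPI is idle.
 */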
static void efx_filter_rfs_expire(struct work_struct *data)
{
	struct delayed_work *dwork = to_delayed_work(data);
	struct efx_channel *channel;
	unsigned int time, quota;

	channel = container_of(dwork, struct efx_channel, filter_work);
	time = jiffies - channel->rfs_last_expiry;
	quota = channel->rfs_filter_count * time / (30 * HZ);
	if (quota >= 20 && __efx_filter_rfs_expire(channel, min(channel->rfs_filter_count, quota)))
		channel->rfs_last_expiry += time;
	/* Ensure we do more work eventually even if NAPI poll is not happening */
	schedule_delayed_work(dwork, 30 * HZ);
}
#endif

/* Allocate and initialise a channel structure. */
static struct efx_channel *efx_alloc_channel(struct efx_nic *efx, int i)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	channel->efx = efx;
	channel->channel = i;
	channel->type = &efx_default_channel_type;

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		tx_queue->efx = efx;
		tx_queue->queue = -1;
		tx_queue->label = j;
		tx_queue->channel = channel;
	}

#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	rx_queue = &channel->rx_queue;
	rx_queue->efx = efx;
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);

	return channel;
}
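
/* Allocate every possible channel structure up front, set up the matching
 * MSI contexts, and pick the initial interrupt mode and channel limits.
 */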
int efx_init_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++) {
		efx->channel[i] = efx_alloc_channel(efx, i);
		if (!efx->channel[i])
			return -ENOMEM;
		efx->msi_context[i].efx = efx;
		efx->msi_context[i].index = i;
	}

	/* Higher numbered interrupt modes are less capable! */
	efx->interrupt_mode = min(efx->type->min_interrupt_mode,
				  efx_interrupt_mode);

	efx->max_channels = EFX_MAX_CHANNELS;
	efx->max_tx_channels = EFX_MAX_CHANNELS;

	return 0;
}

void efx_fini_channels(struct efx_nic *efx)
{
	unsigned int i;

	for (i = 0; i < EFX_MAX_CHANNELS; i++)
		if (efx->channel[i]) {
			kfree(efx->channel[i]);
			efx->channel[i] = NULL;
		}
}

/* Allocate and initialise a channel structure, copying parameters
 * (but not resources) from an old channel structure.
 */
struct efx_channel *efx_copy_channel(const struct efx_channel *old_channel)
{
	struct efx_rx_queue *rx_queue;
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	int j;

	channel = kmalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return NULL;

	*channel = *old_channel;

	channel->napi_dev = NULL;
	INIT_HLIST_NODE(&channel->napi_str.napi_hash_node);
	channel->napi_str.napi_id = 0;
	channel->napi_str.state = 0;
	memset(&channel->eventq, 0, sizeof(channel->eventq));

	for (j = 0; j < EFX_MAX_TXQ_PER_CHANNEL; j++) {
		tx_queue = &channel->tx_queue[j];
		if (tx_queue->channel)
			tx_queue->channel = channel;
		tx_queue->buffer = NULL;
		tx_queue->cb_page = NULL;
		memset(&tx_queue->txd, 0, sizeof(tx_queue->txd));
	}

	rx_queue = &channel->rx_queue;
	rx_queue->buffer = NULL;
	memset(&rx_queue->rxd, 0, sizeof(rx_queue->rxd));
	timer_setup(&rx_queue->slow_fill, efx_rx_slow_fill, 0);
#ifdef CONFIG_RFS_ACCEL
	INIT_DELAYED_WORK(&channel->filter_work, efx_filter_rfs_expire);
#endif

	return channel;
}
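
/* Allocate the event queue and the RX/TX queues for one channel. On failure,
 * everything that was set up is torn down again via efx_remove_channel().
 */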
static int efx_probe_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	int rc;

	netif_dbg(channel->efx, probe, channel->efx->net_dev,
		  "creating channel %d\n", channel->channel);

	rc = channel->type->pre_probe(channel);
	if (rc)
		goto fail;

	rc = efx_probe_eventq(channel);
	if (rc)
		goto fail;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		rc = efx_probe_tx_queue(tx_queue);
		if (rc)
			goto fail;
	}

	efx_for_each_channel_rx_queue(rx_queue, channel) {
		rc = efx_probe_rx_queue(rx_queue);
		if (rc)
			goto fail;
	}

	channel->rx_list = NULL;

	return 0;

fail:
	efx_remove_channel(channel);
	return rc;
}

void efx_get_channel_name(struct efx_channel *channel, char *buf, size_t len)
{
	struct efx_nic *efx = channel->efx;
	const char *type;
	int number;

	number = channel->channel;

	if (number >= efx->xdp_channel_offset &&
	    !WARN_ON_ONCE(!efx->n_xdp_channels)) {
		type = "-xdp";
		number -= efx->xdp_channel_offset;
	} else if (efx->tx_channel_offset == 0) {
		type = "";
	} else if (number < efx->tx_channel_offset) {
		type = "-rx";
	} else {
		type = "-tx";
		number -= efx->tx_channel_offset;
	}
	snprintf(buf, len, "%s%s-%d", efx->name, type, number);
}

void efx_set_channel_names(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		channel->type->get_name(channel,
					efx->msi_context[channel->channel].name,
					sizeof(efx->msi_context[0].name));
}

int efx_probe_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;
	int rc;

	/* Restart special buffer allocation */
	efx->next_buffer_table = 0;

	/* Probe channels in reverse, so that any 'extra' channels
	 * use the start of the buffer table. This allows the traffic
	 * channels to be resized without moving them or wasting the
	 * entries before them.
	 */
	efx_for_each_channel_rev(channel, efx) {
		rc = efx_probe_channel(channel);
		if (rc) {
			netif_err(efx, probe, efx->net_dev,
				  "failed to create channel %d\n",
				  channel->channel);
			goto fail;
		}
	}
	efx_set_channel_names(efx);

	return 0;

fail:
	efx_remove_channels(efx);
	return rc;
}

void efx_remove_channel(struct efx_channel *channel)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;

	netif_dbg(channel->efx, drv, channel->efx->net_dev,
		  "destroy chan %d\n", channel->channel);

	efx_for_each_channel_rx_queue(rx_queue, channel)
		efx_remove_rx_queue(rx_queue);
	efx_for_each_channel_tx_queue(tx_queue, channel)
		efx_remove_tx_queue(tx_queue);
	efx_remove_eventq(channel);
	channel->type->post_remove(channel);
}

void efx_remove_channels(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_remove_channel(channel);

	kfree(efx->xdp_tx_queues);
}
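
/* Resize the RX and TX rings. Channels whose type can be copied are cloned
 * with the new ring sizes and swapped in; if anything fails, the old channels
 * are swapped back and the error is returned.
 */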
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
	struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
	unsigned int i, next_buffer_table = 0;
	u32 old_rxq_entries, old_txq_entries;
	int rc, rc2;

	rc = efx_check_disabled(efx);
	if (rc)
		return rc;

	/* Not all channels should be reallocated. We must avoid
	 * reallocating their buffer table entries.
	 */
	efx_for_each_channel(channel, efx) {
		struct efx_rx_queue *rx_queue;
		struct efx_tx_queue *tx_queue;

		if (channel->type->copy)
			continue;
		next_buffer_table = max(next_buffer_table,
					channel->eventq.index +
					channel->eventq.entries);
		efx_for_each_channel_rx_queue(rx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						rx_queue->rxd.index +
						rx_queue->rxd.entries);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			next_buffer_table = max(next_buffer_table,
						tx_queue->txd.index +
						tx_queue->txd.entries);
	}

	efx_device_detach_sync(efx);
	efx_stop_all(efx);
	efx_soft_disable_interrupts(efx);

	/* Clone channels (where possible) */
	memset(other_channel, 0, sizeof(other_channel));
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (channel->type->copy)
			channel = channel->type->copy(channel);
		if (!channel) {
			rc = -ENOMEM;
			goto out;
		}
		other_channel[i] = channel;
	}

	/* Swap entry counts and channel pointers */
	old_rxq_entries = efx->rxq_entries;
	old_txq_entries = efx->txq_entries;
	efx->rxq_entries = rxq_entries;
	efx->txq_entries = txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}

	/* Restart buffer table allocation */
	efx->next_buffer_table = next_buffer_table;

	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		if (!channel->type->copy)
			continue;
		rc = efx_probe_channel(channel);
		if (rc)
			goto rollback;
		efx_init_napi_channel(efx->channel[i]);
	}

out:
	/* Destroy unused channel structures */
	for (i = 0; i < efx->n_channels; i++) {
		channel = other_channel[i];
		if (channel && channel->type->copy) {
			efx_fini_napi_channel(channel);
			efx_remove_channel(channel);
			kfree(channel);
		}
	}

	rc2 = efx_soft_enable_interrupts(efx);
	if (rc2) {
		rc = rc ? rc : rc2;
		netif_err(efx, drv, efx->net_dev,
			  "unable to restart interrupts on channel reallocation\n");
		efx_schedule_reset(efx, RESET_TYPE_DISABLE);
	} else {
		efx_start_all(efx);
		efx_device_attach_if_not_resetting(efx);
	}
	return rc;

rollback:
	/* Swap back */
	efx->rxq_entries = old_rxq_entries;
	efx->txq_entries = old_txq_entries;
	for (i = 0; i < efx->n_channels; i++) {
		channel = efx->channel[i];
		efx->channel[i] = other_channel[i];
		other_channel[i] = channel;
	}
	goto out;
}
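
/* Assign hardware queue numbers and build the XDP TX queue lookup table now
 * that the final channel layout is known.
 */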
int efx_set_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_channel *channel;
	unsigned int next_queue = 0;
	int xdp_queue_number;
	int rc;

	efx->tx_channel_offset =
		efx_separate_tx_channels ?
		efx->n_channels - efx->n_tx_channels : 0;

	if (efx->xdp_tx_queue_count) {
		EFX_WARN_ON_PARANOID(efx->xdp_tx_queues);

		/* Allocate array for XDP TX queue lookup. */
		efx->xdp_tx_queues = kcalloc(efx->xdp_tx_queue_count,
					     sizeof(*efx->xdp_tx_queues),
					     GFP_KERNEL);
		if (!efx->xdp_tx_queues)
			return -ENOMEM;
	}

	/* We need to mark which channels really have RX and TX
	 * queues, and adjust the TX queue numbers if we have separate
	 * RX-only and TX-only channels.
	 */
	xdp_queue_number = 0;
	efx_for_each_channel(channel, efx) {
		if (channel->channel < efx->n_rx_channels)
			channel->rx_queue.core_index = channel->channel;
		else
			channel->rx_queue.core_index = -1;

		if (channel->channel >= efx->tx_channel_offset) {
			if (efx_channel_is_xdp_tx(channel)) {
				efx_for_each_channel_tx_queue(tx_queue, channel) {
					tx_queue->queue = next_queue++;

					/* We may have a few left-over XDP TX
					 * queues owing to xdp_tx_queue_count
					 * not dividing evenly by EFX_MAX_TXQ_PER_CHANNEL.
					 * We still allocate and probe those
					 * TXQs, but never use them.
					 */
					if (xdp_queue_number < efx->xdp_tx_queue_count) {
						netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
							  channel->channel, tx_queue->label,
							  xdp_queue_number, tx_queue->queue);
						efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
						xdp_queue_number++;
					}
				}
			} else {
				efx_for_each_channel_tx_queue(tx_queue, channel) {
					tx_queue->queue = next_queue++;
					netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
						  channel->channel, tx_queue->label,
						  tx_queue->queue);
				}
			}
		}
	}
	WARN_ON(xdp_queue_number != efx->xdp_tx_queue_count);

	rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
	if (rc)
		return rc;
	return netif_set_real_num_rx_queues(efx->net_dev, efx->n_rx_channels);
}

bool efx_default_channel_want_txqs(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
		channel->efx->n_tx_channels;
}
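
/* Bring up the event queues and NAPI for every channel and switch MCDI
 * completions back to event mode. On failure, undo the channels that were
 * already started.
 */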
int efx_soft_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	BUG_ON(efx->state == STATE_DISABLED);

	efx->irq_soft_enabled = true;
	smp_wmb();

	efx_for_each_channel(channel, efx) {
		if (!channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
		efx_start_eventq(channel);
	}

	efx_mcdi_mode_event(efx);

	return 0;
fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	return rc;
}

void efx_soft_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	if (efx->state == STATE_DISABLED)
		return;

	efx_mcdi_mode_poll(efx);

	efx->irq_soft_enabled = false;
	smp_wmb();

	if (efx->legacy_irq)
		synchronize_irq(efx->legacy_irq);

	efx_for_each_channel(channel, efx) {
		if (channel->irq)
			synchronize_irq(channel->irq);

		efx_stop_eventq(channel);
		if (!channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	/* Flush the asynchronous MCDI request queue */
	efx_mcdi_flush_async(efx);
}

int efx_enable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel, *end_channel;
	int rc;

	/* TODO: Is this really a bug? */
	BUG_ON(efx->state == STATE_DISABLED);

	if (efx->eeh_disabled_legacy_irq) {
		enable_irq(efx->legacy_irq);
		efx->eeh_disabled_legacy_irq = false;
	}

	efx->type->irq_enable_master(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq) {
			rc = efx_init_eventq(channel);
			if (rc)
				goto fail;
		}
	}

	rc = efx_soft_enable_interrupts(efx);
	if (rc)
		goto fail;

	return 0;

fail:
	end_channel = channel;
	efx_for_each_channel(channel, efx) {
		if (channel == end_channel)
			break;
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);

	return rc;
}

void efx_disable_interrupts(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_soft_disable_interrupts(efx);

	efx_for_each_channel(channel, efx) {
		if (channel->type->keep_eventq)
			efx_fini_eventq(channel);
	}

	efx->type->irq_disable_non_ev(efx);
}
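
/* Initialise all TX and RX queues and start filling the RX rings. The event
 * queue is briefly stopped around the initial RX descriptor push.
 */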
void efx_start_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_tx_queue(tx_queue, channel) {
			efx_init_tx_queue(tx_queue);
			atomic_inc(&efx->active_queues);
		}

		efx_for_each_channel_rx_queue(rx_queue, channel) {
			efx_init_rx_queue(rx_queue);
			atomic_inc(&efx->active_queues);
			efx_stop_eventq(channel);
			efx_fast_push_rx_descriptors(rx_queue, false);
			efx_start_eventq(channel);
		}

		WARN_ON(channel->rx_pkt_n_frags);
	}
}

void efx_stop_channels(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int rc = 0;

	/* Stop RX refill */
	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			rx_queue->refill_enabled = false;
	}

	efx_for_each_channel(channel, efx) {
		/* RX packet processing is pipelined, so wait for the
		 * NAPI handler to complete. At least event queue 0
		 * might be kept active by non-data events, so don't
		 * use napi_synchronize() but actually disable NAPI
		 * temporarily.
		 */
		if (efx_channel_has_rx_queue(channel)) {
			efx_stop_eventq(channel);
			efx_start_eventq(channel);
		}
	}

	if (efx->type->fini_dmaq)
		rc = efx->type->fini_dmaq(efx);

	if (rc) {
		netif_err(efx, drv, efx->net_dev, "failed to flush queues\n");
	} else {
		netif_dbg(efx, drv, efx->net_dev,
			  "successfully flushed all queues\n");
	}

	efx_for_each_channel(channel, efx) {
		efx_for_each_channel_rx_queue(rx_queue, channel)
			efx_fini_rx_queue(rx_queue);
		efx_for_each_channel_tx_queue(tx_queue, channel)
			efx_fini_tx_queue(tx_queue);
	}
}

/**************************************************************************
 *
 * NAPI interface
 *
 *************************************************************************/

/* Process channel's event queue
 *
 * This function is responsible for processing the event queue of a
 * single channel. The caller must guarantee that this function will
 * never be concurrently called more than once on the same channel,
 * though different channels may be being processed concurrently.
 */
static int efx_process_channel(struct efx_channel *channel, int budget)
{
	struct efx_tx_queue *tx_queue;
	struct list_head rx_list;
	int spent;

	if (unlikely(!channel->enabled))
		return 0;

	/* Prepare the batch receive list */
	EFX_WARN_ON_PARANOID(channel->rx_list != NULL);
	INIT_LIST_HEAD(&rx_list);
	channel->rx_list = &rx_list;

	efx_for_each_channel_tx_queue(tx_queue, channel) {
		tx_queue->pkts_compl = 0;
		tx_queue->bytes_compl = 0;
	}

	spent = efx_nic_process_eventq(channel, budget);
	if (spent && efx_channel_has_rx_queue(channel)) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		efx_rx_flush_packet(channel);
		efx_fast_push_rx_descriptors(rx_queue, true);
	}

	/* Update BQL */
	efx_for_each_channel_tx_queue(tx_queue, channel) {
		if (tx_queue->bytes_compl) {
			netdev_tx_completed_queue(tx_queue->core_txq,
						  tx_queue->pkts_compl,
						  tx_queue->bytes_compl);
		}
	}

	/* Receive any packets we queued up */
	netif_receive_skb_list(channel->rx_list);
	channel->rx_list = NULL;

	return spent;
}
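
/* Adjust the channel's IRQ moderation interval based on its recent score:
 * nudge it down below irq_adapt_low_thresh and up above irq_adapt_high_thresh,
 * then reset the counters for the next period.
 */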
static void efx_update_irq_mod(struct efx_nic *efx, struct efx_channel *channel)
{
	int step = efx->irq_mod_step_us;

	if (channel->irq_mod_score < irq_adapt_low_thresh) {
		if (channel->irq_moderation_us > step) {
			channel->irq_moderation_us -= step;
			efx->type->push_irq_moderation(channel);
		}
	} else if (channel->irq_mod_score > irq_adapt_high_thresh) {
		if (channel->irq_moderation_us <
		    efx->irq_rx_moderation_us) {
			channel->irq_moderation_us += step;
			efx->type->push_irq_moderation(channel);
		}
	}

	channel->irq_count = 0;
	channel->irq_mod_score = 0;
}

/* NAPI poll handler
 *
 * NAPI guarantees serialisation of polls of the same device, which
 * provides the guarantee required by efx_process_channel().
 */
static int efx_poll(struct napi_struct *napi, int budget)
{
	struct efx_channel *channel =
		container_of(napi, struct efx_channel, napi_str);
	struct efx_nic *efx = channel->efx;
#ifdef CONFIG_RFS_ACCEL
	unsigned int time;
#endif
	int spent;

	netif_vdbg(efx, intr, efx->net_dev,
		   "channel %d NAPI poll executing on CPU %d\n",
		   channel->channel, raw_smp_processor_id());

	spent = efx_process_channel(channel, budget);

	xdp_do_flush_map();

	if (spent < budget) {
		if (efx_channel_has_rx_queue(channel) &&
		    efx->irq_rx_adaptive &&
		    unlikely(++channel->irq_count == 1000)) {
			efx_update_irq_mod(efx, channel);
		}

#ifdef CONFIG_RFS_ACCEL
		/* Perhaps expire some ARFS filters */
		time = jiffies - channel->rfs_last_expiry;
		/* Would our quota be >= 20? */
		if (channel->rfs_filter_count * time >= 600 * HZ)
			mod_delayed_work(system_wq, &channel->filter_work, 0);
#endif

		/* There is no race here; although napi_disable() will
		 * only wait for napi_complete(), this isn't a problem
		 * since efx_nic_eventq_read_ack() will have no effect if
		 * interrupts have already been disabled.
		 */
		if (napi_complete_done(napi, spent))
			efx_nic_eventq_read_ack(channel);
	}

	return spent;
}

void efx_init_napi_channel(struct efx_channel *channel)
{
	struct efx_nic *efx = channel->efx;

	channel->napi_dev = efx->net_dev;
	netif_napi_add(channel->napi_dev, &channel->napi_str,
		       efx_poll, napi_weight);
}

void efx_init_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_init_napi_channel(channel);
}

void efx_fini_napi_channel(struct efx_channel *channel)
{
	if (channel->napi_dev)
		netif_napi_del(&channel->napi_str);

	channel->napi_dev = NULL;
}

void efx_fini_napi(struct efx_nic *efx)
{
	struct efx_channel *channel;

	efx_for_each_channel(channel, efx)
		efx_fini_napi_channel(channel);
}