// SPDX-License-Identifier: GPL-2.0
/* Copyright (C) 2021 Gerhard Engleder <gerhard@engleder-embedded.com> */

/* TSN endpoint Ethernet MAC driver
 *
 * The TSN endpoint Ethernet MAC is an FPGA based network device for real-time
 * communication. It is designed for endpoints within TSN (Time Sensitive
 * Networking) networks; e.g., for PLCs in the industrial automation case.
 *
 * It supports multiple TX/RX queue pairs. The first TX/RX queue pair is used
 * by the network stack.
 *
 * More information can be found here:
 * - www.embedded-experts.at/tsn
 * - www.engleder-embedded.com
 */
#include "tsnep.h"
#include "tsnep_hw.h"

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/interrupt.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/iopoll.h>
#define TSNEP_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN)
#define TSNEP_HEADROOM ALIGN(TSNEP_SKB_PAD, 4)
#define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \
			       SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
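/* Illustrative page layout (exact values depend on kernel configuration,
 * e.g. page size and cache line size): each RX page holds TSNEP_HEADROOM
 * bytes of headroom, then inline metadata plus frame data of up to
 * TSNEP_MAX_RX_BUF_SIZE bytes, and finally the skb_shared_info required by
 * napi_build_skb() at the end of the page.
 */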
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
#define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF))
#else
#define DMA_ADDR_HIGH(dma_addr) ((u32)(0))
#endif
#define DMA_ADDR_LOW(dma_addr) ((u32)((dma_addr) & 0xFFFFFFFF))
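/* Worked example: with dma_addr = 0x0000000123456780, DMA_ADDR_HIGH() yields
 * 0x00000001 and DMA_ADDR_LOW() yields 0x23456780, matching the two 32-bit
 * descriptor address registers; without 64-bit DMA addresses the high word
 * is always zero.
 */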
static void tsnep_enable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static void tsnep_disable_irq(struct tsnep_adapter *adapter, u32 mask)
{
	mask |= ECM_INT_DISABLE;
	iowrite32(mask, adapter->addr + ECM_INT_ENABLE);
}

static irqreturn_t tsnep_irq(int irq, void *arg)
{
	struct tsnep_adapter *adapter = arg;
	u32 active = ioread32(adapter->addr + ECM_INT_ACTIVE);

	/* acknowledge interrupt */
	if (active != 0)
		iowrite32(active, adapter->addr + ECM_INT_ACKNOWLEDGE);

	/* handle link interrupt */
	if ((active & ECM_INT_LINK) != 0)
		phy_mac_interrupt(adapter->netdev->phydev);

	/* handle TX/RX queue 0 interrupt */
	if ((active & adapter->queue[0].irq_mask) != 0) {
		tsnep_disable_irq(adapter, adapter->queue[0].irq_mask);
		napi_schedule(&adapter->queue[0].napi);
	}

	return IRQ_HANDLED;
}

static irqreturn_t tsnep_irq_txrx(int irq, void *arg)
{
	struct tsnep_queue *queue = arg;

	/* handle TX/RX queue interrupt */
	tsnep_disable_irq(queue->adapter, queue->irq_mask);
	napi_schedule(&queue->napi);

	return IRQ_HANDLED;
}
static int tsnep_mdiobus_read(struct mii_bus *bus, int addr, int regnum)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_READ;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return (md & ECM_MD_DATA_MASK) >> ECM_MD_DATA_SHIFT;
}

static int tsnep_mdiobus_write(struct mii_bus *bus, int addr, int regnum,
			       u16 val)
{
	struct tsnep_adapter *adapter = bus->priv;
	u32 md;
	int retval;

	if (regnum & MII_ADDR_C45)
		return -EOPNOTSUPP;

	md = ECM_MD_WRITE;
	if (!adapter->suppress_preamble)
		md |= ECM_MD_PREAMBLE;
	md |= (regnum << ECM_MD_ADDR_SHIFT) & ECM_MD_ADDR_MASK;
	md |= (addr << ECM_MD_PHY_ADDR_SHIFT) & ECM_MD_PHY_ADDR_MASK;
	md |= ((u32)val << ECM_MD_DATA_SHIFT) & ECM_MD_DATA_MASK;
	iowrite32(md, adapter->addr + ECM_MD_CONTROL);
	retval = readl_poll_timeout_atomic(adapter->addr + ECM_MD_STATUS, md,
					   !(md & ECM_MD_BUSY), 16, 1000);
	if (retval != 0)
		return retval;

	return 0;
}
static void tsnep_set_link_mode(struct tsnep_adapter *adapter)
{
	u32 mode;

	switch (adapter->phydev->speed) {
	case SPEED_100:
		mode = ECM_LINK_MODE_100;
		break;
	case SPEED_1000:
		mode = ECM_LINK_MODE_1000;
		break;
	default:
		mode = ECM_LINK_MODE_OFF;
		break;
	}
	iowrite32(mode, adapter->addr + ECM_STATUS);
}

static void tsnep_phy_link_status_change(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;

	if (phydev->link)
		tsnep_set_link_mode(adapter);

	phy_print_status(netdev->phydev);
}

static int tsnep_phy_loopback(struct tsnep_adapter *adapter, bool enable)
{
	int retval;

	retval = phy_loopback(adapter->phydev, enable);

	/* PHY link state change is not signaled if loopback is enabled, it
	 * would delay a working loopback anyway, let's ensure that loopback
	 * is working immediately by setting link mode directly
	 */
	if (!retval && enable)
		tsnep_set_link_mode(adapter);

	return retval;
}
static int tsnep_phy_open(struct tsnep_adapter *adapter)
{
	struct phy_device *phydev;
	struct ethtool_eee ethtool_eee;
	int retval;

	retval = phy_connect_direct(adapter->netdev, adapter->phydev,
				    tsnep_phy_link_status_change,
				    adapter->phy_mode);
	if (retval)
		return retval;
	phydev = adapter->netdev->phydev;

	/* MAC supports only 100Mbps|1000Mbps full duplex
	 * SPE (Single Pair Ethernet) is also an option but not implemented yet
	 */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_10baseT_Full_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT);
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* disable EEE autoneg, EEE not supported by TSNEP */
	memset(&ethtool_eee, 0, sizeof(ethtool_eee));
	phy_ethtool_set_eee(adapter->phydev, &ethtool_eee);

	adapter->phydev->irq = PHY_MAC_INTERRUPT;
	phy_start(adapter->phydev);

	return 0;
}

static void tsnep_phy_close(struct tsnep_adapter *adapter)
{
	phy_stop(adapter->netdev->phydev);
	phy_disconnect(adapter->netdev->phydev);
	adapter->netdev->phydev = NULL;
}
static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	int i;

	memset(tx->entry, 0, sizeof(tx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (tx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, tx->page[i],
					  tx->page_dma[i]);
			tx->page[i] = NULL;
			tx->page_dma[i] = 0;
		}
	}
}

static int tsnep_tx_ring_init(struct tsnep_tx *tx)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	struct tsnep_tx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		tx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &tx->page_dma[i],
					   GFP_KERNEL);
		if (!tx->page[i]) {
			retval = -ENOMEM;
			goto alloc_failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &tx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_tx_desc_wb *)
				(((u8 *)tx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_tx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}
	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &tx->entry[i];
		next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);
	}

	return 0;

alloc_failed:
	tsnep_tx_ring_cleanup(tx);
	return retval;
}
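/* Ring layout sketch (sizes come from the TSNEP_* constants in the driver
 * headers, not shown here): each coherent DMA page holds
 * TSNEP_RING_ENTRIES_PER_PAGE descriptors of TSNEP_DESC_SIZE bytes; the
 * writeback part of a descriptor starts at offset 0 and the part written by
 * the driver follows at TSNEP_DESC_OFFSET. The descriptors of all pages are
 * chained into one circular list via the next pointers set up above.
 */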
static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length,
			      bool last)
{
	struct tsnep_tx_entry *entry = &tx->entry[index];

	entry->properties = 0;
	if (entry->skb) {
		entry->properties = length & TSNEP_DESC_LENGTH_MASK;
		entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
		if (skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS)
			entry->properties |= TSNEP_DESC_EXTENDED_WRITEBACK_FLAG;

		/* toggle user flag to prevent false acknowledge
		 *
		 * Only the first fragment is acknowledged. For all other
		 * fragments no acknowledge is done and the last written owner
		 * counter stays in the writeback descriptor. Therefore, it is
		 * possible that the last written owner counter is identical to
		 * the new incremented owner counter and a false acknowledge is
		 * detected before the real acknowledge has been done by
		 * hardware.
		 *
		 * The user flag is used to prevent this situation. The user
		 * flag is copied to the writeback descriptor by the hardware
		 * and is used as additional acknowledge data. By toggling the
		 * user flag only for the first fragment (which is
		 * acknowledged), it is guaranteed that the last acknowledge
		 * done for this descriptor has used a different user flag and
		 * cannot be detected as false acknowledge.
		 */
		entry->owner_user_flag = !entry->owner_user_flag;
	}
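	/* Illustration of the problem being prevented: if this entry was
	 * last used as a non-first fragment, its writeback was never updated
	 * and still holds an old owner counter. Should the ring wrap until
	 * the owner counter returns to that same value, the stale writeback
	 * would look like a fresh acknowledge; the toggled user flag makes
	 * the stale value distinguishable from the expected one.
	 */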
	if (last)
		entry->properties |= TSNEP_TX_DESC_LAST_FRAGMENT_FLAG;
	if (index == tx->increment_owner_counter) {
		tx->owner_counter++;
		if (tx->owner_counter == 4)
			tx->owner_counter = 1;
		tx->increment_owner_counter--;
		if (tx->increment_owner_counter < 0)
			tx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(tx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;
	if (entry->owner_user_flag)
		entry->properties |= TSNEP_TX_DESC_OWNER_USER_FLAG;
	entry->desc->more_properties =
		__cpu_to_le32(entry->len & TSNEP_DESC_LENGTH_MASK);

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}
static int tsnep_tx_desc_available(struct tsnep_tx *tx)
{
	if (tx->read <= tx->write)
		return TSNEP_RING_SIZE - tx->write + tx->read - 1;
	else
		return tx->read - tx->write - 1;
}
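/* Worked example (assuming TSNEP_RING_SIZE of 256 for illustration): with
 * read == write the ring is empty and 255 descriptors are available; with
 * read = 5 and write = 250 it is 256 - 250 + 5 - 1 = 10. One descriptor is
 * always kept unused so that a full ring (write catching up to read) can be
 * distinguished from an empty one.
 */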
static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	unsigned int len;
	dma_addr_t dma;
	int map_len = 0;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE];

		if (!i) {
			len = skb_headlen(skb);
			dma = dma_map_single(dmadev, skb->data, len,
					     DMA_TO_DEVICE);
		} else {
			len = skb_frag_size(&skb_shinfo(skb)->frags[i - 1]);
			dma = skb_frag_dma_map(dmadev,
					       &skb_shinfo(skb)->frags[i - 1],
					       0, len, DMA_TO_DEVICE);
		}
		if (dma_mapping_error(dmadev, dma))
			return -ENOMEM;

		entry->len = len;
		dma_unmap_addr_set(entry, dma, dma);

		entry->desc->tx = __cpu_to_le64(dma);

		map_len += len;
	}

	return map_len;
}

static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count)
{
	struct device *dmadev = tx->adapter->dmadev;
	struct tsnep_tx_entry *entry;
	int map_len = 0;
	int i;

	for (i = 0; i < count; i++) {
		entry = &tx->entry[(index + i) % TSNEP_RING_SIZE];

		if (entry->len) {
			if (i == 0)
				dma_unmap_single(dmadev,
						 dma_unmap_addr(entry, dma),
						 dma_unmap_len(entry, len),
						 DMA_TO_DEVICE);
			else
				dma_unmap_page(dmadev,
					       dma_unmap_addr(entry, dma),
					       dma_unmap_len(entry, len),
					       DMA_TO_DEVICE);
			map_len += entry->len;
			entry->len = 0;
		}
	}

	return map_len;
}
static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb,
					 struct tsnep_tx *tx)
{
	unsigned long flags;
	int count = 1;
	struct tsnep_tx_entry *entry;
	int length;
	int i;
	int retval;

	if (skb_shinfo(skb)->nr_frags > 0)
		count += skb_shinfo(skb)->nr_frags;

	spin_lock_irqsave(&tx->lock, flags);

	if (tsnep_tx_desc_available(tx) < count) {
		/* ring full, shall not happen because queue is stopped if full
		 * below
		 */
		netif_stop_queue(tx->adapter->netdev);

		spin_unlock_irqrestore(&tx->lock, flags);

		return NETDEV_TX_BUSY;
	}

	entry = &tx->entry[tx->write];
	entry->skb = skb;

	retval = tsnep_tx_map(skb, tx, count);
	if (retval < 0) {
		tsnep_tx_unmap(tx, tx->write, count);
		dev_kfree_skb_any(entry->skb);
		entry->skb = NULL;

		tx->dropped++;

		spin_unlock_irqrestore(&tx->lock, flags);

		netdev_err(tx->adapter->netdev, "TX DMA map failed\n");

		return NETDEV_TX_OK;
	}
	length = retval;

	if (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;

	for (i = 0; i < count; i++)
		tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length,
				  i == (count - 1));
	tx->write = (tx->write + count) % TSNEP_RING_SIZE;

	skb_tx_timestamp(skb);

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_TX_ENABLE, tx->addr + TSNEP_CONTROL);

	if (tsnep_tx_desc_available(tx) < (MAX_SKB_FRAGS + 1)) {
		/* ring can get full with next frame */
		netif_stop_queue(tx->adapter->netdev);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return NETDEV_TX_OK;
}
static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget)
{
	unsigned long flags;
	int budget = 128;
	struct tsnep_tx_entry *entry;
	int count;
	int length;

	spin_lock_irqsave(&tx->lock, flags);

	do {
		if (tx->read == tx->write)
			break;

		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) !=
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			break;

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		count = 1;
		if (skb_shinfo(entry->skb)->nr_frags > 0)
			count += skb_shinfo(entry->skb)->nr_frags;

		length = tsnep_tx_unmap(tx, tx->read, count);

		if ((skb_shinfo(entry->skb)->tx_flags & SKBTX_IN_PROGRESS) &&
		    (__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_EXTENDED_WRITEBACK_FLAG)) {
			struct skb_shared_hwtstamps hwtstamps;
			u64 timestamp;

			if (skb_shinfo(entry->skb)->tx_flags &
			    SKBTX_HW_TSTAMP_USE_CYCLES)
				timestamp =
					__le64_to_cpu(entry->desc_wb->counter);
			else
				timestamp =
					__le64_to_cpu(entry->desc_wb->timestamp);

			memset(&hwtstamps, 0, sizeof(hwtstamps));
			hwtstamps.hwtstamp = ns_to_ktime(timestamp);

			skb_tstamp_tx(entry->skb, &hwtstamps);
		}

		napi_consume_skb(entry->skb, napi_budget);
		entry->skb = NULL;

		tx->read = (tx->read + count) % TSNEP_RING_SIZE;

		tx->packets++;
		tx->bytes += length + ETH_FCS_LEN;

		budget--;
	} while (likely(budget));

	if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) &&
	    netif_queue_stopped(tx->adapter->netdev)) {
		netif_wake_queue(tx->adapter->netdev);
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return (budget != 0);
}
static bool tsnep_tx_pending(struct tsnep_tx *tx)
{
	unsigned long flags;
	struct tsnep_tx_entry *entry;
	bool pending = false;

	spin_lock_irqsave(&tx->lock, flags);

	if (tx->read != tx->write) {
		entry = &tx->entry[tx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_TX_DESC_OWNER_MASK) ==
		    (entry->properties & TSNEP_TX_DESC_OWNER_MASK))
			pending = true;
	}

	spin_unlock_irqrestore(&tx->lock, flags);

	return pending;
}

static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 int queue_index, struct tsnep_tx *tx)
{
	dma_addr_t dma;
	int retval;

	memset(tx, 0, sizeof(*tx));
	tx->adapter = adapter;
	tx->addr = addr;
	tx->queue_index = queue_index;

	retval = tsnep_tx_ring_init(tx);
	if (retval)
		return retval;

	dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH);
	tx->owner_counter = 1;
	tx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	spin_lock_init(&tx->lock);

	return 0;
}

static void tsnep_tx_close(struct tsnep_tx *tx)
{
	u32 val;

	readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_tx_ring_cleanup(tx);
}
static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	int i;

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		if (entry->page)
			page_pool_put_full_page(rx->page_pool, entry->page,
						false);
		entry->page = NULL;
	}

	if (rx->page_pool)
		page_pool_destroy(rx->page_pool);

	memset(rx->entry, 0, sizeof(rx->entry));

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		if (rx->page[i]) {
			dma_free_coherent(dmadev, PAGE_SIZE, rx->page[i],
					  rx->page_dma[i]);
			rx->page[i] = NULL;
			rx->page_dma[i] = 0;
		}
	}
}

static int tsnep_rx_alloc_buffer(struct tsnep_rx *rx,
				 struct tsnep_rx_entry *entry)
{
	struct page *page;

	page = page_pool_dev_alloc_pages(rx->page_pool);
	if (unlikely(!page))
		return -ENOMEM;

	entry->page = page;
	entry->len = TSNEP_MAX_RX_BUF_SIZE;
	entry->dma = page_pool_get_dma_addr(entry->page);
	entry->desc->rx = __cpu_to_le64(entry->dma + TSNEP_SKB_PAD);

	return 0;
}

static int tsnep_rx_ring_init(struct tsnep_rx *rx)
{
	struct device *dmadev = rx->adapter->dmadev;
	struct tsnep_rx_entry *entry;
	struct page_pool_params pp_params = { 0 };
	struct tsnep_rx_entry *next_entry;
	int i, j;
	int retval;

	for (i = 0; i < TSNEP_RING_PAGE_COUNT; i++) {
		rx->page[i] =
			dma_alloc_coherent(dmadev, PAGE_SIZE, &rx->page_dma[i],
					   GFP_KERNEL);
		if (!rx->page[i]) {
			retval = -ENOMEM;
			goto failed;
		}
		for (j = 0; j < TSNEP_RING_ENTRIES_PER_PAGE; j++) {
			entry = &rx->entry[TSNEP_RING_ENTRIES_PER_PAGE * i + j];
			entry->desc_wb = (struct tsnep_rx_desc_wb *)
				(((u8 *)rx->page[i]) + TSNEP_DESC_SIZE * j);
			entry->desc = (struct tsnep_rx_desc *)
				(((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET);
			entry->desc_dma = rx->page_dma[i] + TSNEP_DESC_SIZE * j;
		}
	}

	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.order = 0;
	pp_params.pool_size = TSNEP_RING_SIZE;
	pp_params.nid = dev_to_node(dmadev);
	pp_params.dev = dmadev;
	pp_params.dma_dir = DMA_FROM_DEVICE;
	pp_params.max_len = TSNEP_MAX_RX_BUF_SIZE;
	pp_params.offset = TSNEP_SKB_PAD;
	rx->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx->page_pool)) {
		retval = PTR_ERR(rx->page_pool);
		rx->page_pool = NULL;
		goto failed;
	}

	for (i = 0; i < TSNEP_RING_SIZE; i++) {
		entry = &rx->entry[i];
		next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE];
		entry->desc->next = __cpu_to_le64(next_entry->desc_dma);

		retval = tsnep_rx_alloc_buffer(rx, entry);
		if (retval)
			goto failed;
	}

	return 0;

failed:
	tsnep_rx_ring_cleanup(rx);
	return retval;
}
static void tsnep_rx_activate(struct tsnep_rx *rx, int index)
{
	struct tsnep_rx_entry *entry = &rx->entry[index];

	/* TSNEP_MAX_RX_BUF_SIZE is a multiple of 4 */
	entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK;
	entry->properties |= TSNEP_DESC_INTERRUPT_FLAG;
	if (index == rx->increment_owner_counter) {
		rx->owner_counter++;
		if (rx->owner_counter == 4)
			rx->owner_counter = 1;
		rx->increment_owner_counter--;
		if (rx->increment_owner_counter < 0)
			rx->increment_owner_counter = TSNEP_RING_SIZE - 1;
	}
	entry->properties |=
		(rx->owner_counter << TSNEP_DESC_OWNER_COUNTER_SHIFT) &
		TSNEP_DESC_OWNER_COUNTER_MASK;

	/* descriptor properties shall be written last, because valid data is
	 * signaled there
	 */
	dma_wmb();

	entry->desc->properties = __cpu_to_le32(entry->properties);
}
static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page,
				       int length)
{
	struct sk_buff *skb;

	skb = napi_build_skb(page_address(page), PAGE_SIZE);
	if (unlikely(!skb))
		return NULL;

	/* update pointers within the skb to store the data */
	skb_reserve(skb, TSNEP_SKB_PAD + TSNEP_RX_INLINE_METADATA_SIZE);
	__skb_put(skb, length - TSNEP_RX_INLINE_METADATA_SIZE - ETH_FCS_LEN);

	if (rx->adapter->hwtstamp_config.rx_filter == HWTSTAMP_FILTER_ALL) {
		struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
		struct tsnep_rx_inline *rx_inline =
			(struct tsnep_rx_inline *)(page_address(page) +
						   TSNEP_SKB_PAD);

		skb_shinfo(skb)->tx_flags |= SKBTX_HW_TSTAMP_NETDEV;
		memset(hwtstamps, 0, sizeof(*hwtstamps));
		hwtstamps->netdev_data = rx_inline;
	}

	skb_record_rx_queue(skb, rx->queue_index);
	skb->protocol = eth_type_trans(skb, rx->adapter->netdev);

	return skb;
}
static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
			 int budget)
{
	struct device *dmadev = rx->adapter->dmadev;
	int done = 0;
	enum dma_data_direction dma_dir;
	struct tsnep_rx_entry *entry;
	struct page *page;
	struct sk_buff *skb;
	int length;
	bool enable = false;
	int retval;

	dma_dir = page_pool_get_dma_dir(rx->page_pool);

	while (likely(done < budget)) {
		entry = &rx->entry[rx->read];
		if ((__le32_to_cpu(entry->desc_wb->properties) &
		     TSNEP_DESC_OWNER_COUNTER_MASK) !=
		    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
			break;

		/* descriptor properties shall be read first, because valid data
		 * is signaled there
		 */
		dma_rmb();

		prefetch(page_address(entry->page) + TSNEP_SKB_PAD);
		length = __le32_to_cpu(entry->desc_wb->properties) &
			 TSNEP_DESC_LENGTH_MASK;
		dma_sync_single_range_for_cpu(dmadev, entry->dma, TSNEP_SKB_PAD,
					      length, dma_dir);
		page = entry->page;

		/* forward skb only if allocation is successful, otherwise
		 * page is reused and frame dropped
		 */
		retval = tsnep_rx_alloc_buffer(rx, entry);
		if (!retval) {
			skb = tsnep_build_skb(rx, page, length);
			if (skb) {
				page_pool_release_page(rx->page_pool, page);

				rx->packets++;
				rx->bytes += length -
					     TSNEP_RX_INLINE_METADATA_SIZE;
				if (skb->pkt_type == PACKET_MULTICAST)
					rx->multicast++;

				napi_gro_receive(napi, skb);
			} else {
				page_pool_recycle_direct(rx->page_pool, page);

				rx->dropped++;
			}
			done++;
		} else {
			rx->dropped++;
		}

		tsnep_rx_activate(rx, rx->read);

		enable = true;

		rx->read = (rx->read + 1) % TSNEP_RING_SIZE;
	}

	if (enable) {
		/* descriptor properties shall be valid before hardware is
		 * notified
		 */
		dma_wmb();

		iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);
	}

	return done;
}
static bool tsnep_rx_pending(struct tsnep_rx *rx)
{
	struct tsnep_rx_entry *entry;

	entry = &rx->entry[rx->read];
	if ((__le32_to_cpu(entry->desc_wb->properties) &
	     TSNEP_DESC_OWNER_COUNTER_MASK) ==
	    (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK))
		return true;

	return false;
}

static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr,
			 int queue_index, struct tsnep_rx *rx)
{
	dma_addr_t dma;
	int retval;
	int i;

	memset(rx, 0, sizeof(*rx));
	rx->adapter = adapter;
	rx->addr = addr;
	rx->queue_index = queue_index;

	retval = tsnep_rx_ring_init(rx);
	if (retval)
		return retval;

	dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER;
	iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW);
	iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH);
	rx->owner_counter = 1;
	rx->increment_owner_counter = TSNEP_RING_SIZE - 1;

	for (i = 0; i < TSNEP_RING_SIZE; i++)
		tsnep_rx_activate(rx, i);

	/* descriptor properties shall be valid before hardware is notified */
	dma_wmb();

	iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL);

	return 0;
}

static void tsnep_rx_close(struct tsnep_rx *rx)
{
	u32 val;

	iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL);
	readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val,
			   ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000,
			   1000000);

	tsnep_rx_ring_cleanup(rx);
}
static bool tsnep_pending(struct tsnep_queue *queue)
{
	if (queue->tx && tsnep_tx_pending(queue->tx))
		return true;

	if (queue->rx && tsnep_rx_pending(queue->rx))
		return true;

	return false;
}

static int tsnep_poll(struct napi_struct *napi, int budget)
{
	struct tsnep_queue *queue = container_of(napi, struct tsnep_queue,
						 napi);
	bool complete = true;
	int done = 0;

	if (queue->tx)
		complete = tsnep_tx_poll(queue->tx, budget);

	if (queue->rx)
		done = tsnep_rx_poll(queue->rx, napi, budget);

	if (done >= budget)
		complete = false;

	/* if all work not completed, return budget and keep polling */
	if (!complete)
		return budget;

	if (likely(napi_complete_done(napi, done))) {
		tsnep_enable_irq(queue->adapter, queue->irq_mask);

		/* reschedule if work is already pending, prevent rotten packets
		 * which are transmitted or received after polling but before
		 * interrupt enable
		 */
		if (tsnep_pending(queue)) {
			tsnep_disable_irq(queue->adapter, queue->irq_mask);
			napi_schedule(napi);
		}
	}

	/* NAPI expects a return value below budget after napi_complete_done()
	 * has been called
	 */
	return min(done, budget - 1);
}
static int tsnep_request_irq(struct tsnep_queue *queue, bool first)
{
	const char *name = netdev_name(queue->adapter->netdev);
	irq_handler_t handler;
	void *dev;
	int retval;

	if (first) {
		sprintf(queue->name, "%s-mac", name);
		handler = tsnep_irq;
		dev = queue->adapter;
	} else {
		if (queue->tx && queue->rx)
			sprintf(queue->name, "%s-txrx-%d", name,
				queue->rx->queue_index);
		else if (queue->tx)
			sprintf(queue->name, "%s-tx-%d", name,
				queue->tx->queue_index);
		else
			sprintf(queue->name, "%s-rx-%d", name,
				queue->rx->queue_index);
		handler = tsnep_irq_txrx;
		dev = queue;
	}

	retval = request_irq(queue->irq, handler, 0, queue->name, dev);
	if (retval) {
		/* if name is empty, then interrupt won't be freed */
		memset(queue->name, 0, sizeof(queue->name));
	}

	return retval;
}

static void tsnep_free_irq(struct tsnep_queue *queue, bool first)
{
	void *dev;

	if (!strlen(queue->name))
		return;

	if (first)
		dev = queue->adapter;
	else
		dev = queue;

	free_irq(queue->irq, dev);
	memset(queue->name, 0, sizeof(queue->name));
}
static int tsnep_netdev_open(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;
	void __iomem *addr;
	int tx_queue_index = 0;
	int rx_queue_index = 0;
	int retval;

	for (i = 0; i < adapter->num_queues; i++) {
		adapter->queue[i].adapter = adapter;
		if (adapter->queue[i].tx) {
			addr = adapter->addr + TSNEP_QUEUE(tx_queue_index);
			retval = tsnep_tx_open(adapter, addr, tx_queue_index,
					       adapter->queue[i].tx);
			if (retval)
				goto failed;
			tx_queue_index++;
		}
		if (adapter->queue[i].rx) {
			addr = adapter->addr + TSNEP_QUEUE(rx_queue_index);
			retval = tsnep_rx_open(adapter, addr, rx_queue_index,
					       adapter->queue[i].rx);
			if (retval)
				goto failed;
			rx_queue_index++;
		}

		retval = tsnep_request_irq(&adapter->queue[i], i == 0);
		if (retval) {
			netif_err(adapter, drv, adapter->netdev,
				  "can't get assigned irq %d.\n",
				  adapter->queue[i].irq);
			goto failed;
		}
	}

	retval = netif_set_real_num_tx_queues(adapter->netdev,
					      adapter->num_tx_queues);
	if (retval)
		goto failed;
	retval = netif_set_real_num_rx_queues(adapter->netdev,
					      adapter->num_rx_queues);
	if (retval)
		goto failed;

	tsnep_enable_irq(adapter, ECM_INT_LINK);
	retval = tsnep_phy_open(adapter);
	if (retval)
		goto phy_failed;

	for (i = 0; i < adapter->num_queues; i++) {
		netif_napi_add(adapter->netdev, &adapter->queue[i].napi,
			       tsnep_poll);
		napi_enable(&adapter->queue[i].napi);

		tsnep_enable_irq(adapter, adapter->queue[i].irq_mask);
	}

	return 0;

phy_failed:
	tsnep_disable_irq(adapter, ECM_INT_LINK);
	tsnep_phy_close(adapter);
failed:
	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_free_irq(&adapter->queue[i], i == 0);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}
	return retval;
}

static int tsnep_netdev_close(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	int i;

	tsnep_disable_irq(adapter, ECM_INT_LINK);
	tsnep_phy_close(adapter);

	for (i = 0; i < adapter->num_queues; i++) {
		tsnep_disable_irq(adapter, adapter->queue[i].irq_mask);

		napi_disable(&adapter->queue[i].napi);
		netif_napi_del(&adapter->queue[i].napi);

		tsnep_free_irq(&adapter->queue[i], i == 0);

		if (adapter->queue[i].rx)
			tsnep_rx_close(adapter->queue[i].rx);
		if (adapter->queue[i].tx)
			tsnep_tx_close(adapter->queue[i].tx);
	}

	return 0;
}
static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb,
					   struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u16 queue_mapping = skb_get_queue_mapping(skb);

	if (queue_mapping >= adapter->num_tx_queues)
		queue_mapping = 0;

	return tsnep_xmit_frame_ring(skb, &adapter->tx[queue_mapping]);
}

static int tsnep_netdev_ioctl(struct net_device *netdev, struct ifreq *ifr,
			      int cmd)
{
	if (!netif_running(netdev))
		return -EINVAL;
	if (cmd == SIOCSHWTSTAMP || cmd == SIOCGHWTSTAMP)
		return tsnep_ptp_ioctl(netdev, ifr, cmd);
	return phy_mii_ioctl(netdev->phydev, ifr, cmd);
}

static void tsnep_netdev_set_multicast(struct net_device *netdev)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);

	u16 rx_filter = 0;

	/* configured MAC address and broadcasts are never filtered */
	if (netdev->flags & IFF_PROMISC) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_UNICASTS;
	} else if (!netdev_mc_empty(netdev) || (netdev->flags & IFF_ALLMULTI)) {
		rx_filter |= TSNEP_RX_FILTER_ACCEPT_ALL_MULTICASTS;
	}
	iowrite16(rx_filter, adapter->addr + TSNEP_RX_FILTER);
}
static void tsnep_netdev_get_stats64(struct net_device *netdev,
				     struct rtnl_link_stats64 *stats)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	u32 reg;
	u32 val;
	int i;

	for (i = 0; i < adapter->num_tx_queues; i++) {
		stats->tx_packets += adapter->tx[i].packets;
		stats->tx_bytes += adapter->tx[i].bytes;
		stats->tx_dropped += adapter->tx[i].dropped;
	}
	for (i = 0; i < adapter->num_rx_queues; i++) {
		stats->rx_packets += adapter->rx[i].packets;
		stats->rx_bytes += adapter->rx[i].bytes;
		stats->rx_dropped += adapter->rx[i].dropped;
		stats->multicast += adapter->rx[i].multicast;

		reg = ioread32(adapter->addr + TSNEP_QUEUE(i) +
			       TSNEP_RX_STATISTIC);
		val = (reg & TSNEP_RX_STATISTIC_NO_DESC_MASK) >>
		      TSNEP_RX_STATISTIC_NO_DESC_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_MASK) >>
		      TSNEP_RX_STATISTIC_BUFFER_TOO_SMALL_SHIFT;
		stats->rx_dropped += val;
		val = (reg & TSNEP_RX_STATISTIC_FIFO_OVERFLOW_MASK) >>
		      TSNEP_RX_STATISTIC_FIFO_OVERFLOW_SHIFT;
		stats->rx_errors += val;
		stats->rx_fifo_errors += val;
		val = (reg & TSNEP_RX_STATISTIC_INVALID_FRAME_MASK) >>
		      TSNEP_RX_STATISTIC_INVALID_FRAME_SHIFT;
		stats->rx_errors += val;
		stats->rx_frame_errors += val;
	}

	reg = ioread32(adapter->addr + ECM_STAT);
	val = (reg & ECM_STAT_RX_ERR_MASK) >> ECM_STAT_RX_ERR_SHIFT;
	stats->rx_errors += val;
	val = (reg & ECM_STAT_INV_FRM_MASK) >> ECM_STAT_INV_FRM_SHIFT;
	stats->rx_errors += val;
	stats->rx_crc_errors += val;
	val = (reg & ECM_STAT_FWD_RX_ERR_MASK) >> ECM_STAT_FWD_RX_ERR_SHIFT;
	stats->rx_errors += val;
}
static void tsnep_mac_set_address(struct tsnep_adapter *adapter, u8 *addr)
{
	iowrite32(*(u32 *)addr, adapter->addr + TSNEP_MAC_ADDRESS_LOW);
	iowrite16(*(u16 *)(addr + sizeof(u32)),
		  adapter->addr + TSNEP_MAC_ADDRESS_HIGH);

	ether_addr_copy(adapter->mac_address, addr);
	netif_info(adapter, drv, adapter->netdev, "MAC address set to %pM\n",
		   adapter->mac_address);
}

static int tsnep_netdev_set_mac_address(struct net_device *netdev, void *addr)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	struct sockaddr *sock_addr = addr;
	int retval;

	retval = eth_prepare_mac_addr_change(netdev, sock_addr);
	if (retval)
		return retval;
	eth_hw_addr_set(netdev, sock_addr->sa_data);
	tsnep_mac_set_address(adapter, sock_addr->sa_data);

	return 0;
}

static int tsnep_netdev_set_features(struct net_device *netdev,
				     netdev_features_t features)
{
	struct tsnep_adapter *adapter = netdev_priv(netdev);
	netdev_features_t changed = netdev->features ^ features;
	bool enable;
	int retval = 0;

	if (changed & NETIF_F_LOOPBACK) {
		enable = !!(features & NETIF_F_LOOPBACK);
		retval = tsnep_phy_loopback(adapter, enable);
	}

	return retval;
}

static ktime_t tsnep_netdev_get_tstamp(struct net_device *netdev,
				       const struct skb_shared_hwtstamps *hwtstamps,
				       bool cycles)
{
	struct tsnep_rx_inline *rx_inline = hwtstamps->netdev_data;
	u64 timestamp;

	if (cycles)
		timestamp = __le64_to_cpu(rx_inline->counter);
	else
		timestamp = __le64_to_cpu(rx_inline->timestamp);

	return ns_to_ktime(timestamp);
}
static const struct net_device_ops tsnep_netdev_ops = {
	.ndo_open = tsnep_netdev_open,
	.ndo_stop = tsnep_netdev_close,
	.ndo_start_xmit = tsnep_netdev_xmit_frame,
	.ndo_eth_ioctl = tsnep_netdev_ioctl,
	.ndo_set_rx_mode = tsnep_netdev_set_multicast,
	.ndo_get_stats64 = tsnep_netdev_get_stats64,
	.ndo_set_mac_address = tsnep_netdev_set_mac_address,
	.ndo_set_features = tsnep_netdev_set_features,
	.ndo_get_tstamp = tsnep_netdev_get_tstamp,
	.ndo_setup_tc = tsnep_tc_setup,
};
static int tsnep_mac_init(struct tsnep_adapter *adapter)
{
	int retval;

	/* initialize RX filtering, at least configured MAC address and
	 * broadcast are not filtered
	 */
	iowrite16(0, adapter->addr + TSNEP_RX_FILTER);

	/* try to get MAC address in the following order:
	 * - device tree
	 * - valid MAC address already set
	 * - MAC address register if valid
	 * - random MAC address
	 */
	retval = of_get_mac_address(adapter->pdev->dev.of_node,
				    adapter->mac_address);
	if (retval == -EPROBE_DEFER)
		return retval;
	if (retval && !is_valid_ether_addr(adapter->mac_address)) {
		*(u32 *)adapter->mac_address =
			ioread32(adapter->addr + TSNEP_MAC_ADDRESS_LOW);
		*(u16 *)(adapter->mac_address + sizeof(u32)) =
			ioread16(adapter->addr + TSNEP_MAC_ADDRESS_HIGH);
		if (!is_valid_ether_addr(adapter->mac_address))
			eth_random_addr(adapter->mac_address);
	}

	tsnep_mac_set_address(adapter, adapter->mac_address);
	eth_hw_addr_set(adapter->netdev, adapter->mac_address);

	return 0;
}
static int tsnep_mdio_init(struct tsnep_adapter *adapter)
{
	struct device_node *np = adapter->pdev->dev.of_node;
	int retval;

	if (np) {
		np = of_get_child_by_name(np, "mdio");
		if (!np)
			return 0;

		adapter->suppress_preamble =
			of_property_read_bool(np, "suppress-preamble");
	}

	adapter->mdiobus = devm_mdiobus_alloc(&adapter->pdev->dev);
	if (!adapter->mdiobus) {
		retval = -ENOMEM;

		goto out;
	}

	adapter->mdiobus->priv = (void *)adapter;
	adapter->mdiobus->parent = &adapter->pdev->dev;
	adapter->mdiobus->read = tsnep_mdiobus_read;
	adapter->mdiobus->write = tsnep_mdiobus_write;
	adapter->mdiobus->name = TSNEP "-mdiobus";
	snprintf(adapter->mdiobus->id, MII_BUS_ID_SIZE, "%s",
		 adapter->pdev->name);

	/* do not scan broadcast address */
	adapter->mdiobus->phy_mask = 0x0000001;

	retval = of_mdiobus_register(adapter->mdiobus, np);

out:
	of_node_put(np);

	return retval;
}

static int tsnep_phy_init(struct tsnep_adapter *adapter)
{
	struct device_node *phy_node;
	int retval;

	retval = of_get_phy_mode(adapter->pdev->dev.of_node,
				 &adapter->phy_mode);
	if (retval)
		adapter->phy_mode = PHY_INTERFACE_MODE_GMII;

	phy_node = of_parse_phandle(adapter->pdev->dev.of_node, "phy-handle",
				    0);
	adapter->phydev = of_phy_find_device(phy_node);
	of_node_put(phy_node);
	if (!adapter->phydev && adapter->mdiobus)
		adapter->phydev = phy_find_first(adapter->mdiobus);
	if (!adapter->phydev)
		return -EIO;

	return 0;
}
static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count)
{
	u32 irq_mask = ECM_INT_TX_0 | ECM_INT_RX_0;
	char name[8];
	int i;
	int retval;

	/* one TX/RX queue pair for netdev is mandatory */
	if (platform_irq_count(adapter->pdev) == 1)
		retval = platform_get_irq(adapter->pdev, 0);
	else
		retval = platform_get_irq_byname(adapter->pdev, "mac");
	if (retval < 0)
		return retval;
	adapter->num_tx_queues = 1;
	adapter->num_rx_queues = 1;
	adapter->num_queues = 1;
	adapter->queue[0].irq = retval;
	adapter->queue[0].tx = &adapter->tx[0];
	adapter->queue[0].rx = &adapter->rx[0];
	adapter->queue[0].irq_mask = irq_mask;

	adapter->netdev->irq = adapter->queue[0].irq;

	/* add additional TX/RX queue pairs only if dedicated interrupt is
	 * available
	 */
	for (i = 1; i < queue_count; i++) {
		sprintf(name, "txrx-%d", i);
		retval = platform_get_irq_byname_optional(adapter->pdev, name);
		if (retval < 0)
			break;

		adapter->num_tx_queues++;
		adapter->num_rx_queues++;
		adapter->num_queues++;
		adapter->queue[i].irq = retval;
		adapter->queue[i].tx = &adapter->tx[i];
		adapter->queue[i].rx = &adapter->rx[i];
		adapter->queue[i].irq_mask =
			irq_mask << (ECM_INT_TXRX_SHIFT * i);
	}

	return 0;
}
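/* Illustration of the resulting interrupt masks (mask values and shift width
 * are defined by the ECM_INT_* constants in the hardware header): queue 0
 * uses ECM_INT_TX_0 | ECM_INT_RX_0, queue 1 the same pair shifted once by
 * ECM_INT_TXRX_SHIFT, queue 2 shifted twice, and so on, so each queue pair
 * can be enabled, disabled and acknowledged independently.
 */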
static int tsnep_probe(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter;
	struct net_device *netdev;
	struct resource *io;
	u32 type;
	int revision;
	int version;
	int queue_count;
	int retval;

	netdev = devm_alloc_etherdev_mqs(&pdev->dev,
					 sizeof(struct tsnep_adapter),
					 TSNEP_MAX_QUEUES, TSNEP_MAX_QUEUES);
	if (!netdev)
		return -ENODEV;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	adapter = netdev_priv(netdev);
	platform_set_drvdata(pdev, adapter);
	adapter->pdev = pdev;
	adapter->dmadev = &pdev->dev;
	adapter->netdev = netdev;
	adapter->msg_enable = NETIF_MSG_DRV | NETIF_MSG_PROBE |
			      NETIF_MSG_LINK | NETIF_MSG_IFUP |
			      NETIF_MSG_IFDOWN | NETIF_MSG_TX_QUEUED;

	netdev->min_mtu = ETH_MIN_MTU;
	netdev->max_mtu = TSNEP_MAX_FRAME_SIZE;

	mutex_init(&adapter->gate_control_lock);
	mutex_init(&adapter->rxnfc_lock);
	INIT_LIST_HEAD(&adapter->rxnfc_rules);

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	adapter->addr = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(adapter->addr))
		return PTR_ERR(adapter->addr);
	netdev->mem_start = io->start;
	netdev->mem_end = io->end;

	type = ioread32(adapter->addr + ECM_TYPE);
	revision = (type & ECM_REVISION_MASK) >> ECM_REVISION_SHIFT;
	version = (type & ECM_VERSION_MASK) >> ECM_VERSION_SHIFT;
	queue_count = (type & ECM_QUEUE_COUNT_MASK) >> ECM_QUEUE_COUNT_SHIFT;
	adapter->gate_control = type & ECM_GATE_CONTROL;
	adapter->rxnfc_max = TSNEP_RX_ASSIGN_ETHER_TYPE_COUNT;

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	retval = tsnep_queue_init(adapter, queue_count);
	if (retval)
		return retval;

	retval = dma_set_mask_and_coherent(&adapter->pdev->dev,
					   DMA_BIT_MASK(64));
	if (retval) {
		dev_err(&adapter->pdev->dev, "no usable DMA configuration.\n");
		return retval;
	}

	retval = tsnep_mac_init(adapter);
	if (retval)
		return retval;

	retval = tsnep_mdio_init(adapter);
	if (retval)
		goto mdio_init_failed;

	retval = tsnep_phy_init(adapter);
	if (retval)
		goto phy_init_failed;

	retval = tsnep_ptp_init(adapter);
	if (retval)
		goto ptp_init_failed;

	retval = tsnep_tc_init(adapter);
	if (retval)
		goto tc_init_failed;

	retval = tsnep_rxnfc_init(adapter);
	if (retval)
		goto rxnfc_init_failed;

	netdev->netdev_ops = &tsnep_netdev_ops;
	netdev->ethtool_ops = &tsnep_ethtool_ops;
	netdev->features = NETIF_F_SG;
	netdev->hw_features = netdev->features | NETIF_F_LOOPBACK;

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	retval = register_netdev(netdev);
	if (retval)
		goto register_failed;

	dev_info(&adapter->pdev->dev, "device version %d.%02d\n", version,
		 revision);
	if (adapter->gate_control)
		dev_info(&adapter->pdev->dev, "gate control detected\n");

	return 0;

register_failed:
	tsnep_rxnfc_cleanup(adapter);
rxnfc_init_failed:
	tsnep_tc_cleanup(adapter);
tc_init_failed:
	tsnep_ptp_cleanup(adapter);
ptp_init_failed:
phy_init_failed:
	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);
mdio_init_failed:
	return retval;
}
static int tsnep_remove(struct platform_device *pdev)
{
	struct tsnep_adapter *adapter = platform_get_drvdata(pdev);

	unregister_netdev(adapter->netdev);

	tsnep_rxnfc_cleanup(adapter);

	tsnep_tc_cleanup(adapter);

	tsnep_ptp_cleanup(adapter);

	if (adapter->mdiobus)
		mdiobus_unregister(adapter->mdiobus);

	tsnep_disable_irq(adapter, ECM_INT_ALL);

	return 0;
}

static const struct of_device_id tsnep_of_match[] = {
	{ .compatible = "engleder,tsnep", },
	{ },
};
MODULE_DEVICE_TABLE(of, tsnep_of_match);
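/* Hypothetical device tree node for illustration only; addresses, interrupt
 * specifiers and PHY properties are made up, see the binding documentation
 * for the authoritative set of properties:
 *
 *	ethernet@a0000000 {
 *		compatible = "engleder,tsnep";
 *		reg = <0xa0000000 0x10000>;
 *		interrupts = <0 89 1>;
 *		phy-mode = "rgmii";
 *		phy-handle = <&phy>;
 *	};
 */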
static struct platform_driver tsnep_driver = {
	.driver = {
		.name = TSNEP,
		.of_match_table = tsnep_of_match,
	},
	.probe = tsnep_probe,
	.remove = tsnep_remove,
};
module_platform_driver(tsnep_driver);

MODULE_AUTHOR("Gerhard Engleder <gerhard@engleder-embedded.com>");
MODULE_DESCRIPTION("TSN endpoint Ethernet MAC driver");
MODULE_LICENSE("GPL");