/* Copyright (c) 2011, 2012, Qualcomm Atheros Communications Inc.
 * Copyright (c) 2017, I2SE GmbH
 *
 * Permission to use, copy, modify, and/or distribute this software
 * for any purpose with or without fee is hereby granted, provided
 * that the above copyright notice and this permission notice appear
 * in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
 * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
 * THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR
 * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
 * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
 * NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
/* This module implements the Qualcomm Atheros UART protocol for
 * kernel-based UART device; it is essentially an Ethernet-to-UART
 * serial converter.
 */
25 #include <linux/device.h>
26 #include <linux/errno.h>
27 #include <linux/etherdevice.h>
28 #include <linux/if_arp.h>
29 #include <linux/if_ether.h>
30 #include <linux/jiffies.h>
31 #include <linux/kernel.h>
32 #include <linux/module.h>
33 #include <linux/netdevice.h>
35 #include <linux/of_net.h>
36 #include <linux/sched.h>
37 #include <linux/serdev.h>
38 #include <linux/skbuff.h>
39 #include <linux/types.h>
41 #include "qca_7k_common.h"
/* Driver identification and the netdev watchdog (TX) timeout in jiffies */
#define QCAUART_DRV_VERSION "0.1.0"
#define QCAUART_DRV_NAME "qcauart"
#define QCAUART_TX_TIMEOUT (1 * HZ)
48 struct net_device *net_dev;
49 spinlock_t lock; /* transmit lock */
50 struct work_struct tx_work; /* Flushes transmit buffer */
52 struct serdev_device *serdev;
53 struct qcafrm_handle frm_handle;
54 struct sk_buff *rx_skb;
56 unsigned char *tx_head; /* pointer to next XMIT byte */
57 int tx_left; /* bytes left in XMIT queue */
58 unsigned char *tx_buffer;
62 qca_tty_receive(struct serdev_device *serdev, const unsigned char *data,
65 struct qcauart *qca = serdev_device_get_drvdata(serdev);
66 struct net_device *netdev = qca->net_dev;
67 struct net_device_stats *n_stats = &netdev->stats;
71 qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
76 n_stats->rx_dropped++;
81 for (i = 0; i < count; i++) {
84 retcode = qcafrm_fsm_decode(&qca->frm_handle,
86 skb_tailroom(qca->rx_skb),
94 netdev_dbg(netdev, "recv: no RX tail\n");
96 n_stats->rx_dropped++;
99 netdev_dbg(netdev, "recv: invalid RX length\n");
100 n_stats->rx_errors++;
101 n_stats->rx_dropped++;
104 n_stats->rx_packets++;
105 n_stats->rx_bytes += retcode;
106 skb_put(qca->rx_skb, retcode);
107 qca->rx_skb->protocol = eth_type_trans(
108 qca->rx_skb, qca->rx_skb->dev);
109 skb_checksum_none_assert(qca->rx_skb);
110 netif_rx(qca->rx_skb);
111 qca->rx_skb = netdev_alloc_skb_ip_align(netdev,
115 netdev_dbg(netdev, "recv: out of RX resources\n");
116 n_stats->rx_errors++;
125 /* Write out any remaining transmit buffer. Scheduled when tty is writable */
126 static void qcauart_transmit(struct work_struct *work)
128 struct qcauart *qca = container_of(work, struct qcauart, tx_work);
129 struct net_device_stats *n_stats = &qca->net_dev->stats;
132 spin_lock_bh(&qca->lock);
134 /* First make sure we're connected. */
135 if (!netif_running(qca->net_dev)) {
136 spin_unlock_bh(&qca->lock);
140 if (qca->tx_left <= 0) {
141 /* Now serial buffer is almost free & we can start
142 * transmission of another packet
144 n_stats->tx_packets++;
145 spin_unlock_bh(&qca->lock);
146 netif_wake_queue(qca->net_dev);
150 written = serdev_device_write_buf(qca->serdev, qca->tx_head,
153 qca->tx_left -= written;
154 qca->tx_head += written;
156 spin_unlock_bh(&qca->lock);
159 /* Called by the driver when there's room for more data.
160 * Schedule the transmit.
162 static void qca_tty_wakeup(struct serdev_device *serdev)
164 struct qcauart *qca = serdev_device_get_drvdata(serdev);
166 schedule_work(&qca->tx_work);
169 static const struct serdev_device_ops qca_serdev_ops = {
170 .receive_buf = qca_tty_receive,
171 .write_wakeup = qca_tty_wakeup,
174 static int qcauart_netdev_open(struct net_device *dev)
176 struct qcauart *qca = netdev_priv(dev);
178 netif_start_queue(qca->net_dev);
183 static int qcauart_netdev_close(struct net_device *dev)
185 struct qcauart *qca = netdev_priv(dev);
187 netif_stop_queue(dev);
188 flush_work(&qca->tx_work);
190 spin_lock_bh(&qca->lock);
192 spin_unlock_bh(&qca->lock);
198 qcauart_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
200 struct net_device_stats *n_stats = &dev->stats;
201 struct qcauart *qca = netdev_priv(dev);
206 spin_lock(&qca->lock);
208 WARN_ON(qca->tx_left);
210 if (!netif_running(dev)) {
211 spin_unlock(&qca->lock);
212 netdev_warn(qca->net_dev, "xmit: iface is down\n");
216 pos = qca->tx_buffer;
218 if (skb->len < QCAFRM_MIN_LEN)
219 pad_len = QCAFRM_MIN_LEN - skb->len;
221 pos += qcafrm_create_header(pos, skb->len + pad_len);
223 memcpy(pos, skb->data, skb->len);
227 memset(pos, 0, pad_len);
231 pos += qcafrm_create_footer(pos);
233 netif_stop_queue(qca->net_dev);
235 written = serdev_device_write_buf(qca->serdev, qca->tx_buffer,
236 pos - qca->tx_buffer);
238 qca->tx_left = (pos - qca->tx_buffer) - written;
239 qca->tx_head = qca->tx_buffer + written;
240 n_stats->tx_bytes += written;
242 spin_unlock(&qca->lock);
244 netif_trans_update(dev);
246 dev_kfree_skb_any(skb);
250 static void qcauart_netdev_tx_timeout(struct net_device *dev, unsigned int txqueue)
252 struct qcauart *qca = netdev_priv(dev);
254 netdev_info(qca->net_dev, "Transmit timeout at %ld, latency %ld\n",
255 jiffies, dev_trans_start(dev));
256 dev->stats.tx_errors++;
257 dev->stats.tx_dropped++;
260 static int qcauart_netdev_init(struct net_device *dev)
262 struct qcauart *qca = netdev_priv(dev);
265 /* Finish setting up the device info. */
266 dev->mtu = QCAFRM_MAX_MTU;
267 dev->type = ARPHRD_ETHER;
269 len = QCAFRM_HEADER_LEN + QCAFRM_MAX_LEN + QCAFRM_FOOTER_LEN;
270 qca->tx_buffer = devm_kmalloc(&qca->serdev->dev, len, GFP_KERNEL);
274 qca->rx_skb = netdev_alloc_skb_ip_align(qca->net_dev,
283 static void qcauart_netdev_uninit(struct net_device *dev)
285 struct qcauart *qca = netdev_priv(dev);
287 dev_kfree_skb(qca->rx_skb);
290 static const struct net_device_ops qcauart_netdev_ops = {
291 .ndo_init = qcauart_netdev_init,
292 .ndo_uninit = qcauart_netdev_uninit,
293 .ndo_open = qcauart_netdev_open,
294 .ndo_stop = qcauart_netdev_close,
295 .ndo_start_xmit = qcauart_netdev_xmit,
296 .ndo_set_mac_address = eth_mac_addr,
297 .ndo_tx_timeout = qcauart_netdev_tx_timeout,
298 .ndo_validate_addr = eth_validate_addr,
301 static void qcauart_netdev_setup(struct net_device *dev)
303 dev->netdev_ops = &qcauart_netdev_ops;
304 dev->watchdog_timeo = QCAUART_TX_TIMEOUT;
305 dev->priv_flags &= ~IFF_TX_SKB_SHARING;
306 dev->tx_queue_len = 100;
308 /* MTU range: 46 - 1500 */
309 dev->min_mtu = QCAFRM_MIN_MTU;
310 dev->max_mtu = QCAFRM_MAX_MTU;
313 static const struct of_device_id qca_uart_of_match[] = {
315 .compatible = "qca,qca7000",
319 MODULE_DEVICE_TABLE(of, qca_uart_of_match);
321 static int qca_uart_probe(struct serdev_device *serdev)
323 struct net_device *qcauart_dev = alloc_etherdev(sizeof(struct qcauart));
331 qcauart_netdev_setup(qcauart_dev);
332 SET_NETDEV_DEV(qcauart_dev, &serdev->dev);
334 qca = netdev_priv(qcauart_dev);
336 pr_err("qca_uart: Fail to retrieve private structure\n");
340 qca->net_dev = qcauart_dev;
341 qca->serdev = serdev;
342 qcafrm_fsm_init_uart(&qca->frm_handle);
344 spin_lock_init(&qca->lock);
345 INIT_WORK(&qca->tx_work, qcauart_transmit);
347 of_property_read_u32(serdev->dev.of_node, "current-speed", &speed);
349 ret = of_get_ethdev_address(serdev->dev.of_node, qca->net_dev);
351 eth_hw_addr_random(qca->net_dev);
352 dev_info(&serdev->dev, "Using random MAC address: %pM\n",
353 qca->net_dev->dev_addr);
356 netif_carrier_on(qca->net_dev);
357 serdev_device_set_drvdata(serdev, qca);
358 serdev_device_set_client_ops(serdev, &qca_serdev_ops);
360 ret = serdev_device_open(serdev);
362 dev_err(&serdev->dev, "Unable to open device %s\n",
367 speed = serdev_device_set_baudrate(serdev, speed);
368 dev_info(&serdev->dev, "Using baudrate: %u\n", speed);
370 serdev_device_set_flow_control(serdev, false);
372 ret = register_netdev(qcauart_dev);
374 dev_err(&serdev->dev, "Unable to register net device %s\n",
376 serdev_device_close(serdev);
377 cancel_work_sync(&qca->tx_work);
384 free_netdev(qcauart_dev);
388 static void qca_uart_remove(struct serdev_device *serdev)
390 struct qcauart *qca = serdev_device_get_drvdata(serdev);
392 unregister_netdev(qca->net_dev);
394 /* Flush any pending characters in the driver. */
395 serdev_device_close(serdev);
396 cancel_work_sync(&qca->tx_work);
398 free_netdev(qca->net_dev);
401 static struct serdev_device_driver qca_uart_driver = {
402 .probe = qca_uart_probe,
403 .remove = qca_uart_remove,
405 .name = QCAUART_DRV_NAME,
406 .of_match_table = qca_uart_of_match,
410 module_serdev_device_driver(qca_uart_driver);
/* Module metadata */
MODULE_DESCRIPTION("Qualcomm Atheros QCA7000 UART Driver");
MODULE_AUTHOR("Qualcomm Atheros Communications");
MODULE_AUTHOR("Stefan Wahren <stefan.wahren@i2se.com>");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(QCAUART_DRV_VERSION);