2 * xhci-dbgtty.c - tty glue for xHCI debug capability
4 * Copyright (C) 2017 Intel Corporation
6 * Author: Lu Baolu <baolu.lu@linux.intel.com>
9 #include <linux/slab.h>
10 #include <linux/tty.h>
11 #include <linux/tty_flip.h>
14 #include "xhci-dbgcap.h"
/*
 * Copy up to @size bytes out of the port's write FIFO into @packet and
 * return the number of bytes actually copied (kfifo_out()'s count).
 * Called by dbc_start_tx() with port->port_lock held.
 * NOTE(review): return type and the clamping of @size against the FIFO
 * length are elided in this view — confirm against the full file.
 */
dbc_send_packet(struct dbc_port *port, char *packet, unsigned int size)
        /* how much data is currently buffered; presumably clamps @size */
        len = kfifo_len(&port->write_fifo);
        /* drain at most @size bytes from the FIFO into the request buffer */
        size = kfifo_out(&port->write_fifo, packet, size);
/*
 * Move buffered tty data towards the hardware: for each free request on
 * the write pool, fill it from the write FIFO via dbc_send_packet() and
 * queue it on the endpoint.  A queueing failure puts the request back
 * on the pool and stops the loop.  Runs with port->port_lock held; the
 * lock is dropped only around dbc_ep_queue(), hence the
 * __releases/__acquires annotations.
 */
static int dbc_start_tx(struct dbc_port *port)
__releases(&port->port_lock)
__acquires(&port->port_lock)
        struct dbc_request *req;
        bool do_tty_wake = false;
        struct list_head *pool = &port->write_pool;

        while (!list_empty(pool)) {
                req = list_entry(pool->next, struct dbc_request, list_pool);
                /* fill the request buffer from the write FIFO */
                len = dbc_send_packet(port, req->buf, DBC_MAX_PACKET);
                /* request leaves the free pool while the hardware owns it */
                list_del(&req->list_pool);

                /* dbc_ep_queue() must not be called under port_lock */
                spin_unlock(&port->port_lock);
                status = dbc_ep_queue(port->out, req, GFP_ATOMIC);
                spin_lock(&port->port_lock);
                        /* queueing failed: return the request to the pool */
                        list_add(&req->list_pool, pool);

        /* FIFO space was freed: wake writers blocked on the tty */
        if (do_tty_wake && port->port.tty)
                tty_wakeup(port->port.tty);
/*
 * Prime reception: queue every request sitting on the read pool, each
 * asking for a full DBC_MAX_PACKET.  On a queueing failure the request
 * goes back on the pool and the loop stops.  Lock protocol mirrors
 * dbc_start_tx(): port_lock held, dropped only around dbc_ep_queue().
 */
static void dbc_start_rx(struct dbc_port *port)
__releases(&port->port_lock)
__acquires(&port->port_lock)
        struct dbc_request *req;
        struct list_head *pool = &port->read_pool;

        while (!list_empty(pool)) {
                req = list_entry(pool->next, struct dbc_request, list_pool);
                list_del(&req->list_pool);
                /* always request a full max-size packet */
                req->length = DBC_MAX_PACKET;

                /* dbc_ep_queue() must not be called under port_lock */
                spin_unlock(&port->port_lock);
                status = dbc_ep_queue(port->in, req, GFP_ATOMIC);
                spin_lock(&port->port_lock);
                        /* queueing failed: keep the request for later */
                        list_add(&req->list_pool, pool);
/*
 * Completion callback for read requests (interrupt context): park the
 * finished request on read_queue and let the dbc_rx_push tasklet feed
 * its data to the tty layer later.
 */
dbc_read_complete(struct xhci_hcd *xhci, struct dbc_request *req)
        struct xhci_dbc *dbc = xhci->dbc;
        struct dbc_port *port = &dbc->port;

        spin_lock_irqsave(&port->port_lock, flags);
        /* preserve arrival order for the tty stream */
        list_add_tail(&req->list_pool, &port->read_queue);
        tasklet_schedule(&port->push);
        spin_unlock_irqrestore(&port->port_lock, flags);
/*
 * Completion callback for write requests: recycle the request into the
 * write pool and warn on any completion status the switch does not
 * expect.  (The individual switch arms are elided in this view.)
 */
static void dbc_write_complete(struct xhci_hcd *xhci, struct dbc_request *req)
        struct xhci_dbc *dbc = xhci->dbc;
        struct dbc_port *port = &dbc->port;

        spin_lock_irqsave(&port->port_lock, flags);
        /* request is free again for dbc_start_tx() */
        list_add(&req->list_pool, &port->write_pool);
        switch (req->status) {
                xhci_warn(xhci, "unexpected write complete status %d\n",
        spin_unlock_irqrestore(&port->port_lock, flags);
/*
 * Free one request created by xhci_dbc_alloc_requests().
 * NOTE(review): req->buf is kmalloc'ed at allocation time — confirm the
 * elided line(s) here kfree() it before dbc_free_request(), otherwise
 * the buffer leaks.
 */
static void xhci_dbc_free_req(struct dbc_ep *dep, struct dbc_request *req)
        dbc_free_request(dep, req);
/*
 * Populate @head with up to DBC_QUEUE_SIZE requests for endpoint @dep,
 * each with a DBC_MAX_PACKET buffer and completion callback @fn.
 * Partial success is tolerated: returns -ENOMEM only when not a single
 * request could be set up, 0 otherwise.
 * NOTE(review): mixes GFP_ATOMIC (request) with GFP_KERNEL (buffer) —
 * verify the calling context actually permits GFP_KERNEL, or make the
 * two consistent.
 */
xhci_dbc_alloc_requests(struct dbc_ep *dep, struct list_head *head,
        void (*fn)(struct xhci_hcd *, struct dbc_request *))
        struct dbc_request *req;

        for (i = 0; i < DBC_QUEUE_SIZE; i++) {
                req = dbc_alloc_request(dep, GFP_ATOMIC);
                req->length = DBC_MAX_PACKET;
                req->buf = kmalloc(req->length, GFP_KERNEL);
                        /* buffer allocation failed: drop the request too */
                        xhci_dbc_free_req(dep, req);
                list_add_tail(&req->list_pool, head);

        /* -ENOMEM only if nothing at all was allocated */
        return list_empty(head) ? -ENOMEM : 0;
160 xhci_dbc_free_requests(struct dbc_ep *dep, struct list_head *head)
162 struct dbc_request *req;
164 while (!list_empty(head)) {
165 req = list_entry(head->next, struct dbc_request, list_pool);
166 list_del(&req->list_pool);
167 xhci_dbc_free_req(dep, req);
171 static int dbc_tty_install(struct tty_driver *driver, struct tty_struct *tty)
173 struct dbc_port *port = driver->driver_state;
175 tty->driver_data = port;
177 return tty_port_install(&port->port, driver, tty);
180 static int dbc_tty_open(struct tty_struct *tty, struct file *file)
182 struct dbc_port *port = tty->driver_data;
184 return tty_port_open(&port->port, tty, file);
187 static void dbc_tty_close(struct tty_struct *tty, struct file *file)
189 struct dbc_port *port = tty->driver_data;
191 tty_port_close(&port->port, tty, file);
/*
 * tty_operations.write: buffer caller data into the write FIFO and
 * return the number of bytes accepted (kfifo_in()'s count).
 * NOTE(review): the count parameter, the TX kick and the return
 * statement are elided in this view.
 */
static int dbc_tty_write(struct tty_struct *tty,
        const unsigned char *buf,
        struct dbc_port *port = tty->driver_data;

        spin_lock_irqsave(&port->port_lock, flags);
        /* accept only as much as the FIFO has room for */
        count = kfifo_in(&port->write_fifo, buf, count);
        spin_unlock_irqrestore(&port->port_lock, flags);
/*
 * tty_operations.put_char: enqueue one byte into the write FIFO.
 * kfifo_put() yields 1 on success, 0 when the FIFO is full; that value
 * is returned (return statement elided in this view).
 */
static int dbc_tty_put_char(struct tty_struct *tty, unsigned char ch)
        struct dbc_port *port = tty->driver_data;

        spin_lock_irqsave(&port->port_lock, flags);
        status = kfifo_put(&port->write_fifo, ch);
        spin_unlock_irqrestore(&port->port_lock, flags);
/*
 * tty_operations.flush_chars: push data buffered by put_char()/write()
 * out to the hardware.
 * NOTE(review): the statement(s) under the lock are elided in this view
 * — expect a dbc_start_tx(port) kick here; confirm in the full file.
 */
static void dbc_tty_flush_chars(struct tty_struct *tty)
        struct dbc_port *port = tty->driver_data;

        spin_lock_irqsave(&port->port_lock, flags);
        spin_unlock_irqrestore(&port->port_lock, flags);
/*
 * tty_operations.write_room: report how many free bytes remain in the
 * write FIFO (return statement elided in this view).
 */
static int dbc_tty_write_room(struct tty_struct *tty)
        struct dbc_port *port = tty->driver_data;

        spin_lock_irqsave(&port->port_lock, flags);
        room = kfifo_avail(&port->write_fifo);
        spin_unlock_irqrestore(&port->port_lock, flags);
/*
 * tty_operations.chars_in_buffer: report how many bytes are still
 * pending in the write FIFO (return statement elided in this view).
 */
static int dbc_tty_chars_in_buffer(struct tty_struct *tty)
        struct dbc_port *port = tty->driver_data;

        spin_lock_irqsave(&port->port_lock, flags);
        chars = kfifo_len(&port->write_fifo);
        spin_unlock_irqrestore(&port->port_lock, flags);
259 static void dbc_tty_unthrottle(struct tty_struct *tty)
261 struct dbc_port *port = tty->driver_data;
264 spin_lock_irqsave(&port->port_lock, flags);
265 tasklet_schedule(&port->push);
266 spin_unlock_irqrestore(&port->port_lock, flags);
/* tty callbacks for the DbC serial port; bound to the driver in
 * xhci_dbc_tty_register_driver(). */
static const struct tty_operations dbc_tty_ops = {
        .install = dbc_tty_install,
        .open = dbc_tty_open,
        .close = dbc_tty_close,
        .write = dbc_tty_write,
        .put_char = dbc_tty_put_char,
        .flush_chars = dbc_tty_flush_chars,
        .write_room = dbc_tty_write_room,
        .chars_in_buffer = dbc_tty_chars_in_buffer,
        .unthrottle = dbc_tty_unthrottle,
/* Single global tty driver instance; the DbC exposes one port (ttyDBC0).
 * NULL whenever the driver is not registered. */
static struct tty_driver *dbc_tty_driver;
/*
 * Allocate and register the single-port "dbc_serial" tty driver
 * (device name ttyDBC).  dbc->port is handed over as driver_state so
 * dbc_tty_install() can find it.  Returns 0 or a negative errno; the
 * declaration of status, the goto labels and the returns are elided in
 * this view.
 */
int xhci_dbc_tty_register_driver(struct xhci_hcd *xhci)
        struct xhci_dbc *dbc = xhci->dbc;

        dbc_tty_driver = tty_alloc_driver(1, TTY_DRIVER_REAL_RAW |
                                          TTY_DRIVER_DYNAMIC_DEV);
        if (IS_ERR(dbc_tty_driver)) {
                status = PTR_ERR(dbc_tty_driver);
                /* keep the global NULL so unregister is a safe no-op */
                dbc_tty_driver = NULL;

        dbc_tty_driver->driver_name = "dbc_serial";
        dbc_tty_driver->name = "ttyDBC";

        dbc_tty_driver->type = TTY_DRIVER_TYPE_SERIAL;
        dbc_tty_driver->subtype = SERIAL_TYPE_NORMAL;
        /* default line discipline settings: 9600 8N1, local line */
        dbc_tty_driver->init_termios = tty_std_termios;
        dbc_tty_driver->init_termios.c_cflag =
                        B9600 | CS8 | CREAD | HUPCL | CLOCAL;
        dbc_tty_driver->init_termios.c_ispeed = 9600;
        dbc_tty_driver->init_termios.c_ospeed = 9600;
        /* consumed by dbc_tty_install() */
        dbc_tty_driver->driver_state = &dbc->port;

        tty_set_operations(dbc_tty_driver, &dbc_tty_ops);

        status = tty_register_driver(dbc_tty_driver);
                        "can't register dbc tty driver, err %d\n", status);
                put_tty_driver(dbc_tty_driver);
                dbc_tty_driver = NULL;
321 void xhci_dbc_tty_unregister_driver(void)
323 if (dbc_tty_driver) {
324 tty_unregister_driver(dbc_tty_driver);
325 put_tty_driver(dbc_tty_driver);
326 dbc_tty_driver = NULL;
/*
 * RX tasklet (scheduled from dbc_read_complete() and unthrottle): move
 * completed read requests from read_queue into the tty flip buffer,
 * honouring tty throttling, then recycle each fully-consumed request to
 * read_pool.  Several branches — per-status handling, partial-push
 * accounting via port->n_read, the disconnect path and the trailing RX
 * restart — are elided in this view.
 */
static void dbc_rx_push(unsigned long _port)
        struct dbc_request *req;
        struct tty_struct *tty;
        bool do_push = false;
        bool disconnect = false;
        struct dbc_port *port = (void *)_port;
        struct list_head *queue = &port->read_queue;

        spin_lock_irqsave(&port->port_lock, flags);
        tty = port->port.tty;
        while (!list_empty(queue)) {
                req = list_first_entry(queue, struct dbc_request, list_pool);

                /* stop feeding a throttled reader; resumed by unthrottle */
                if (tty && tty_throttled(tty))

                switch (req->status) {
                        pr_warn("ttyDBC0: unexpected RX status %d\n",

                        char *packet = req->buf;
                        unsigned int n, size = req->actual;

                        count = tty_insert_flip_string(&port->port, packet,
                        /* remember how far we got for a partial insert */
                        port->n_read += count;

                /* request fully consumed: make it available for RX again */
                list_move(&req->list_pool, &port->read_pool);

        tty_flip_buffer_push(&port->port);

        /* data left over: reschedule ourselves unless throttled */
        if (!list_empty(queue) && tty) {
                if (!tty_throttled(tty)) {
                        tasklet_schedule(&port->push);
                        pr_warn("ttyDBC0: RX not scheduled?\n");

        spin_unlock_irqrestore(&port->port_lock, flags);
/*
 * tty_port_operations.activate: runs on the first open of the port.
 * NOTE(review): the statement(s) under the lock are elided in this view
 * — expected to start reception (dbc_start_rx()); confirm against the
 * full file.
 */
static int dbc_port_activate(struct tty_port *_port, struct tty_struct *tty)
        struct dbc_port *port = container_of(_port, struct dbc_port, port);

        spin_lock_irqsave(&port->port_lock, flags);
        spin_unlock_irqrestore(&port->port_lock, flags);
/* Only .activate is provided; all other tty_port ops use defaults. */
static const struct tty_port_operations dbc_port_ops = {
        .activate = dbc_port_activate,
/*
 * One-time initialisation of the dbc_port embedded in xhci->dbc: the
 * tty_port, its spinlock, the RX push tasklet, the three request lists
 * and the endpoint handles used by the TX/RX paths.
 */
xhci_dbc_tty_init_port(struct xhci_hcd *xhci, struct dbc_port *port)
        tty_port_init(&port->port);
        spin_lock_init(&port->port_lock);
        tasklet_init(&port->push, dbc_rx_push, (unsigned long)port);
        INIT_LIST_HEAD(&port->read_pool);
        INIT_LIST_HEAD(&port->read_queue);
        INIT_LIST_HEAD(&port->write_pool);

        /* cache the endpoints used by dbc_start_rx()/dbc_start_tx() */
        port->in = get_in_ep(xhci);
        port->out = get_out_ep(xhci);
        port->port.ops = &dbc_port_ops;
/*
 * Counterpart of xhci_dbc_tty_init_port(): stop the RX tasklet and
 * destroy the tty_port.
 */
xhci_dbc_tty_exit_port(struct dbc_port *port)
        tasklet_kill(&port->push);
        tty_port_destroy(&port->port);
/*
 * Create the ttyDBC0 device node and all per-port resources: the write
 * FIFO and the request pools (read pool allocated on port->in, write
 * pool on port->out).  The elided lines carry the goto labels of the
 * error path, which unwinds everything in reverse order and returns a
 * negative errno.
 */
int xhci_dbc_tty_register_device(struct xhci_hcd *xhci)
        struct device *tty_dev;
        struct xhci_dbc *dbc = xhci->dbc;
        struct dbc_port *port = &dbc->port;

        xhci_dbc_tty_init_port(xhci, port);
        tty_dev = tty_port_register_device(&port->port,
                                           dbc_tty_driver, 0, NULL);
        if (IS_ERR(tty_dev)) {
                ret = PTR_ERR(tty_dev);

        ret = kfifo_alloc(&port->write_fifo, DBC_WRITE_BUF_SIZE, GFP_KERNEL);

        ret = xhci_dbc_alloc_requests(port->in, &port->read_pool,
        ret = xhci_dbc_alloc_requests(port->out, &port->write_pool,

        /* consumed by the teardown path to know what to undo */
        port->registered = true;

        /* error unwind (labels elided): release in reverse order */
        xhci_dbc_free_requests(port->in, &port->read_pool);
        xhci_dbc_free_requests(port->out, &port->write_pool);
        kfifo_free(&port->write_fifo);

        tty_unregister_device(dbc_tty_driver, 0);

        xhci_dbc_tty_exit_port(port);

        xhci_err(xhci, "can't register tty port, err %d\n", ret);
491 void xhci_dbc_tty_unregister_device(struct xhci_hcd *xhci)
493 struct xhci_dbc *dbc = xhci->dbc;
494 struct dbc_port *port = &dbc->port;
496 tty_unregister_device(dbc_tty_driver, 0);
497 xhci_dbc_tty_exit_port(port);
498 port->registered = false;
500 kfifo_free(&port->write_fifo);
501 xhci_dbc_free_requests(get_out_ep(xhci), &port->read_pool);
502 xhci_dbc_free_requests(get_out_ep(xhci), &port->read_queue);
503 xhci_dbc_free_requests(get_in_ep(xhci), &port->write_pool);