/*
 * Mediated virtual PCI serial host device driver
 *
 * Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
 *     Author: Neo Jia <cjia@nvidia.com>
 *             Kirti Wankhede <kwankhede@nvidia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Sample driver that creates an mdev device which simulates a serial port
 * over a PCI card.
 *
 */
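
/*
 * Typical usage, roughly as described in the kernel's vfio-mediated-device
 * documentation (the UUID below is only an example):
 *
 *   # modprobe mtty
 *   # echo "83b8f4f2-509f-382f-3c1e-e6bfe0fa1001" > \
 *       /sys/devices/virtual/mtty/mtty/mdev_supported_types/mtty-1/create
 *
 * The resulting mediated device can then be handed to a VFIO userspace
 * driver, e.g. QEMU with -device vfio-pci,sysfsdev=/sys/bus/mdev/devices/<uuid>.
 */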

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/uuid.h>
#include <linux/vfio.h>
#include <linux/iommu.h>
#include <linux/sysfs.h>
#include <linux/ctype.h>
#include <linux/file.h>
#include <linux/mdev.h>
#include <linux/pci.h>
#include <linux/serial.h>
#include <uapi/linux/serial_reg.h>
#include <linux/eventfd.h>
/*
 * #defines
 */

#define VERSION_STRING  "0.1"
#define DRIVER_AUTHOR   "NVIDIA Corporation"

#define MTTY_CLASS_NAME "mtty"

#define MTTY_NAME       "mtty"

#define MTTY_STRING_LEN         16

#define MTTY_CONFIG_SPACE_SIZE  0xff
#define MTTY_IO_BAR_SIZE        0x8
#define MTTY_MMIO_BAR_SIZE      0x100000

#define STORE_LE16(addr, val)   (*(u16 *)addr = val)
#define STORE_LE32(addr, val)   (*(u32 *)addr = val)

#define MAX_FIFO_SIZE   16

#define CIRCULAR_BUF_INC_IDX(idx)    (idx = (idx + 1) & (MAX_FIFO_SIZE - 1))

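/*
 * As in vfio-pci, an offset into the mediated device's file encodes both the
 * VFIO region index (in the bits at and above MTTY_VFIO_PCI_OFFSET_SHIFT) and
 * the offset within that region (in the low 40 bits). For example,
 * MTTY_VFIO_PCI_INDEX_TO_OFFSET(VFIO_PCI_BAR0_REGION_INDEX) + 3 addresses
 * byte 3 of BAR0.
 */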
#define MTTY_VFIO_PCI_OFFSET_SHIFT   40

#define MTTY_VFIO_PCI_OFFSET_TO_INDEX(off)   (off >> MTTY_VFIO_PCI_OFFSET_SHIFT)
#define MTTY_VFIO_PCI_INDEX_TO_OFFSET(index) \
				((u64)(index) << MTTY_VFIO_PCI_OFFSET_SHIFT)
#define MTTY_VFIO_PCI_OFFSET_MASK    \
				(((u64)(1) << MTTY_VFIO_PCI_OFFSET_SHIFT) - 1)
#define MAX_MTTYS       24

/*
 * Global Structures
 */

struct mtty_dev {
        dev_t           vd_devt;
        struct class    *vd_class;
        struct cdev     vd_cdev;
        struct idr      vd_idr;
        struct device   dev;
} mtty_dev;

struct mdev_region_info {
        u64 start;
        u64 phys_start;
        u32 size;
        u64 vfio_offset;
};

#if defined(DEBUG_REGS)
const char *wr_reg[] = {
        "TX",
        "IER",
        "FCR",
        "LCR",
        "MCR",
        "LSR",
        "MSR",
        "SCR"
};

const char *rd_reg[] = {
        "RX",
        "IER",
        "IIR",
        "LCR",
        "MCR",
        "LSR",
        "MSR",
        "SCR"
};
#endif

/* loop back buffer */
struct rxtx {
        u8 fifo[MAX_FIFO_SIZE];
        u8 head, tail;
        u8 count;
};

struct serial_port {
        u8 uart_reg[8];         /* 8 registers */
        struct rxtx rxtx;       /* loop back buffer */
        bool dlab;
        bool overrun;
        u16 divisor;
        u8 fcr;                 /* FIFO control register */
        u8 max_fifo_size;
        u8 intr_trigger_level;  /* interrupt trigger level */
};
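
/*
 * Each emulated port is a simple loopback: bytes the guest writes to the
 * transmit register land in the rxtx FIFO and are handed back on subsequent
 * reads of the receive register, with interrupts raised according to the
 * IER/FCR state below.
 */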

/* State of each mdev device */
struct mdev_state {
        int irq_fd;
        struct eventfd_ctx *intx_evtfd;
        struct eventfd_ctx *msi_evtfd;
        int irq_index;
        u8 *vconfig;
        struct mutex ops_lock;
        struct mdev_device *mdev;
        struct mdev_region_info region_info[VFIO_PCI_NUM_REGIONS];
        u32 bar_mask[VFIO_PCI_NUM_REGIONS];
        struct list_head next;
        struct serial_port s[2];
        struct mutex rxtx_lock;
        struct vfio_device_info dev_info;
        int nr_ports;
};

struct mutex mdev_list_lock;
struct list_head mdev_devices_list;

static const struct file_operations vd_fops = {
        .owner          = THIS_MODULE,
};

/* function prototypes */

static int mtty_trigger_interrupt(uuid_le uuid);

/* Helper functions */
static struct mdev_state *find_mdev_state_by_uuid(uuid_le uuid)
{
        struct mdev_state *mds;

        list_for_each_entry(mds, &mdev_devices_list, next) {
                if (uuid_le_cmp(mdev_uuid(mds->mdev), uuid) == 0)
                        return mds;
        }

        return NULL;
}

void dump_buffer(char *buf, uint32_t count)
{
#if defined(DEBUG)
        int i;

        pr_info("Buffer:\n");
        for (i = 0; i < count; i++) {
                pr_info("%2x ", *(buf + i));
                if ((i + 1) % 16 == 0)
                        pr_info("\n");
        }
#endif
}

static void mtty_create_config_space(struct mdev_state *mdev_state)
{
        /* PCI dev ID */
        STORE_LE32((u32 *) &mdev_state->vconfig[0x0], 0x32534348);

        /* Control: I/O+, Mem-, BusMaster- */
        STORE_LE16((u16 *) &mdev_state->vconfig[0x4], 0x0001);

        /* Status: capabilities list absent */
        STORE_LE16((u16 *) &mdev_state->vconfig[0x6], 0x0200);

        /* Rev ID */
        mdev_state->vconfig[0x8] =  0x10;

        /* programming interface class : 16550-compatible serial controller */
        mdev_state->vconfig[0x9] =  0x02;

        /* Sub class : 00 */
        mdev_state->vconfig[0xa] =  0x00;

        /* Base class : Simple Communication controllers */
        mdev_state->vconfig[0xb] =  0x07;

        /* base address registers */
        /* BAR0: IO space */
        STORE_LE32((u32 *) &mdev_state->vconfig[0x10], 0x000001);
        mdev_state->bar_mask[0] = ~(MTTY_IO_BAR_SIZE) + 1;

        if (mdev_state->nr_ports == 2) {
                /* BAR1: IO space */
                STORE_LE32((u32 *) &mdev_state->vconfig[0x14], 0x000001);
                mdev_state->bar_mask[1] = ~(MTTY_IO_BAR_SIZE) + 1;
        }

        /* Subsystem ID */
        STORE_LE32((u32 *) &mdev_state->vconfig[0x2c], 0x32534348);

        mdev_state->vconfig[0x34] =  0x00;   /* Cap Ptr */
        mdev_state->vconfig[0x3d] =  0x01;   /* interrupt pin (INTA#) */

        /* Vendor specific data */
        mdev_state->vconfig[0x40] =  0x23;
        mdev_state->vconfig[0x43] =  0x80;
        mdev_state->vconfig[0x44] =  0x23;
        mdev_state->vconfig[0x48] =  0x23;
        mdev_state->vconfig[0x4c] =  0x23;

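        /* 0x60-0x6e: the ASCII identification string "PCI Serial/UART" */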
        mdev_state->vconfig[0x60] =  0x50;
        mdev_state->vconfig[0x61] =  0x43;
        mdev_state->vconfig[0x62] =  0x49;
        mdev_state->vconfig[0x63] =  0x20;
        mdev_state->vconfig[0x64] =  0x53;
        mdev_state->vconfig[0x65] =  0x65;
        mdev_state->vconfig[0x66] =  0x72;
        mdev_state->vconfig[0x67] =  0x69;
        mdev_state->vconfig[0x68] =  0x61;
        mdev_state->vconfig[0x69] =  0x6c;
        mdev_state->vconfig[0x6a] =  0x2f;
        mdev_state->vconfig[0x6b] =  0x55;
        mdev_state->vconfig[0x6c] =  0x41;
        mdev_state->vconfig[0x6d] =  0x52;
        mdev_state->vconfig[0x6e] =  0x54;
}

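/*
 * Emulate a guest write to PCI config space. BAR writes follow the standard
 * sizing handshake: when the guest writes all-ones, the stored value is
 * masked with the BAR size mask so a subsequent read reveals the BAR size,
 * while the low address-space flag bits of the existing value are preserved.
 */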
static void handle_pci_cfg_write(struct mdev_state *mdev_state, u16 offset,
                                 char *buf, u32 count)
{
        u32 cfg_addr, bar_mask, bar_index = 0;

        switch (offset) {
        case 0x04: /* device control */
        case 0x06: /* device status */
                /* do nothing */
                break;
        case 0x3c:  /* interrupt line */
                mdev_state->vconfig[0x3c] = buf[0];
                break;
        case 0x3d:
                /*
                 * Interrupt Pin is hardwired to INTA.
                 * This field is write protected by hardware
                 */
                break;
        case 0x10:  /* BAR0 */
        case 0x14:  /* BAR1 */
                if (offset == 0x10)
                        bar_index = 0;
                else if (offset == 0x14)
                        bar_index = 1;

                if ((mdev_state->nr_ports == 1) && (bar_index == 1)) {
                        STORE_LE32(&mdev_state->vconfig[offset], 0);
                        break;
                }

                cfg_addr = *(u32 *)buf;
                pr_info("BAR%d addr 0x%x\n", bar_index, cfg_addr);

                if (cfg_addr == 0xffffffff) {
                        bar_mask = mdev_state->bar_mask[bar_index];
                        cfg_addr = (cfg_addr & bar_mask);
                }

                cfg_addr |= (mdev_state->vconfig[offset] & 0x3ul);
                STORE_LE32(&mdev_state->vconfig[offset], cfg_addr);
                break;
        case 0x18:  /* BAR2 */
        case 0x1c:  /* BAR3 */
        case 0x20:  /* BAR4 */
                STORE_LE32(&mdev_state->vconfig[offset], 0);
                break;
        default:
                pr_info("PCI config write @0x%x of %d bytes not handled\n",
                        offset, count);
                break;
        }
}

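/*
 * Emulate a guest write to one of the I/O BARs, i.e. to a 16550 register.
 * Data written to THR goes into the loopback FIFO and, depending on IER and
 * the FIFO trigger level, raises a "received data available" interrupt.
 */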
static void handle_bar_write(unsigned int index, struct mdev_state *mdev_state,
                                u16 offset, char *buf, u32 count)
{
        u8 data = *buf;

        /* Handle data written by guest */
        switch (offset) {
        case UART_TX:
                /* if DLAB set, data is LSB of divisor */
                if (mdev_state->s[index].dlab) {
                        mdev_state->s[index].divisor |= data;
                        break;
                }

                mutex_lock(&mdev_state->rxtx_lock);

                /* save in TX buffer */
                if (mdev_state->s[index].rxtx.count <
                                mdev_state->s[index].max_fifo_size) {
                        mdev_state->s[index].rxtx.fifo[
                                        mdev_state->s[index].rxtx.head] = data;
                        mdev_state->s[index].rxtx.count++;
                        CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.head);
                        mdev_state->s[index].overrun = false;

                        /*
                         * Trigger interrupt if receive data interrupt is
                         * enabled and fifo reached trigger level
                         */
                        if ((mdev_state->s[index].uart_reg[UART_IER] &
                                                UART_IER_RDI) &&
                           (mdev_state->s[index].rxtx.count ==
                                    mdev_state->s[index].intr_trigger_level)) {
                                /* trigger interrupt */
#if defined(DEBUG_INTR)
                                pr_err("Serial port %d: Fifo level trigger\n",
                                        index);
#endif
                                mtty_trigger_interrupt(
                                                mdev_uuid(mdev_state->mdev));
                        }
                } else {
#if defined(DEBUG_INTR)
                        pr_err("Serial port %d: Buffer Overflow\n", index);
#endif
                        mdev_state->s[index].overrun = true;

                        /*
                         * Trigger interrupt if receiver line status interrupt
                         * is enabled
                         */
                        if (mdev_state->s[index].uart_reg[UART_IER] &
                                                                UART_IER_RLSI)
                                mtty_trigger_interrupt(
                                                mdev_uuid(mdev_state->mdev));
                }
                mutex_unlock(&mdev_state->rxtx_lock);
                break;

        case UART_IER:
                /* if DLAB set, data is MSB of divisor */
                if (mdev_state->s[index].dlab)
                        mdev_state->s[index].divisor |= (u16)data << 8;
                else {
                        mdev_state->s[index].uart_reg[offset] = data;
                        mutex_lock(&mdev_state->rxtx_lock);
                        if ((data & UART_IER_THRI) &&
                            (mdev_state->s[index].rxtx.head ==
                                        mdev_state->s[index].rxtx.tail)) {
#if defined(DEBUG_INTR)
                                pr_err("Serial port %d: IER_THRI write\n",
                                        index);
#endif
                                mtty_trigger_interrupt(
                                                mdev_uuid(mdev_state->mdev));
                        }

                        mutex_unlock(&mdev_state->rxtx_lock);
                }

                break;

        case UART_FCR:
                mdev_state->s[index].fcr = data;

                mutex_lock(&mdev_state->rxtx_lock);
                if (data & (UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT)) {
                        /* clear loop back FIFO */
                        mdev_state->s[index].rxtx.count = 0;
                        mdev_state->s[index].rxtx.head = 0;
                        mdev_state->s[index].rxtx.tail = 0;
                }
                mutex_unlock(&mdev_state->rxtx_lock);

                switch (data & UART_FCR_TRIGGER_MASK) {
                case UART_FCR_TRIGGER_1:
                        mdev_state->s[index].intr_trigger_level = 1;
                        break;

                case UART_FCR_TRIGGER_4:
                        mdev_state->s[index].intr_trigger_level = 4;
                        break;

                case UART_FCR_TRIGGER_8:
                        mdev_state->s[index].intr_trigger_level = 8;
                        break;

                case UART_FCR_TRIGGER_14:
                        mdev_state->s[index].intr_trigger_level = 14;
                        break;
                }

                /*
                 * Otherwise set the trigger level to 1, or implement a timer
                 * with a timeout of 4 characters that sets the "Receive data
                 * timeout" indication in the IIR register when it expires.
                 */
                mdev_state->s[index].intr_trigger_level = 1;
                if (data & UART_FCR_ENABLE_FIFO)
                        mdev_state->s[index].max_fifo_size = MAX_FIFO_SIZE;
                else {
                        mdev_state->s[index].max_fifo_size = 1;
                        mdev_state->s[index].intr_trigger_level = 1;
                }

                break;

        case UART_LCR:
                if (data & UART_LCR_DLAB) {
                        mdev_state->s[index].dlab = true;
                        mdev_state->s[index].divisor = 0;
                } else
                        mdev_state->s[index].dlab = false;

                mdev_state->s[index].uart_reg[offset] = data;
                break;

        case UART_MCR:
                mdev_state->s[index].uart_reg[offset] = data;

                if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
                                (data & UART_MCR_OUT2)) {
#if defined(DEBUG_INTR)
                        pr_err("Serial port %d: MCR_OUT2 write\n", index);
#endif
                        mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
                }

                if ((mdev_state->s[index].uart_reg[UART_IER] & UART_IER_MSI) &&
                                (data & (UART_MCR_RTS | UART_MCR_DTR))) {
#if defined(DEBUG_INTR)
                        pr_err("Serial port %d: MCR RTS/DTR write\n", index);
#endif
                        mtty_trigger_interrupt(mdev_uuid(mdev_state->mdev));
                }
                break;

        case UART_LSR:
        case UART_MSR:
                /* do nothing */
                break;

        case UART_SCR:
                mdev_state->s[index].uart_reg[offset] = data;
                break;

        default:
                break;
        }
}

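/*
 * Emulate a guest read from one of the I/O BARs. Reads from RBR drain the
 * loopback FIFO; IIR, LSR and MSR are synthesized on the fly from IER and
 * the current FIFO state.
 */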
static void handle_bar_read(unsigned int index, struct mdev_state *mdev_state,
                            u16 offset, char *buf, u32 count)
{
        /* Handle read requests by guest */
        switch (offset) {
        case UART_RX:
                /* if DLAB set, data is LSB of divisor */
                if (mdev_state->s[index].dlab) {
                        *buf  = (u8)mdev_state->s[index].divisor;
                        break;
                }

                mutex_lock(&mdev_state->rxtx_lock);
                /* return data in tx buffer */
                if (mdev_state->s[index].rxtx.head !=
                                 mdev_state->s[index].rxtx.tail) {
                        *buf = mdev_state->s[index].rxtx.fifo[
                                                mdev_state->s[index].rxtx.tail];
                        mdev_state->s[index].rxtx.count--;
                        CIRCULAR_BUF_INC_IDX(mdev_state->s[index].rxtx.tail);
                }

                if (mdev_state->s[index].rxtx.head ==
                                mdev_state->s[index].rxtx.tail) {
                /*
                 *  Trigger interrupt if tx buffer empty interrupt is
                 *  enabled and fifo is empty
                 */
#if defined(DEBUG_INTR)
                        pr_err("Serial port %d: Buffer Empty\n", index);
#endif
                        if (mdev_state->s[index].uart_reg[UART_IER] &
                                                         UART_IER_THRI)
                                mtty_trigger_interrupt(
                                        mdev_uuid(mdev_state->mdev));
                }
                mutex_unlock(&mdev_state->rxtx_lock);

                break;

        case UART_IER:
                if (mdev_state->s[index].dlab) {
                        *buf = (u8)(mdev_state->s[index].divisor >> 8);
                        break;
                }
                *buf = mdev_state->s[index].uart_reg[offset] & 0x0f;
                break;

        case UART_IIR:
        {
                u8 ier = mdev_state->s[index].uart_reg[UART_IER];
                *buf = 0;

                mutex_lock(&mdev_state->rxtx_lock);
                /* Interrupt priority 1: Parity, overrun, framing or break */
                if ((ier & UART_IER_RLSI) && mdev_state->s[index].overrun)
                        *buf |= UART_IIR_RLSI;

                /* Interrupt priority 2: Fifo trigger level reached */
                if ((ier & UART_IER_RDI) &&
                    (mdev_state->s[index].rxtx.count >=
                      mdev_state->s[index].intr_trigger_level))
                        *buf |= UART_IIR_RDI;

                /* Interrupt priority 3: transmitter holding register empty */
                if ((ier & UART_IER_THRI) &&
                    (mdev_state->s[index].rxtx.head ==
                                mdev_state->s[index].rxtx.tail))
                        *buf |= UART_IIR_THRI;

                /* Interrupt priority 4: Modem status: CTS, DSR, RI or DCD  */
                if ((ier & UART_IER_MSI) &&
                    (mdev_state->s[index].uart_reg[UART_MCR] &
                                 (UART_MCR_RTS | UART_MCR_DTR)))
                        *buf |= UART_IIR_MSI;

                /* bit0: 0=> interrupt pending, 1=> no interrupt is pending */
                if (*buf == 0)
                        *buf = UART_IIR_NO_INT;

                /* set bit 6 & 7 to be 16550 compatible */
                *buf |= 0xC0;
                mutex_unlock(&mdev_state->rxtx_lock);
        }
        break;

        case UART_LCR:
        case UART_MCR:
                *buf = mdev_state->s[index].uart_reg[offset];
                break;

        case UART_LSR:
        {
                u8 lsr = 0;

                mutex_lock(&mdev_state->rxtx_lock);
                /* at least one char in FIFO */
                if (mdev_state->s[index].rxtx.head !=
                                 mdev_state->s[index].rxtx.tail)
                        lsr |= UART_LSR_DR;

                /* if FIFO overrun */
                if (mdev_state->s[index].overrun)
                        lsr |= UART_LSR_OE;

                /* transmit FIFO empty and transmitter empty */
                if (mdev_state->s[index].rxtx.head ==
                                 mdev_state->s[index].rxtx.tail)
                        lsr |= UART_LSR_TEMT | UART_LSR_THRE;

                mutex_unlock(&mdev_state->rxtx_lock);
                *buf = lsr;
                break;
        }
        case UART_MSR:
                *buf = UART_MSR_DSR | UART_MSR_DDSR | UART_MSR_DCD;

                mutex_lock(&mdev_state->rxtx_lock);
                /* if AFE is 1 and the FIFO has space, set the CTS bit */
                if (mdev_state->s[index].uart_reg[UART_MCR] &
                                                 UART_MCR_AFE) {
                        if (mdev_state->s[index].rxtx.count <
                                        mdev_state->s[index].max_fifo_size)
                                *buf |= UART_MSR_CTS | UART_MSR_DCTS;
                } else
                        *buf |= UART_MSR_CTS | UART_MSR_DCTS;
                mutex_unlock(&mdev_state->rxtx_lock);

                break;

        case UART_SCR:
                *buf = mdev_state->s[index].uart_reg[offset];
                break;

        default:
                break;
        }
}

static void mdev_read_base(struct mdev_state *mdev_state)
{
        int index, pos;
        u32 start_lo, start_hi;
        u32 mem_type;

        pos = PCI_BASE_ADDRESS_0;

        for (index = 0; index <= VFIO_PCI_BAR5_REGION_INDEX; index++) {

                if (!mdev_state->region_info[index].size)
                        continue;

                start_lo = (*(u32 *)(mdev_state->vconfig + pos)) &
                        PCI_BASE_ADDRESS_MEM_MASK;
                mem_type = (*(u32 *)(mdev_state->vconfig + pos)) &
                        PCI_BASE_ADDRESS_MEM_TYPE_MASK;

                switch (mem_type) {
                case PCI_BASE_ADDRESS_MEM_TYPE_64:
                        start_hi = (*(u32 *)(mdev_state->vconfig + pos + 4));
                        pos += 4;
                        break;
                case PCI_BASE_ADDRESS_MEM_TYPE_32:
                case PCI_BASE_ADDRESS_MEM_TYPE_1M:
                        /* 1M mem BAR treated as 32-bit BAR */
                default:
                        /* mem unknown type treated as 32-bit BAR */
                        start_hi = 0;
                        break;
                }
                pos += 4;
                mdev_state->region_info[index].start = ((u64)start_hi << 32) |
                                                        start_lo;
        }
}

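/*
 * Common read/write path: decode the file offset into a VFIO region index
 * plus an offset within that region, then dispatch to the config-space or
 * BAR emulation above. Returns the number of bytes handled, or a negative
 * value on error.
 */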
static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
                           loff_t pos, bool is_write)
{
        struct mdev_state *mdev_state;
        unsigned int index;
        loff_t offset;
        int ret = 0;

        if (!mdev || !buf)
                return -EINVAL;

        mdev_state = mdev_get_drvdata(mdev);
        if (!mdev_state) {
                pr_err("%s mdev_state not found\n", __func__);
                return -EINVAL;
        }

        mutex_lock(&mdev_state->ops_lock);

        index = MTTY_VFIO_PCI_OFFSET_TO_INDEX(pos);
        offset = pos & MTTY_VFIO_PCI_OFFSET_MASK;
        switch (index) {
        case VFIO_PCI_CONFIG_REGION_INDEX:

#if defined(DEBUG)
                pr_info("%s: PCI config space %s at offset 0x%llx\n",
                         __func__, is_write ? "write" : "read", offset);
#endif
                if (is_write) {
                        dump_buffer(buf, count);
                        handle_pci_cfg_write(mdev_state, offset, buf, count);
                } else {
                        memcpy(buf, (mdev_state->vconfig + offset), count);
                        dump_buffer(buf, count);
                }

                break;

        case VFIO_PCI_BAR0_REGION_INDEX ... VFIO_PCI_BAR5_REGION_INDEX:
                if (!mdev_state->region_info[index].start)
                        mdev_read_base(mdev_state);

                if (is_write) {
                        dump_buffer(buf, count);

#if defined(DEBUG_REGS)
                        pr_info("%s: BAR%d  WR @0x%llx %s val:0x%02x dlab:%d\n",
                                __func__, index, offset, wr_reg[offset],
                                (u8)*buf, mdev_state->s[index].dlab);
#endif
                        handle_bar_write(index, mdev_state, offset, buf, count);
                } else {
                        handle_bar_read(index, mdev_state, offset, buf, count);
                        dump_buffer(buf, count);

#if defined(DEBUG_REGS)
                        pr_info("%s: BAR%d  RD @0x%llx %s val:0x%02x dlab:%d\n",
                                __func__, index, offset, rd_reg[offset],
                                (u8)*buf, mdev_state->s[index].dlab);
#endif
                }
                break;

        default:
                ret = -1;
                goto accessfailed;
        }

        ret = count;


accessfailed:
        mutex_unlock(&mdev_state->ops_lock);

        return ret;
}

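/*
 * Called by the mdev core when userspace writes a UUID to a supported type's
 * "create" attribute. The type name ("<driver>-1" or "<driver>-2") decides
 * whether the new device exposes one or two serial ports.
 */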
int mtty_create(struct kobject *kobj, struct mdev_device *mdev)
{
        struct mdev_state *mdev_state;
        char name[MTTY_STRING_LEN];
        int nr_ports = 0, i;

        if (!mdev)
                return -EINVAL;

        for (i = 0; i < 2; i++) {
                snprintf(name, MTTY_STRING_LEN, "%s-%d",
                        dev_driver_string(mdev_parent_dev(mdev)), i + 1);
                if (!strcmp(kobj->name, name)) {
                        nr_ports = i + 1;
                        break;
                }
        }

        if (!nr_ports)
                return -EINVAL;

        mdev_state = kzalloc(sizeof(struct mdev_state), GFP_KERNEL);
        if (mdev_state == NULL)
                return -ENOMEM;

        mdev_state->nr_ports = nr_ports;
        mdev_state->irq_index = -1;
        mdev_state->s[0].max_fifo_size = MAX_FIFO_SIZE;
        mdev_state->s[1].max_fifo_size = MAX_FIFO_SIZE;
        mutex_init(&mdev_state->rxtx_lock);
        mdev_state->vconfig = kzalloc(MTTY_CONFIG_SPACE_SIZE, GFP_KERNEL);

        if (mdev_state->vconfig == NULL) {
                kfree(mdev_state);
                return -ENOMEM;
        }

        mutex_init(&mdev_state->ops_lock);
        mdev_state->mdev = mdev;
        mdev_set_drvdata(mdev, mdev_state);

        mtty_create_config_space(mdev_state);

        mutex_lock(&mdev_list_lock);
        list_add(&mdev_state->next, &mdev_devices_list);
        mutex_unlock(&mdev_list_lock);

        return 0;
}

int mtty_remove(struct mdev_device *mdev)
{
        struct mdev_state *mds, *tmp_mds;
        struct mdev_state *mdev_state = mdev_get_drvdata(mdev);
        int ret = -EINVAL;

        mutex_lock(&mdev_list_lock);
        list_for_each_entry_safe(mds, tmp_mds, &mdev_devices_list, next) {
                if (mdev_state == mds) {
                        list_del(&mdev_state->next);
                        mdev_set_drvdata(mdev, NULL);
                        kfree(mdev_state->vconfig);
                        kfree(mdev_state);
                        ret = 0;
                        break;
                }
        }
        mutex_unlock(&mdev_list_lock);

        return ret;
}

int mtty_reset(struct mdev_device *mdev)
{
        struct mdev_state *mdev_state;

        if (!mdev)
                return -EINVAL;

        mdev_state = mdev_get_drvdata(mdev);
        if (!mdev_state)
                return -EINVAL;

        pr_info("%s: called\n", __func__);

        return 0;
}

ssize_t mtty_read(struct mdev_device *mdev, char __user *buf, size_t count,
                  loff_t *ppos)
{
        unsigned int done = 0;
        int ret;

        while (count) {
                size_t filled;

                if (count >= 4 && !(*ppos % 4)) {
                        u32 val;

                        ret =  mdev_access(mdev, (char *)&val, sizeof(val),
                                           *ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 4;
                } else if (count >= 2 && !(*ppos % 2)) {
                        u16 val;

                        ret = mdev_access(mdev, (char *)&val, sizeof(val),
                                          *ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 2;
                } else {
                        u8 val;

                        ret = mdev_access(mdev, (char *)&val, sizeof(val),
                                          *ppos, false);
                        if (ret <= 0)
                                goto read_err;

                        if (copy_to_user(buf, &val, sizeof(val)))
                                goto read_err;

                        filled = 1;
                }

                count -= filled;
                done += filled;
                *ppos += filled;
                buf += filled;
        }

        return done;

read_err:
        return -EFAULT;
}

ssize_t mtty_write(struct mdev_device *mdev, const char __user *buf,
                   size_t count, loff_t *ppos)
{
        unsigned int done = 0;
        int ret;

        while (count) {
                size_t filled;

                if (count >= 4 && !(*ppos % 4)) {
                        u32 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = mdev_access(mdev, (char *)&val, sizeof(val),
                                          *ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 4;
                } else if (count >= 2 && !(*ppos % 2)) {
                        u16 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = mdev_access(mdev, (char *)&val, sizeof(val),
                                          *ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 2;
                } else {
                        u8 val;

                        if (copy_from_user(&val, buf, sizeof(val)))
                                goto write_err;

                        ret = mdev_access(mdev, (char *)&val, sizeof(val),
                                          *ppos, true);
                        if (ret <= 0)
                                goto write_err;

                        filled = 1;
                }
                count -= filled;
                done += filled;
                *ppos += filled;
                buf += filled;
        }

        return done;
write_err:
        return -EFAULT;
}

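/*
 * VFIO_DEVICE_SET_IRQS backend: remember the eventfd userspace hands us for
 * INTx or MSI so mtty_trigger_interrupt() can signal it later. Mask/unmask
 * actions are accepted but not emulated.
 */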
static int mtty_set_irqs(struct mdev_device *mdev, uint32_t flags,
                         unsigned int index, unsigned int start,
                         unsigned int count, void *data)
{
        int ret = 0;
        struct mdev_state *mdev_state;

        if (!mdev)
                return -EINVAL;

        mdev_state = mdev_get_drvdata(mdev);
        if (!mdev_state)
                return -EINVAL;

        mutex_lock(&mdev_state->ops_lock);
        switch (index) {
        case VFIO_PCI_INTX_IRQ_INDEX:
                switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
                case VFIO_IRQ_SET_ACTION_MASK:
                case VFIO_IRQ_SET_ACTION_UNMASK:
                        break;
                case VFIO_IRQ_SET_ACTION_TRIGGER:
                {
                        if (flags & VFIO_IRQ_SET_DATA_NONE) {
                                pr_info("%s: disable INTx\n", __func__);
                                if (mdev_state->intx_evtfd)
                                        eventfd_ctx_put(mdev_state->intx_evtfd);
                                break;
                        }

                        if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
                                int fd = *(int *)data;

                                if (fd > 0) {
                                        struct eventfd_ctx *evt;

                                        evt = eventfd_ctx_fdget(fd);
                                        if (IS_ERR(evt)) {
                                                ret = PTR_ERR(evt);
                                                break;
                                        }
                                        mdev_state->intx_evtfd = evt;
                                        mdev_state->irq_fd = fd;
                                        mdev_state->irq_index = index;
                                        break;
                                }
                        }
                        break;
                }
                }
                break;
        case VFIO_PCI_MSI_IRQ_INDEX:
                switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) {
                case VFIO_IRQ_SET_ACTION_MASK:
                case VFIO_IRQ_SET_ACTION_UNMASK:
                        break;
                case VFIO_IRQ_SET_ACTION_TRIGGER:
                        if (flags & VFIO_IRQ_SET_DATA_NONE) {
                                if (mdev_state->msi_evtfd)
                                        eventfd_ctx_put(mdev_state->msi_evtfd);
                                pr_info("%s: disable MSI\n", __func__);
                                mdev_state->irq_index = VFIO_PCI_INTX_IRQ_INDEX;
                                break;
                        }
                        if (flags & VFIO_IRQ_SET_DATA_EVENTFD) {
                                int fd = *(int *)data;
                                struct eventfd_ctx *evt;

                                if (fd <= 0)
                                        break;

                                if (mdev_state->msi_evtfd)
                                        break;

                                evt = eventfd_ctx_fdget(fd);
                                if (IS_ERR(evt)) {
                                        ret = PTR_ERR(evt);
                                        break;
                                }
                                mdev_state->msi_evtfd = evt;
                                mdev_state->irq_fd = fd;
                                mdev_state->irq_index = index;
                        }
                        break;
                }
                break;
        case VFIO_PCI_MSIX_IRQ_INDEX:
                pr_info("%s: MSIX_IRQ\n", __func__);
                break;
        case VFIO_PCI_ERR_IRQ_INDEX:
                pr_info("%s: ERR_IRQ\n", __func__);
                break;
        case VFIO_PCI_REQ_IRQ_INDEX:
                pr_info("%s: REQ_IRQ\n", __func__);
                break;
        }

        mutex_unlock(&mdev_state->ops_lock);
        return ret;
}

static int mtty_trigger_interrupt(uuid_le uuid)
{
        int ret = -1;
        struct mdev_state *mdev_state;

        mdev_state = find_mdev_state_by_uuid(uuid);

        if (!mdev_state) {
                pr_info("%s: mdev not found\n", __func__);
                return -EINVAL;
        }

        if ((mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX) &&
            (!mdev_state->msi_evtfd))
                return -EINVAL;
        else if ((mdev_state->irq_index == VFIO_PCI_INTX_IRQ_INDEX) &&
                 (!mdev_state->intx_evtfd)) {
                pr_info("%s: Intr eventfd not found\n", __func__);
                return -EINVAL;
        }

        if (mdev_state->irq_index == VFIO_PCI_MSI_IRQ_INDEX)
                ret = eventfd_signal(mdev_state->msi_evtfd, 1);
        else
                ret = eventfd_signal(mdev_state->intx_evtfd, 1);

#if defined(DEBUG_INTR)
        pr_info("Intx triggered\n");
#endif
        if (ret != 1)
                pr_err("%s: eventfd signal failed (%d)\n", __func__, ret);

        return ret;
}

int mtty_get_region_info(struct mdev_device *mdev,
                         struct vfio_region_info *region_info,
                         u16 *cap_type_id, void **cap_type)
{
        unsigned int size = 0;
        struct mdev_state *mdev_state;
        u32 bar_index;

        if (!mdev)
                return -EINVAL;

        mdev_state = mdev_get_drvdata(mdev);
        if (!mdev_state)
                return -EINVAL;

        bar_index = region_info->index;
        if (bar_index >= VFIO_PCI_NUM_REGIONS)
                return -EINVAL;

        mutex_lock(&mdev_state->ops_lock);

        switch (bar_index) {
        case VFIO_PCI_CONFIG_REGION_INDEX:
                size = MTTY_CONFIG_SPACE_SIZE;
                break;
        case VFIO_PCI_BAR0_REGION_INDEX:
                size = MTTY_IO_BAR_SIZE;
                break;
        case VFIO_PCI_BAR1_REGION_INDEX:
                if (mdev_state->nr_ports == 2)
                        size = MTTY_IO_BAR_SIZE;
                break;
        default:
                size = 0;
                break;
        }

        mdev_state->region_info[bar_index].size = size;
        mdev_state->region_info[bar_index].vfio_offset =
                MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);

        region_info->size = size;
        region_info->offset = MTTY_VFIO_PCI_INDEX_TO_OFFSET(bar_index);
        region_info->flags = VFIO_REGION_INFO_FLAG_READ |
                VFIO_REGION_INFO_FLAG_WRITE;
        mutex_unlock(&mdev_state->ops_lock);
        return 0;
}

int mtty_get_irq_info(struct mdev_device *mdev, struct vfio_irq_info *irq_info)
{
        switch (irq_info->index) {
        case VFIO_PCI_INTX_IRQ_INDEX:
        case VFIO_PCI_MSI_IRQ_INDEX:
        case VFIO_PCI_REQ_IRQ_INDEX:
                break;

        default:
                return -EINVAL;
        }

        irq_info->flags = VFIO_IRQ_INFO_EVENTFD;
        irq_info->count = 1;

        if (irq_info->index == VFIO_PCI_INTX_IRQ_INDEX)
                irq_info->flags |= (VFIO_IRQ_INFO_MASKABLE |
                                VFIO_IRQ_INFO_AUTOMASKED);
        else
                irq_info->flags |= VFIO_IRQ_INFO_NORESIZE;

        return 0;
}

int mtty_get_device_info(struct mdev_device *mdev,
                         struct vfio_device_info *dev_info)
{
        dev_info->flags = VFIO_DEVICE_FLAGS_PCI;
        dev_info->num_regions = VFIO_PCI_NUM_REGIONS;
        dev_info->num_irqs = VFIO_PCI_NUM_IRQS;

        return 0;
}

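/*
 * Device ioctls exercised by a VFIO userspace driver such as QEMU:
 * GET_INFO, GET_REGION_INFO, GET_IRQ_INFO, SET_IRQS and RESET.
 */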
static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
                        unsigned long arg)
{
        int ret = 0;
        unsigned long minsz;
        struct mdev_state *mdev_state;

        if (!mdev)
                return -EINVAL;

        mdev_state = mdev_get_drvdata(mdev);
        if (!mdev_state)
                return -ENODEV;

        switch (cmd) {
        case VFIO_DEVICE_GET_INFO:
        {
                struct vfio_device_info info;

                minsz = offsetofend(struct vfio_device_info, num_irqs);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = mtty_get_device_info(mdev, &info);
                if (ret)
                        return ret;

                memcpy(&mdev_state->dev_info, &info, sizeof(info));

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }
        case VFIO_DEVICE_GET_REGION_INFO:
        {
                struct vfio_region_info info;
                u16 cap_type_id = 0;
                void *cap_type = NULL;

                minsz = offsetofend(struct vfio_region_info, offset);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if (info.argsz < minsz)
                        return -EINVAL;

                ret = mtty_get_region_info(mdev, &info, &cap_type_id,
                                           &cap_type);
                if (ret)
                        return ret;

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }

        case VFIO_DEVICE_GET_IRQ_INFO:
        {
                struct vfio_irq_info info;

                minsz = offsetofend(struct vfio_irq_info, count);

                if (copy_from_user(&info, (void __user *)arg, minsz))
                        return -EFAULT;

                if ((info.argsz < minsz) ||
                    (info.index >= mdev_state->dev_info.num_irqs))
                        return -EINVAL;

                ret = mtty_get_irq_info(mdev, &info);
                if (ret)
                        return ret;

                if (copy_to_user((void __user *)arg, &info, minsz))
                        return -EFAULT;

                return 0;
        }
        case VFIO_DEVICE_SET_IRQS:
        {
                struct vfio_irq_set hdr;
                u8 *data = NULL, *ptr = NULL;
                size_t data_size = 0;

                minsz = offsetofend(struct vfio_irq_set, count);

                if (copy_from_user(&hdr, (void __user *)arg, minsz))
                        return -EFAULT;

                ret = vfio_set_irqs_validate_and_prepare(&hdr,
                                                mdev_state->dev_info.num_irqs,
                                                VFIO_PCI_NUM_IRQS,
                                                &data_size);
                if (ret)
                        return ret;

                if (data_size) {
                        ptr = data = memdup_user((void __user *)(arg + minsz),
                                                 data_size);
                        if (IS_ERR(data))
                                return PTR_ERR(data);
                }

                ret = mtty_set_irqs(mdev, hdr.flags, hdr.index, hdr.start,
                                    hdr.count, data);

                kfree(ptr);
                return ret;
        }
        case VFIO_DEVICE_RESET:
                return mtty_reset(mdev);
        }
        return -ENOTTY;
}

int mtty_open(struct mdev_device *mdev)
{
        pr_info("%s\n", __func__);
        return 0;
}

void mtty_close(struct mdev_device *mdev)
{
        pr_info("%s\n", __func__);
}

static ssize_t
sample_mtty_dev_show(struct device *dev, struct device_attribute *attr,
                     char *buf)
{
        return sprintf(buf, "This is a phy device\n");
}

static DEVICE_ATTR_RO(sample_mtty_dev);

static struct attribute *mtty_dev_attrs[] = {
        &dev_attr_sample_mtty_dev.attr,
        NULL,
};

static const struct attribute_group mtty_dev_group = {
        .name  = "mtty_dev",
        .attrs = mtty_dev_attrs,
};

const struct attribute_group *mtty_dev_groups[] = {
        &mtty_dev_group,
        NULL,
};

static ssize_t
sample_mdev_dev_show(struct device *dev, struct device_attribute *attr,
                     char *buf)
{
        if (mdev_from_dev(dev))
                return sprintf(buf, "This is MDEV %s\n", dev_name(dev));

        return sprintf(buf, "\n");
}

static DEVICE_ATTR_RO(sample_mdev_dev);

static struct attribute *mdev_dev_attrs[] = {
        &dev_attr_sample_mdev_dev.attr,
        NULL,
};

static const struct attribute_group mdev_dev_group = {
        .name  = "vendor",
        .attrs = mdev_dev_attrs,
};

const struct attribute_group *mdev_dev_groups[] = {
        &mdev_dev_group,
        NULL,
};

static ssize_t
name_show(struct kobject *kobj, struct device *dev, char *buf)
{
        char name[MTTY_STRING_LEN];
        int i;
        const char *name_str[2] = {"Single port serial", "Dual port serial"};

        for (i = 0; i < 2; i++) {
                snprintf(name, MTTY_STRING_LEN, "%s-%d",
                         dev_driver_string(dev), i + 1);
                if (!strcmp(kobj->name, name))
                        return sprintf(buf, "%s\n", name_str[i]);
        }

        return -EINVAL;
}

MDEV_TYPE_ATTR_RO(name);

static ssize_t
available_instances_show(struct kobject *kobj, struct device *dev, char *buf)
{
        char name[MTTY_STRING_LEN];
        int i;
        struct mdev_state *mds;
        int ports = 0, used = 0;

        for (i = 0; i < 2; i++) {
                snprintf(name, MTTY_STRING_LEN, "%s-%d",
                         dev_driver_string(dev), i + 1);
                if (!strcmp(kobj->name, name)) {
                        ports = i + 1;
                        break;
                }
        }

        if (!ports)
                return -EINVAL;

        list_for_each_entry(mds, &mdev_devices_list, next)
                used += mds->nr_ports;

        return sprintf(buf, "%d\n", (MAX_MTTYS - used)/ports);
}

MDEV_TYPE_ATTR_RO(available_instances);


static ssize_t device_api_show(struct kobject *kobj, struct device *dev,
                               char *buf)
{
        return sprintf(buf, "%s\n", VFIO_DEVICE_API_PCI_STRING);
}

MDEV_TYPE_ATTR_RO(device_api);

static struct attribute *mdev_types_attrs[] = {
        &mdev_type_attr_name.attr,
        &mdev_type_attr_device_api.attr,
        &mdev_type_attr_available_instances.attr,
        NULL,
};

static struct attribute_group mdev_type_group1 = {
        .name  = "1",
        .attrs = mdev_types_attrs,
};

static struct attribute_group mdev_type_group2 = {
        .name  = "2",
        .attrs = mdev_types_attrs,
};

struct attribute_group *mdev_type_groups[] = {
        &mdev_type_group1,
        &mdev_type_group2,
        NULL,
};

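/*
 * Callbacks handed to the mdev core in mtty_dev_init(); the mdev bus invokes
 * create/remove when instances are created or destroyed via sysfs and routes
 * VFIO file operations to the read/write/ioctl handlers above.
 */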
static const struct mdev_parent_ops mdev_fops = {
        .owner                  = THIS_MODULE,
        .dev_attr_groups        = mtty_dev_groups,
        .mdev_attr_groups       = mdev_dev_groups,
        .supported_type_groups  = mdev_type_groups,
        .create                 = mtty_create,
        .remove                 = mtty_remove,
        .open                   = mtty_open,
        .release                = mtty_close,
        .read                   = mtty_read,
        .write                  = mtty_write,
        .ioctl                  = mtty_ioctl,
};

static void mtty_device_release(struct device *dev)
{
        dev_dbg(dev, "mtty: released\n");
}

static int __init mtty_dev_init(void)
{
        int ret = 0;

        pr_info("mtty_dev: %s\n", __func__);

        memset(&mtty_dev, 0, sizeof(mtty_dev));

        idr_init(&mtty_dev.vd_idr);

        ret = alloc_chrdev_region(&mtty_dev.vd_devt, 0, MINORMASK, MTTY_NAME);

        if (ret < 0) {
                pr_err("Error: failed to register mtty_dev, err:%d\n", ret);
                return ret;
        }

        cdev_init(&mtty_dev.vd_cdev, &vd_fops);
        cdev_add(&mtty_dev.vd_cdev, mtty_dev.vd_devt, MINORMASK);

        pr_info("major_number:%d\n", MAJOR(mtty_dev.vd_devt));

        mtty_dev.vd_class = class_create(THIS_MODULE, MTTY_CLASS_NAME);

        if (IS_ERR(mtty_dev.vd_class)) {
                pr_err("Error: failed to register mtty_dev class\n");
                ret = PTR_ERR(mtty_dev.vd_class);
                goto failed1;
        }

        mtty_dev.dev.class = mtty_dev.vd_class;
        mtty_dev.dev.release = mtty_device_release;
        dev_set_name(&mtty_dev.dev, "%s", MTTY_NAME);

        ret = device_register(&mtty_dev.dev);
        if (ret)
                goto failed2;

        ret = mdev_register_device(&mtty_dev.dev, &mdev_fops);
        if (ret)
                goto failed3;

        mutex_init(&mdev_list_lock);
        INIT_LIST_HEAD(&mdev_devices_list);

        goto all_done;

failed3:

        device_unregister(&mtty_dev.dev);
failed2:
        class_destroy(mtty_dev.vd_class);

failed1:
        cdev_del(&mtty_dev.vd_cdev);
        unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);

all_done:
        return ret;
}

static void __exit mtty_dev_exit(void)
{
        mtty_dev.dev.bus = NULL;
        mdev_unregister_device(&mtty_dev.dev);

        device_unregister(&mtty_dev.dev);
        idr_destroy(&mtty_dev.vd_idr);
        cdev_del(&mtty_dev.vd_cdev);
        unregister_chrdev_region(mtty_dev.vd_devt, MINORMASK);
        class_destroy(mtty_dev.vd_class);
        mtty_dev.vd_class = NULL;
        pr_info("mtty_dev: Unloaded!\n");
}

module_init(mtty_dev_init)
module_exit(mtty_dev_exit)

MODULE_LICENSE("GPL v2");
MODULE_INFO(supported, "Test driver that simulates a serial port over PCI");
MODULE_VERSION(VERSION_STRING);
MODULE_AUTHOR(DRIVER_AUTHOR);