/*
 * drm/xen-front: Use Xen common shared buffer implementation
 * (drivers/gpu/drm/xen/xen_drm_front.c)
 */
1 // SPDX-License-Identifier: GPL-2.0 OR MIT
2
3 /*
4  *  Xen para-virtual DRM device
5  *
6  * Copyright (C) 2016-2018 EPAM Systems Inc.
7  *
8  * Author: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
9  */
10
11 #include <drm/drmP.h>
12 #include <drm/drm_atomic_helper.h>
13 #include <drm/drm_crtc_helper.h>
14 #include <drm/drm_gem.h>
15
16 #include <linux/of_device.h>
17
18 #include <xen/platform_pci.h>
19 #include <xen/xen.h>
20 #include <xen/xenbus.h>
21
22 #include <xen/xen-front-pgdir-shbuf.h>
23 #include <xen/interface/io/displif.h>
24
25 #include "xen_drm_front.h"
26 #include "xen_drm_front_cfg.h"
27 #include "xen_drm_front_evtchnl.h"
28 #include "xen_drm_front_gem.h"
29 #include "xen_drm_front_kms.h"
30
/*
 * Per display-buffer bookkeeping entry, linked on front_info->dbuf_list.
 */
struct xen_drm_front_dbuf {
        struct list_head list;
        /* Cookie identifying this display buffer in front/backend requests. */
        u64 dbuf_cookie;
        /* Cookie of the framebuffer attached to this buffer (see fb_attach). */
        u64 fb_cookie;

        /* Descriptor of the pages shared with the backend for this buffer. */
        struct xen_front_pgdir_shbuf shbuf;
};
38
39 static void dbuf_add_to_list(struct xen_drm_front_info *front_info,
40                              struct xen_drm_front_dbuf *dbuf, u64 dbuf_cookie)
41 {
42         dbuf->dbuf_cookie = dbuf_cookie;
43         list_add(&dbuf->list, &front_info->dbuf_list);
44 }
45
46 static struct xen_drm_front_dbuf *dbuf_get(struct list_head *dbuf_list,
47                                            u64 dbuf_cookie)
48 {
49         struct xen_drm_front_dbuf *buf, *q;
50
51         list_for_each_entry_safe(buf, q, dbuf_list, list)
52                 if (buf->dbuf_cookie == dbuf_cookie)
53                         return buf;
54
55         return NULL;
56 }
57
/*
 * Remove the display buffer identified by @dbuf_cookie from the list,
 * release its shared pages (unmap grants, free the page directory) and
 * free the entry itself. No-op if the cookie is not on the list.
 */
static void dbuf_free(struct list_head *dbuf_list, u64 dbuf_cookie)
{
        struct xen_drm_front_dbuf *buf, *q;

        list_for_each_entry_safe(buf, q, dbuf_list, list)
                if (buf->dbuf_cookie == dbuf_cookie) {
                        list_del(&buf->list);
                        xen_front_pgdir_shbuf_unmap(&buf->shbuf);
                        xen_front_pgdir_shbuf_free(&buf->shbuf);
                        kfree(buf);
                        break;
                }
}
71
/*
 * Release every display buffer on @dbuf_list: unmap and free the shared
 * pages of each entry and free the entries. The _safe iterator is required
 * because entries are deleted while walking the list.
 */
static void dbuf_free_all(struct list_head *dbuf_list)
{
        struct xen_drm_front_dbuf *buf, *q;

        list_for_each_entry_safe(buf, q, dbuf_list, list) {
                list_del(&buf->list);
                xen_front_pgdir_shbuf_unmap(&buf->shbuf);
                xen_front_pgdir_shbuf_free(&buf->shbuf);
                kfree(buf);
        }
}
83
/*
 * Grab the next free request slot from the shared ring and stamp it with
 * @operation and a fresh event id.
 *
 * All callers in this file hold front_info->io_lock while calling this,
 * which serializes access to the ring and to evt_next_id.
 */
static struct xendispl_req *
be_prepare_req(struct xen_drm_front_evtchnl *evtchnl, u8 operation)
{
        struct xendispl_req *req;

        req = RING_GET_REQUEST(&evtchnl->u.req.ring,
                               evtchnl->u.req.ring.req_prod_pvt);
        req->operation = operation;
        /* Remember the id so the response handler can match it. */
        req->id = evtchnl->evt_next_id++;
        evtchnl->evt_id = req->id;
        return req;
}
96
/*
 * Kick the backend for a request already placed on the ring.
 *
 * The completion is re-armed before checking the channel state so a
 * response cannot race ahead of the subsequent wait. Returns -EIO if the
 * event channel is not connected, 0 otherwise. Called with
 * front_info->io_lock held (see callers).
 */
static int be_stream_do_io(struct xen_drm_front_evtchnl *evtchnl,
                           struct xendispl_req *req)
{
        reinit_completion(&evtchnl->u.req.completion);
        if (unlikely(evtchnl->state != EVTCHNL_STATE_CONNECTED))
                return -EIO;

        /* Push the request and notify the backend. */
        xen_drm_front_evtchnl_flush(evtchnl);
        return 0;
}
107
108 static int be_stream_wait_io(struct xen_drm_front_evtchnl *evtchnl)
109 {
110         if (wait_for_completion_timeout(&evtchnl->u.req.completion,
111                         msecs_to_jiffies(XEN_DRM_FRONT_WAIT_BACK_MS)) <= 0)
112                 return -ETIMEDOUT;
113
114         return evtchnl->u.req.resp_status;
115 }
116
/*
 * Send a XENDISPL_OP_SET_CONFIG request for @pipeline and wait for the
 * backend's answer.
 *
 * Locking order (used by all request helpers in this file): req_io_lock
 * serializes whole request/response transactions per channel, io_lock
 * (irqsave) protects ring manipulation while the request is built and
 * flushed; the response is awaited with both the spinlock dropped.
 *
 * Returns 0 on success, -EIO if the channel is gone, -ETIMEDOUT or the
 * backend's response status otherwise.
 */
int xen_drm_front_mode_set(struct xen_drm_front_drm_pipeline *pipeline,
                           u32 x, u32 y, u32 width, u32 height,
                           u32 bpp, u64 fb_cookie)
{
        struct xen_drm_front_evtchnl *evtchnl;
        struct xen_drm_front_info *front_info;
        struct xendispl_req *req;
        unsigned long flags;
        int ret;

        front_info = pipeline->drm_info->front_info;
        evtchnl = &front_info->evt_pairs[pipeline->index].req;
        if (unlikely(!evtchnl))
                return -EIO;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        spin_lock_irqsave(&front_info->io_lock, flags);
        req = be_prepare_req(evtchnl, XENDISPL_OP_SET_CONFIG);
        req->op.set_config.x = x;
        req->op.set_config.y = y;
        req->op.set_config.width = width;
        req->op.set_config.height = height;
        req->op.set_config.bpp = bpp;
        req->op.set_config.fb_cookie = fb_cookie;

        ret = be_stream_do_io(evtchnl, req);
        spin_unlock_irqrestore(&front_info->io_lock, flags);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}
152
/*
 * Create a display buffer: allocate local bookkeeping, share @pages with
 * the backend via a page-directory shared buffer, send
 * XENDISPL_OP_DBUF_CREATE and, on success, map backend-granted pages
 * (no-op for front-allocated buffers).
 *
 * On any failure the buffer is removed from the list and its shared pages
 * are released via dbuf_free(). Returns 0 on success, negative errno
 * otherwise.
 */
int xen_drm_front_dbuf_create(struct xen_drm_front_info *front_info,
                              u64 dbuf_cookie, u32 width, u32 height,
                              u32 bpp, u64 size, struct page **pages)
{
        struct xen_drm_front_evtchnl *evtchnl;
        struct xen_drm_front_dbuf *dbuf;
        struct xendispl_req *req;
        struct xen_front_pgdir_shbuf_cfg buf_cfg;
        unsigned long flags;
        int ret;

        evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
        if (unlikely(!evtchnl))
                return -EIO;

        dbuf = kzalloc(sizeof(*dbuf), GFP_KERNEL);
        if (!dbuf)
                return -ENOMEM;

        /* Added to the list first so dbuf_free() can clean up on failure. */
        dbuf_add_to_list(front_info, dbuf, dbuf_cookie);

        memset(&buf_cfg, 0, sizeof(buf_cfg));
        buf_cfg.xb_dev = front_info->xb_dev;
        buf_cfg.num_pages = DIV_ROUND_UP(size, PAGE_SIZE);
        buf_cfg.pages = pages;
        buf_cfg.pgdir = &dbuf->shbuf;
        /* If set, the backend allocates the buffer and grants it to us. */
        buf_cfg.be_alloc = front_info->cfg.be_alloc;

        ret = xen_front_pgdir_shbuf_alloc(&buf_cfg);
        if (ret < 0)
                goto fail_shbuf_alloc;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        spin_lock_irqsave(&front_info->io_lock, flags);
        req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_CREATE);
        req->op.dbuf_create.gref_directory =
                        xen_front_pgdir_shbuf_get_dir_start(&dbuf->shbuf);
        req->op.dbuf_create.buffer_sz = size;
        req->op.dbuf_create.dbuf_cookie = dbuf_cookie;
        req->op.dbuf_create.width = width;
        req->op.dbuf_create.height = height;
        req->op.dbuf_create.bpp = bpp;
        /*
         * NOTE(review): flags is |='ed into ring memory that is not
         * explicitly zeroed here — presumably the ring slot is clean;
         * confirm against the displif protocol expectations.
         */
        if (buf_cfg.be_alloc)
                req->op.dbuf_create.flags |= XENDISPL_DBUF_FLG_REQ_ALLOC;

        ret = be_stream_do_io(evtchnl, req);
        spin_unlock_irqrestore(&front_info->io_lock, flags);

        if (ret < 0)
                goto fail;

        ret = be_stream_wait_io(evtchnl);
        if (ret < 0)
                goto fail;

        /* Map grants published by the backend (be_alloc case). */
        ret = xen_front_pgdir_shbuf_map(&dbuf->shbuf);
        if (ret < 0)
                goto fail;

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return 0;

fail:
        mutex_unlock(&evtchnl->u.req.req_io_lock);
fail_shbuf_alloc:
        dbuf_free(&front_info->dbuf_list, dbuf_cookie);
        return ret;
}
222
/*
 * Tell the backend to destroy the display buffer identified by
 * @dbuf_cookie and release our local resources for it.
 *
 * The local release happens before the request for backend-allocated
 * buffers (grants must be dropped so the backend can free its pages) and
 * after it for front-allocated ones; the latter is done even if the
 * backend did not answer, so local resources never leak.
 */
static int xen_drm_front_dbuf_destroy(struct xen_drm_front_info *front_info,
                                      u64 dbuf_cookie)
{
        struct xen_drm_front_evtchnl *evtchnl;
        struct xendispl_req *req;
        unsigned long flags;
        bool be_alloc;
        int ret;

        evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
        if (unlikely(!evtchnl))
                return -EIO;

        be_alloc = front_info->cfg.be_alloc;

        /*
         * For the backend allocated buffer release references now, so backend
         * can free the buffer.
         */
        if (be_alloc)
                dbuf_free(&front_info->dbuf_list, dbuf_cookie);

        mutex_lock(&evtchnl->u.req.req_io_lock);

        spin_lock_irqsave(&front_info->io_lock, flags);
        req = be_prepare_req(evtchnl, XENDISPL_OP_DBUF_DESTROY);
        req->op.dbuf_destroy.dbuf_cookie = dbuf_cookie;

        ret = be_stream_do_io(evtchnl, req);
        spin_unlock_irqrestore(&front_info->io_lock, flags);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        /*
         * Do this regardless of communication status with the backend:
         * if we cannot remove remote resources remove what we can locally.
         */
        if (!be_alloc)
                dbuf_free(&front_info->dbuf_list, dbuf_cookie);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}
267
/*
 * Attach a framebuffer (@fb_cookie) to an existing display buffer
 * (@dbuf_cookie) via XENDISPL_OP_FB_ATTACH.
 *
 * Returns -EINVAL if the display buffer is unknown, otherwise the usual
 * request/response status (see xen_drm_front_mode_set for the locking
 * pattern).
 */
int xen_drm_front_fb_attach(struct xen_drm_front_info *front_info,
                            u64 dbuf_cookie, u64 fb_cookie, u32 width,
                            u32 height, u32 pixel_format)
{
        struct xen_drm_front_evtchnl *evtchnl;
        struct xen_drm_front_dbuf *buf;
        struct xendispl_req *req;
        unsigned long flags;
        int ret;

        evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
        if (unlikely(!evtchnl))
                return -EIO;

        buf = dbuf_get(&front_info->dbuf_list, dbuf_cookie);
        if (!buf)
                return -EINVAL;

        /* Remember which framebuffer is attached to this display buffer. */
        buf->fb_cookie = fb_cookie;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        spin_lock_irqsave(&front_info->io_lock, flags);
        req = be_prepare_req(evtchnl, XENDISPL_OP_FB_ATTACH);
        req->op.fb_attach.dbuf_cookie = dbuf_cookie;
        req->op.fb_attach.fb_cookie = fb_cookie;
        req->op.fb_attach.width = width;
        req->op.fb_attach.height = height;
        req->op.fb_attach.pixel_format = pixel_format;

        ret = be_stream_do_io(evtchnl, req);
        spin_unlock_irqrestore(&front_info->io_lock, flags);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}
307
/*
 * Detach the framebuffer identified by @fb_cookie on the backend side via
 * XENDISPL_OP_FB_DETACH. Locking follows the common request pattern
 * (see xen_drm_front_mode_set).
 */
int xen_drm_front_fb_detach(struct xen_drm_front_info *front_info,
                            u64 fb_cookie)
{
        struct xen_drm_front_evtchnl *evtchnl;
        struct xendispl_req *req;
        unsigned long flags;
        int ret;

        evtchnl = &front_info->evt_pairs[GENERIC_OP_EVT_CHNL].req;
        if (unlikely(!evtchnl))
                return -EIO;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        spin_lock_irqsave(&front_info->io_lock, flags);
        req = be_prepare_req(evtchnl, XENDISPL_OP_FB_DETACH);
        req->op.fb_detach.fb_cookie = fb_cookie;

        ret = be_stream_do_io(evtchnl, req);
        spin_unlock_irqrestore(&front_info->io_lock, flags);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}
335
/*
 * Request a page flip to @fb_cookie on connector @conn_idx via
 * XENDISPL_OP_PG_FLIP. Each connector has its own request event channel.
 *
 * Returns -EINVAL for an out-of-range connector index, otherwise the
 * usual request/response status.
 */
int xen_drm_front_page_flip(struct xen_drm_front_info *front_info,
                            int conn_idx, u64 fb_cookie)
{
        struct xen_drm_front_evtchnl *evtchnl;
        struct xendispl_req *req;
        unsigned long flags;
        int ret;

        if (unlikely(conn_idx >= front_info->num_evt_pairs))
                return -EINVAL;

        evtchnl = &front_info->evt_pairs[conn_idx].req;

        mutex_lock(&evtchnl->u.req.req_io_lock);

        spin_lock_irqsave(&front_info->io_lock, flags);
        req = be_prepare_req(evtchnl, XENDISPL_OP_PG_FLIP);
        req->op.pg_flip.fb_cookie = fb_cookie;

        ret = be_stream_do_io(evtchnl, req);
        spin_unlock_irqrestore(&front_info->io_lock, flags);

        if (ret == 0)
                ret = be_stream_wait_io(evtchnl);

        mutex_unlock(&evtchnl->u.req.req_io_lock);
        return ret;
}
364
/*
 * Backend event: frame with @fb_cookie was displayed on connector
 * @conn_idx. Forward the notification to the KMS pipeline; silently
 * ignore out-of-range connector indices.
 */
void xen_drm_front_on_frame_done(struct xen_drm_front_info *front_info,
                                 int conn_idx, u64 fb_cookie)
{
        struct xen_drm_front_drm_info *drm_info = front_info->drm_info;

        if (unlikely(conn_idx >= front_info->cfg.num_connectors))
                return;

        xen_drm_front_kms_on_frame_done(&drm_info->pipeline[conn_idx],
                                        fb_cookie);
}
376
377 static int xen_drm_drv_dumb_create(struct drm_file *filp,
378                                    struct drm_device *dev,
379                                    struct drm_mode_create_dumb *args)
380 {
381         struct xen_drm_front_drm_info *drm_info = dev->dev_private;
382         struct drm_gem_object *obj;
383         int ret;
384
385         /*
386          * Dumb creation is a two stage process: first we create a fully
387          * constructed GEM object which is communicated to the backend, and
388          * only after that we can create GEM's handle. This is done so,
389          * because of the possible races: once you create a handle it becomes
390          * immediately visible to user-space, so the latter can try accessing
391          * object without pages etc.
392          * For details also see drm_gem_handle_create
393          */
394         args->pitch = DIV_ROUND_UP(args->width * args->bpp, 8);
395         args->size = args->pitch * args->height;
396
397         obj = xen_drm_front_gem_create(dev, args->size);
398         if (IS_ERR_OR_NULL(obj)) {
399                 ret = PTR_ERR(obj);
400                 goto fail;
401         }
402
403         ret = xen_drm_front_dbuf_create(drm_info->front_info,
404                                         xen_drm_front_dbuf_to_cookie(obj),
405                                         args->width, args->height, args->bpp,
406                                         args->size,
407                                         xen_drm_front_gem_get_pages(obj));
408         if (ret)
409                 goto fail_backend;
410
411         /* This is the tail of GEM object creation */
412         ret = drm_gem_handle_create(filp, obj, &args->handle);
413         if (ret)
414                 goto fail_handle;
415
416         /* Drop reference from allocate - handle holds it now */
417         drm_gem_object_put_unlocked(obj);
418         return 0;
419
420 fail_handle:
421         xen_drm_front_dbuf_destroy(drm_info->front_info,
422                                    xen_drm_front_dbuf_to_cookie(obj));
423 fail_backend:
424         /* drop reference from allocate */
425         drm_gem_object_put_unlocked(obj);
426 fail:
427         DRM_ERROR("Failed to create dumb buffer: %d\n", ret);
428         return ret;
429 }
430
/*
 * GEM free hook: tear down the buffer on the backend if the device is
 * still alive, otherwise only drop the local bookkeeping; then free the
 * GEM object itself.
 */
static void xen_drm_drv_free_object_unlocked(struct drm_gem_object *obj)
{
        struct xen_drm_front_drm_info *drm_info = obj->dev->dev_private;
        int idx;

        if (drm_dev_enter(obj->dev, &idx)) {
                /* Device not unplugged: ask the backend to destroy too. */
                xen_drm_front_dbuf_destroy(drm_info->front_info,
                                           xen_drm_front_dbuf_to_cookie(obj));
                drm_dev_exit(idx);
        } else {
                /* Device unplugged: backend is gone, clean up locally only. */
                dbuf_free(&drm_info->front_info->dbuf_list,
                          xen_drm_front_dbuf_to_cookie(obj));
        }

        xen_drm_front_gem_free_object_unlocked(obj);
}
447
/*
 * drm_driver.release: final teardown once the last reference to the DRM
 * device is dropped. For backend-allocated buffers the Xenbus state
 * switch was deferred until now (see xen_drm_drv_fini), so signal
 * readiness to (re)initialize here.
 */
static void xen_drm_drv_release(struct drm_device *dev)
{
        struct xen_drm_front_drm_info *drm_info = dev->dev_private;
        struct xen_drm_front_info *front_info = drm_info->front_info;

        xen_drm_front_kms_fini(drm_info);

        drm_atomic_helper_shutdown(dev);
        drm_mode_config_cleanup(dev);

        drm_dev_fini(dev);
        kfree(dev);

        if (front_info->cfg.be_alloc)
                xenbus_switch_state(front_info->xb_dev,
                                    XenbusStateInitialising);

        kfree(drm_info);
}
467
/* File operations for the DRM device node; mmap is GEM-object backed. */
static const struct file_operations xen_drm_dev_fops = {
        .owner          = THIS_MODULE,
        .open           = drm_open,
        .release        = drm_release,
        .unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
        .compat_ioctl   = drm_compat_ioctl,
#endif
        .poll           = drm_poll,
        .read           = drm_read,
        .llseek         = no_llseek,
        .mmap           = xen_drm_front_gem_mmap,
};
481
/* Standard GEM VMA refcounting for mmap'ed buffers. */
static const struct vm_operations_struct xen_drm_drv_vm_ops = {
        .open           = drm_gem_vm_open,
        .close          = drm_gem_vm_close,
};
486
/*
 * DRM driver description: atomic modesetting with GEM buffers and PRIME
 * import/export backed by the Xen front GEM helpers.
 */
static struct drm_driver xen_drm_driver = {
        .driver_features           = DRIVER_GEM | DRIVER_MODESET |
                                     DRIVER_PRIME | DRIVER_ATOMIC,
        .release                   = xen_drm_drv_release,
        .gem_vm_ops                = &xen_drm_drv_vm_ops,
        .gem_free_object_unlocked  = xen_drm_drv_free_object_unlocked,
        .prime_handle_to_fd        = drm_gem_prime_handle_to_fd,
        .prime_fd_to_handle        = drm_gem_prime_fd_to_handle,
        .gem_prime_import          = drm_gem_prime_import,
        .gem_prime_export          = drm_gem_prime_export,
        .gem_prime_import_sg_table = xen_drm_front_gem_import_sg_table,
        .gem_prime_get_sg_table    = xen_drm_front_gem_get_sg_table,
        .gem_prime_vmap            = xen_drm_front_gem_prime_vmap,
        .gem_prime_vunmap          = xen_drm_front_gem_prime_vunmap,
        .gem_prime_mmap            = xen_drm_front_gem_prime_mmap,
        .dumb_create               = xen_drm_drv_dumb_create,
        .fops                      = &xen_drm_dev_fops,
        .name                      = "xendrm-du",
        .desc                      = "Xen PV DRM Display Unit",
        .date                      = "20180221",
        .major                     = 1,
        .minor                     = 0,

};
511
512 static int xen_drm_drv_init(struct xen_drm_front_info *front_info)
513 {
514         struct device *dev = &front_info->xb_dev->dev;
515         struct xen_drm_front_drm_info *drm_info;
516         struct drm_device *drm_dev;
517         int ret;
518
519         DRM_INFO("Creating %s\n", xen_drm_driver.desc);
520
521         drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL);
522         if (!drm_info) {
523                 ret = -ENOMEM;
524                 goto fail;
525         }
526
527         drm_info->front_info = front_info;
528         front_info->drm_info = drm_info;
529
530         drm_dev = drm_dev_alloc(&xen_drm_driver, dev);
531         if (IS_ERR(drm_dev)) {
532                 ret = PTR_ERR(drm_dev);
533                 goto fail;
534         }
535
536         drm_info->drm_dev = drm_dev;
537
538         drm_dev->dev_private = drm_info;
539
540         ret = xen_drm_front_kms_init(drm_info);
541         if (ret) {
542                 DRM_ERROR("Failed to initialize DRM/KMS, ret %d\n", ret);
543                 goto fail_modeset;
544         }
545
546         ret = drm_dev_register(drm_dev, 0);
547         if (ret)
548                 goto fail_register;
549
550         DRM_INFO("Initialized %s %d.%d.%d %s on minor %d\n",
551                  xen_drm_driver.name, xen_drm_driver.major,
552                  xen_drm_driver.minor, xen_drm_driver.patchlevel,
553                  xen_drm_driver.date, drm_dev->primary->index);
554
555         return 0;
556
557 fail_register:
558         drm_dev_unregister(drm_dev);
559 fail_modeset:
560         drm_kms_helper_poll_fini(drm_dev);
561         drm_mode_config_cleanup(drm_dev);
562 fail:
563         kfree(drm_info);
564         return ret;
565 }
566
/*
 * Unplug the DRM device and free frontend resources when the backend
 * disconnects. Safe to call multiple times: returns early if there is no
 * DRM context or the device is already unplugged.
 */
static void xen_drm_drv_fini(struct xen_drm_front_info *front_info)
{
        struct xen_drm_front_drm_info *drm_info = front_info->drm_info;
        struct drm_device *dev;

        if (!drm_info)
                return;

        dev = drm_info->drm_dev;
        if (!dev)
                return;

        /* Nothing to do if device is already unplugged */
        if (drm_dev_is_unplugged(dev))
                return;

        drm_kms_helper_poll_fini(dev);
        drm_dev_unplug(dev);

        front_info->drm_info = NULL;

        xen_drm_front_evtchnl_free_all(front_info);
        dbuf_free_all(&front_info->dbuf_list);

        /*
         * If we are not using backend allocated buffers, then tell the
         * backend we are ready to (re)initialize. Otherwise, wait for
         * drm_driver.release.
         */
        if (!front_info->cfg.be_alloc)
                xenbus_switch_state(front_info->xb_dev,
                                    XenbusStateInitialising);
}
600
/*
 * Backend reached InitWait: read the card configuration from XenStore,
 * create request/event channel pairs for every connector and publish
 * their details for the backend. Returns 0 on success, negative errno
 * otherwise.
 */
static int displback_initwait(struct xen_drm_front_info *front_info)
{
        struct xen_drm_front_cfg *cfg = &front_info->cfg;
        int ret;

        cfg->front_info = front_info;
        ret = xen_drm_front_cfg_card(front_info, cfg);
        if (ret < 0)
                return ret;

        DRM_INFO("Have %d connector(s)\n", cfg->num_connectors);
        /* Create event channels for all connectors and publish */
        ret = xen_drm_front_evtchnl_create_all(front_info);
        if (ret < 0)
                return ret;

        return xen_drm_front_evtchnl_publish_all(front_info);
}
619
/*
 * Backend reached Connected: mark all event channels connected (so
 * requests may flow) and bring up the DRM device.
 */
static int displback_connect(struct xen_drm_front_info *front_info)
{
        xen_drm_front_evtchnl_set_state(front_info, EVTCHNL_STATE_CONNECTED);
        return xen_drm_drv_init(front_info);
}
625
/*
 * Backend went away (or is reconfiguring): tear down the DRM device.
 * No-op if the driver was never initialized.
 */
static void displback_disconnect(struct xen_drm_front_info *front_info)
{
        if (!front_info->drm_info)
                return;

        /* Tell the backend to wait until we release the DRM driver. */
        xenbus_switch_state(front_info->xb_dev, XenbusStateReconfiguring);

        xen_drm_drv_fini(front_info);
}
636
/*
 * XenBus .otherend_changed callback: drive the frontend state machine in
 * response to backend state transitions. The interesting flow is
 * InitWait -> (publish channels) -> Initialised -> Connected; the other
 * cases handle recovery after unexpected backend closure.
 */
static void displback_changed(struct xenbus_device *xb_dev,
                              enum xenbus_state backend_state)
{
        struct xen_drm_front_info *front_info = dev_get_drvdata(&xb_dev->dev);
        int ret;

        DRM_DEBUG("Backend state is %s, front is %s\n",
                  xenbus_strstate(backend_state),
                  xenbus_strstate(xb_dev->state));

        switch (backend_state) {
        case XenbusStateReconfiguring:
                /* fall through */
        case XenbusStateReconfigured:
                /* fall through */
        case XenbusStateInitialised:
                break;

        case XenbusStateInitialising:
                /* We triggered the reconfiguration ourselves: ignore. */
                if (xb_dev->state == XenbusStateReconfiguring)
                        break;

                /* recovering after backend unexpected closure */
                displback_disconnect(front_info);
                break;

        case XenbusStateInitWait:
                if (xb_dev->state == XenbusStateReconfiguring)
                        break;

                /* recovering after backend unexpected closure */
                displback_disconnect(front_info);
                if (xb_dev->state != XenbusStateInitialising)
                        break;

                /* Read config, create and publish event channels. */
                ret = displback_initwait(front_info);
                if (ret < 0)
                        xenbus_dev_fatal(xb_dev, ret, "initializing frontend");
                else
                        xenbus_switch_state(xb_dev, XenbusStateInitialised);
                break;

        case XenbusStateConnected:
                if (xb_dev->state != XenbusStateInitialised)
                        break;

                ret = displback_connect(front_info);
                if (ret < 0) {
                        displback_disconnect(front_info);
                        xenbus_dev_fatal(xb_dev, ret, "connecting backend");
                } else {
                        xenbus_switch_state(xb_dev, XenbusStateConnected);
                }
                break;

        case XenbusStateClosing:
                /*
                 * in this state backend starts freeing resources,
                 * so let it go into closed state, so we can also
                 * remove ours
                 */
                break;

        case XenbusStateUnknown:
                /* fall through */
        case XenbusStateClosed:
                if (xb_dev->state == XenbusStateClosed)
                        break;

                displback_disconnect(front_info);
                break;
        }
}
710
/*
 * XenBus probe: set up DMA ops and per-device state, then announce
 * ourselves as Initialising so the backend starts the handshake handled
 * in displback_changed().
 */
static int xen_drv_probe(struct xenbus_device *xb_dev,
                         const struct xenbus_device_id *id)
{
        struct xen_drm_front_info *front_info;
        struct device *dev = &xb_dev->dev;
        int ret;

        /*
         * The device is not spawn from a device tree, so arch_setup_dma_ops
         * is not called, thus leaving the device with dummy DMA ops.
         * This makes the device return error on PRIME buffer import, which
         * is not correct: to fix this call of_dma_configure() with a NULL
         * node to set default DMA ops.
         */
        dev->coherent_dma_mask = DMA_BIT_MASK(32);
        ret = of_dma_configure(dev, NULL, true);
        if (ret < 0) {
                DRM_ERROR("Cannot setup DMA ops, ret %d", ret);
                return ret;
        }

        /* devm allocation: freed automatically on driver detach. */
        front_info = devm_kzalloc(&xb_dev->dev,
                                  sizeof(*front_info), GFP_KERNEL);
        if (!front_info)
                return -ENOMEM;

        front_info->xb_dev = xb_dev;
        spin_lock_init(&front_info->io_lock);
        INIT_LIST_HEAD(&front_info->dbuf_list);
        dev_set_drvdata(&xb_dev->dev, front_info);

        return xenbus_switch_state(xb_dev, XenbusStateInitialising);
}
744
/*
 * XenBus remove: announce Closing, wait (bounded) for the backend to
 * return to InitWait, then tear down the frontend.
 */
static int xen_drv_remove(struct xenbus_device *dev)
{
        struct xen_drm_front_info *front_info = dev_get_drvdata(&dev->dev);
        /* 100 iterations x 10 ms = 1 s worst-case wait for the backend. */
        int to = 100;

        xenbus_switch_state(dev, XenbusStateClosing);

        /*
         * On driver removal it is disconnected from XenBus,
         * so no backend state change events come via .otherend_changed
         * callback. This prevents us from exiting gracefully, e.g.
         * signaling the backend to free event channels, waiting for its
         * state to change to XenbusStateClosed and cleaning at our end.
         * Normally when front driver removed backend will finally go into
         * XenbusStateInitWait state.
         *
         * Workaround: read backend's state manually and wait with time-out.
         */
        while ((xenbus_read_unsigned(front_info->xb_dev->otherend, "state",
                                     XenbusStateUnknown) != XenbusStateInitWait) &&
                                     --to)
                msleep(10);

        if (!to) {
                unsigned int state;

                /* Timed out: log the state the backend is stuck in. */
                state = xenbus_read_unsigned(front_info->xb_dev->otherend,
                                             "state", XenbusStateUnknown);
                DRM_ERROR("Backend state is %s while removing driver\n",
                          xenbus_strstate(state));
        }

        xen_drm_drv_fini(front_info);
        xenbus_frontend_closed(dev);
        return 0;
}
781
/* XenStore device id this frontend binds to (terminated by ""). */
static const struct xenbus_device_id xen_driver_ids[] = {
        { XENDISPL_DRIVER_NAME },
        { "" }
};

/* XenBus frontend driver: backend state changes drive displback_changed. */
static struct xenbus_driver xen_driver = {
        .ids = xen_driver_ids,
        .probe = xen_drv_probe,
        .remove = xen_drv_remove,
        .otherend_changed = displback_changed,
};
793
/*
 * Module init: refuse to load outside a Xen domain, without PV devices,
 * or when the Xen and kernel page sizes differ, then register the
 * XenBus frontend.
 */
static int __init xen_drv_init(void)
{
        /* At the moment we only support case with XEN_PAGE_SIZE == PAGE_SIZE */
        if (XEN_PAGE_SIZE != PAGE_SIZE) {
                DRM_ERROR(XENDISPL_DRIVER_NAME ": different kernel and Xen page sizes are not supported: XEN_PAGE_SIZE (%lu) != PAGE_SIZE (%lu)\n",
                          XEN_PAGE_SIZE, PAGE_SIZE);
                return -ENODEV;
        }

        if (!xen_domain())
                return -ENODEV;

        if (!xen_has_pv_devices())
                return -ENODEV;

        DRM_INFO("Registering XEN PV " XENDISPL_DRIVER_NAME "\n");
        return xenbus_register_frontend(&xen_driver);
}
812
/* Module exit: unregister the XenBus frontend driver. */
static void __exit xen_drv_fini(void)
{
        DRM_INFO("Unregistering XEN PV " XENDISPL_DRIVER_NAME "\n");
        xenbus_unregister_driver(&xen_driver);
}
818
/* Module entry/exit points and metadata. */
module_init(xen_drv_init);
module_exit(xen_drv_fini);

MODULE_DESCRIPTION("Xen para-virtualized display device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:" XENDISPL_DRIVER_NAME);