/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

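/**
 * Compute the page protection flags for an uncached I/O mapping.
 *
 * \param map_type type of the map being mapped.
 * \param vma virtual memory area.
 * \return page protection with architecture-specific caching attributes applied.
 */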
static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
        if (boot_cpu_data.x86 > 3 && map_type != _DRM_AGP) {
                pgprot_val(tmp) |= _PAGE_PCD;
                pgprot_val(tmp) &= ~_PAGE_PWT;
        }
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map_type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#endif
#if defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_nopage_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_nopage_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_nopage_error;

        r_list = drm_hash_entry(hash, struct drm_map_list, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                unsigned long offset = address - vma->vm_start;
                unsigned long baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (&agpmem->head == &dev->agp->memory)
                        goto vm_nopage_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);

                DRM_DEBUG
                    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                     baddr, __va(agpmem->memory->memory[offset]), offset,
                     page_count(page));

                return page;
        }
      vm_nopage_error:
        return NOPAGE_SIGBUS;   /* Disallow mremap */
}
#else                           /* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        return NOPAGE_SIGBUS;
}
#endif                          /* __OS_HAS_AGP */

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        struct drm_map *map = (struct drm_map *) vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!map)
                return NOPAGE_SIGBUS;   /* Nothing allocated */

        offset = address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return NOPAGE_SIGBUS;
        get_page(page);

        DRM_DEBUG("shm_nopage 0x%lx\n", address);
        return page;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_map *map;
        struct drm_map_list *r_list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                }
        }

        /* We were the only map that was found */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not, then
                 * we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each_entry(r_list, &dev->maplist, head) {
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return NOPAGE_SIGBUS;   /* Error */
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!dma->pagelist)
                return NOPAGE_SIGBUS;   /* Nothing allocated */

        offset = address - vma->vm_start;       /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT;
        page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

        get_page(page);

        DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
        return page;
}

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
                                                   unsigned long address)
{
        struct drm_map *map = (struct drm_map *) vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return NOPAGE_SIGBUS;   /* Error */
        if (address > vma->vm_end)
                return NOPAGE_SIGBUS;   /* Disallow mremap */
        if (!entry->pagelist)
                return NOPAGE_SIGBUS;   /* Nothing allocated */

        offset = address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);

        return page;
}

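/*
 * Thin \c nopage wrappers: report a minor fault and dispatch to the
 * type-specific handlers above.
 */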
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
                                  unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
                                      unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
                                     unsigned long address, int *type)
{
        if (type)
                *type = VM_FAULT_MINOR;
        return drm_do_vm_sg_nopage(vma, address);
}

/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
        .nopage = drm_vm_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
        .nopage = drm_vm_shm_nopage,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
        .nopage = drm_vm_dma_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
        .nopage = drm_vm_sg_nopage,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure for the \p vma and add it to
 * drm_device::vmalist.
 */
static void drm_vm_open_locked(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->pid = current->pid;
                list_add(&vma_entry->head, &dev->vmalist);
        }
}

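/** Locking wrapper: takes drm_device::struct_mutex and calls drm_vm_open_locked(). */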
static void drm_vm_open(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(vma);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops, sets
 * the file pointer, and calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev;
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->head->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_RESERVED;   /* Don't swap */

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}

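/**
 * Return the offset at which \p map should be mapped; this core helper
 * simply uses drm_map::offset.
 */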
unsigned long drm_core_get_map_ofs(struct drm_map *map)
{
        return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

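/**
 * Return the register-space offset for \p dev: on Alpha this converts the
 * dense memory base to a bus-relative address, elsewhere it is zero.
 */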
unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);

/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls drm_mmap_dma(). Otherwise looks the map up in
 * drm_device::map_hash, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type and remaps the
 * pages. Finally sets the file pointer and calls drm_vm_open_locked().
 */
static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->head->dev;
        struct drm_map *map = NULL;
        unsigned long offset = 0;
        struct drm_hash_item *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!vma->vm_pgoff
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms the CPU cannot access the AGP
                         * aperture's bus addresses directly, so for memory of
                         * type _DRM_AGP we sort out the real physical pages
                         * and mappings in nopage().
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = dev->driver->get_reg_ofs(dev);
                vma->vm_flags |= VM_IO; /* not in core dump */
                vma->vm_page_prot = drm_io_prot(map->type, vma);
#ifdef __sparc__
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, map->offset + offset);
                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid nopage */
                if (remap_pfn_range(vma, vma->vm_start,
                    page_to_pfn(virt_to_page(map->handle)),
                    vma->vm_end - vma->vm_start, vma->vm_page_prot))
                        return -EAGAIN;
        /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                   DRM_KERNEL advisory is supported. */
                vma->vm_flags |= VM_RESERVED;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_flags |= VM_RESERVED;
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open_locked(vma);
        return 0;
}

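/**
 * mmap entry point: takes drm_device::struct_mutex and hands off to
 * drm_mmap_locked(). Drivers typically install this as their
 * file_operations::mmap handler.
 */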
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->head->dev;
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_mmap);