drm: add _DRM_CONSISTENT map type
drivers/char/drm/drm_vm.c
/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        drm_file_t *priv  = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map    = NULL;
        drm_map_list_t  *r_list;
        struct list_head *list;

        /*
         * Find the right map
         */
        if (!drm_core_has_AGP(dev))
                goto vm_nopage_error;

        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_nopage_error;

        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map) continue;
                if (map->offset == VM_OFFSET(vma)) break;
        }

        if (map && map->type == _DRM_AGP) {
                unsigned long offset = address - vma->vm_start;
                unsigned long baddr = VM_OFFSET(vma) + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (!agpmem)
                        goto vm_nopage_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = virt_to_page(__va(agpmem->memory->memory[offset]));
                get_page(page);

                DRM_DEBUG("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
                          baddr, __va(agpmem->memory->memory[offset]), offset,
                          page_count(page));

                return page;
        }
vm_nopage_error:
        return NOPAGE_SIGBUS;           /* Disallow mremap */
}
#else /* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
                                                unsigned long address)
{
        return NOPAGE_SIGBUS;
}
#endif /* __OS_HAS_AGP */

/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_map_t        *map    = (drm_map_t *)vma->vm_private_data;
        unsigned long    offset;
        unsigned long    i;
        struct page      *page;

        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!map)                  return NOPAGE_OOM;  /* Nothing allocated */

        offset   = address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return NOPAGE_OOM;
        get_page(page);

        DRM_DEBUG("shm_nopage 0x%lx\n", address);
        return page;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last person to close a mapping and
 * it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->head->dev;
        drm_vma_entry_t *pt, *prev, *next;
        drm_map_t *map;
        drm_map_list_t *r_list;
        struct list_head *list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        map = vma->vm_private_data;

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
                next = pt->next;
                if (pt->vma->vm_private_data == map) found_maps++;
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                } else {
                        prev = pt;
                }
        }
        /* We were the only map that was found */
        if (found_maps == 1 && (map->flags & _DRM_REMOVABLE)) {
                /* Check to see if we are in the maplist; if we are not, then
                 * we delete this mapping's information.
                 */
                found_maps = 0;
                list = &dev->maplist->head;
                list_for_each(list, &dev->maplist->head) {
                        r_list = list_entry(list, drm_map_list_t, head);
                        if (r_list->map == map) found_maps++;
                }

                if (!found_maps) {
                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                                drm_ioremapfree(map->handle, map->size, dev);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
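                                /* nothing to free here: AGP and scatter/gather
                                 * memory are torn down through their own ioctls */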
                                break;
                        case _DRM_CONSISTENT:
                                drm_pci_free(dev, map->size, map->handle,
                                             map->offset);
                                break;
                        }
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                }
        }
        up(&dev->struct_sem);
}

/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
                                                    unsigned long address)
{
        drm_file_t       *priv   = vma->vm_file->private_data;
        drm_device_t     *dev    = priv->head->dev;
        drm_device_dma_t *dma    = dev->dma;
        unsigned long    offset;
        unsigned long    page_nr;
        struct page      *page;

        if (!dma)                  return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!dma->pagelist)        return NOPAGE_OOM;  /* Nothing allocated */

        offset   = address - vma->vm_start; /* vm_[pg]off[set] should be 0 */
        page_nr  = offset >> PAGE_SHIFT;
        page = virt_to_page((dma->pagelist[page_nr] +
                             (offset & (~PAGE_MASK))));

        get_page(page);

        DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
        return page;
}

/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
                                                   unsigned long address)
{
        drm_map_t        *map    = (drm_map_t *)vma->vm_private_data;
        drm_file_t *priv = vma->vm_file->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_sg_mem_t *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)                return NOPAGE_SIGBUS; /* Error */
        if (address > vma->vm_end) return NOPAGE_SIGBUS; /* Disallow mremap */
        if (!entry->pagelist)      return NOPAGE_OOM;  /* Nothing allocated */

        offset = address - vma->vm_start;
        map_offset = map->offset - dev->sg->handle;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);

        return page;
}

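/*
 * On 2.6 kernels the nopage handler receives a pointer through which the
 * fault type is reported; older kernels pass an unused int instead.  These
 * thin wrappers adapt the shared drm_do_vm_*_nopage() helpers to whichever
 * prototype the running kernel expects.
 */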
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,0)

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
                                  unsigned long address,
                                  int *type) {
        if (type) *type = VM_FAULT_MINOR;
        return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
                                      unsigned long address,
                                      int *type) {
        if (type) *type = VM_FAULT_MINOR;
        return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
                                      unsigned long address,
                                      int *type) {
        if (type) *type = VM_FAULT_MINOR;
        return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
                                     unsigned long address,
                                     int *type) {
        if (type) *type = VM_FAULT_MINOR;
        return drm_do_vm_sg_nopage(vma, address);
}

#else   /* LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,0) */

static struct page *drm_vm_nopage(struct vm_area_struct *vma,
                                  unsigned long address,
                                  int unused) {
        return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
                                      unsigned long address,
                                      int unused) {
        return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
                                      unsigned long address,
                                      int unused) {
        return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
                                     unsigned long address,
                                     int unused) {
        return drm_do_vm_sg_nopage(vma, address);
}

#endif

/** AGP virtual memory operations */
static struct vm_operations_struct   drm_vm_ops = {
        .nopage = drm_vm_nopage,
        .open   = drm_vm_open,
        .close  = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct   drm_vm_shm_ops = {
        .nopage = drm_vm_shm_nopage,
        .open   = drm_vm_open,
        .close  = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct   drm_vm_dma_ops = {
        .nopage = drm_vm_dma_nopage,
        .open   = drm_vm_open,
        .close  = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct   drm_vm_sg_ops = {
        .nopage = drm_vm_sg_nopage,
        .open   = drm_vm_open,
        .close  = drm_vm_close,
};

/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->head->dev;
        drm_vma_entry_t *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_inc(&dev->vma_count);

        vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
        if (vma_entry) {
                down(&dev->struct_sem);
                vma_entry->vma  = vma;
                vma_entry->next = dev->vmalist;
                vma_entry->pid  = current->pid;
                dev->vmalist    = vma_entry;
                up(&dev->struct_sem);
        }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        drm_file_t      *priv   = vma->vm_file->private_data;
        drm_device_t    *dev    = priv->head->dev;
        drm_vma_entry_t *pt, *prev;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);
        atomic_dec(&dev->vma_count);

        down(&dev->struct_sem);
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma == vma) {
                        if (prev) {
                                prev->next = pt->next;
                        } else {
                                dev->vmalist = pt->next;
                        }
                        drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
                        break;
                }
        }
        up(&dev->struct_sem);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to vm_dma_ops, the file
 * pointer, and calls vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t       *priv   = filp->private_data;
        drm_device_t     *dev;
        drm_device_dma_t *dma;
        unsigned long    length  = vma->vm_end - vma->vm_start;

        lock_kernel();
        dev      = priv->head->dev;
        dma      = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                unlock_kernel();
                return -EINVAL;
        }
        unlock_kernel();

        vma->vm_ops = &drm_vm_dma_ops;

#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
        vma->vm_flags |= VM_LOCKED | VM_SHM;    /* Don't swap */
#else
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
#endif

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}

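/**
 * Default method to get the \c mmap offset token for a map: simply returns
 * drm_map::offset, which drm_mmap() compares against the offset passed in
 * by userspace.
 */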
unsigned long drm_core_get_map_ofs(drm_map_t *map)
{
        return map->offset;
}
EXPORT_SYMBOL(drm_core_get_map_ofs);

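/**
 * Default method to get the register offset correction applied when remapping
 * _DRM_FRAME_BUFFER and _DRM_REGISTERS maps.  On Alpha this converts the bus
 * address to a dense memory-space address; other architectures need no
 * correction.
 */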
unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
        return 0;
#endif
}
EXPORT_SYMBOL(drm_core_get_reg_ofs);

/**
 * mmap support for all DRM memory map types.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise searches the map in drm_device::maplist,
 * checks that the restricted flag is not set, sets the virtual memory operations
 * according to the mapping type and remaps the pages. Finally sets the file
 * pointer and calls vm_open().
 */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->head->dev;
        drm_map_t       *map    = NULL;
        drm_map_list_t  *r_list;
        unsigned long   offset  = 0;
        struct list_head *list;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, VM_OFFSET(vma));

        if (!priv->authenticated) return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!VM_OFFSET(vma)
#if __OS_HAS_AGP
            && (!dev->agp || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        /* A sequential search of a linked list is fine here because: 1)
         * there will only be about 5-10 entries in the list and, 2) a DRI
         * client only has to do this mapping once, so it doesn't have to be
         * optimized for performance, even if the list was a bit longer.
         */
        list_for_each(list, &dev->maplist->head) {
                unsigned long off;

                r_list = list_entry(list, drm_map_list_t, head);
                map = r_list->map;
                if (!map) continue;
                off = dev->driver->get_map_ofs(map);
                if (off == VM_OFFSET(vma)) break;
        }

        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size != vma->vm_end - vma->vm_start) return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought we could move
                 * this up higher and use `protection_map' instead.
                 */
                vma->vm_page_prot = __pgprot(pte_val(pte_wrprotect(
                        __pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
        case _DRM_AGP:
                if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms the CPU can't access AGP memory
                         * through its bus address, so for maps of type
                         * _DRM_AGP we sort out the real physical pages and
                         * mappings in nopage().
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                if (VM_OFFSET(vma) >= __pa(high_memory)) {
#if defined(__i386__) || defined(__x86_64__)
                        if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
                                pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
                                pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
                        }
#elif defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE | _PAGE_GUARDED;
#endif
                        vma->vm_flags |= VM_IO; /* not in core dump */
                }
#if defined(__ia64__)
                if (efi_range_is_wc(vma->vm_start, vma->vm_end -
                                    vma->vm_start))
                        vma->vm_page_prot =
                                pgprot_writecombine(vma->vm_page_prot);
                else
                        vma->vm_page_prot =
                                pgprot_noncached(vma->vm_page_prot);
#endif
                offset = dev->driver->get_reg_ofs(dev);
#ifdef __sparc__
                if (io_remap_pfn_range(DRM_RPR_ARG(vma) vma->vm_start,
                                       (VM_OFFSET(vma) + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
#else
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (VM_OFFSET(vma) + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
#endif
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%lx\n",
                          map->type,
                          vma->vm_start, vma->vm_end, VM_OFFSET(vma) + offset);
                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_SHM:
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory; it is just
                 * allocated in a different way. */
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                /* Don't let this area swap.  Change when
                 * DRM_KERNEL advisory is supported. */
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
                vma->vm_flags |= VM_LOCKED;
#else
                vma->vm_flags |= VM_RESERVED;
#endif
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
                vma->vm_flags |= VM_LOCKED;
#else
                vma->vm_flags |= VM_RESERVED;
#endif
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
#if LINUX_VERSION_CODE <= 0x02040e /* KERNEL_VERSION(2,4,14) */
        vma->vm_flags |= VM_LOCKED | VM_SHM;    /* Don't swap */
#else
        vma->vm_flags |= VM_RESERVED;   /* Don't swap */
#endif

        vma->vm_file = filp;    /* Needed for drm_vm_open() */
        drm_vm_open(vma);
        return 0;
}
EXPORT_SYMBOL(drm_mmap);