/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
        return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);

unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
        return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);

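/*
 * Find an existing map with the same type and offset.  This lets addmap
 * deduplicate maps that a driver preinitialized before the X server asked
 * for them.  (Summary comment added for clarity; behaviour inferred from
 * the callers below.)
 */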
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
                                             drm_local_map_t *map)
{
        struct list_head *list;

        list_for_each(list, &dev->maplist->head) {
                drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
                if (entry->map && map->type == entry->map->type &&
                    ((entry->map->offset == map->offset) ||
                     (map->type == _DRM_SHM && map->flags == _DRM_CONTAINS_LOCK))) {
                        return entry;
                }
        }

        return NULL;
}

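/*
 * Compute the 32-bit handle handed back to user space for a map.  On
 * 64-bit kernels a kernel address used as the token may not fit into the
 * 32-bit handle field of the ioctl interface, so such maps fall back to a
 * hashed handle allocated from the range at DRM_MAP_HASH_OFFSET.
 * (Summary comment added for clarity; inferred from the BITS_PER_LONG
 * cases below.)
 */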
static int drm_map_handle(drm_device_t *dev, drm_hash_item_t *hash,
                          unsigned long user_token, int hashed_handle)
{
        int use_hashed_handle;
#if (BITS_PER_LONG == 64)
        use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
        use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

        if (!use_hashed_handle) {
                int ret;
                hash->key = user_token >> PAGE_SHIFT;
                ret = drm_ht_insert_item(&dev->map_hash, hash);
                if (ret != -EINVAL)
                        return ret;
        }
        return drm_ht_just_insert_please(&dev->map_hash, hash,
                                         user_token, 32 - PAGE_SHIFT - 3,
                                         0, DRM_MAP_HASH_OFFSET >> PAGE_SHIFT);
}

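/*
 * Illustrative only: user space reaches the addmap path below through the
 * DRM_IOCTL_ADD_MAP ioctl, roughly as sketched here (fb_base and fb_size
 * are placeholder values; error handling omitted):
 *
 *      drm_map_t map = {
 *              .offset = fb_base,      // physical base, page aligned
 *              .size   = fb_size,      // length, page aligned
 *              .type   = _DRM_FRAME_BUFFER,
 *              .flags  = _DRM_WRITE_COMBINING,
 *      };
 *      ioctl(fd, DRM_IOCTL_ADD_MAP, &map);
 *
 * On return, map.handle carries the user token to pass to mmap().
 */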
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs
 * where applicable and if supported by the kernel.
 */
static int drm_addmap_core(drm_device_t *dev, unsigned int offset,
                           unsigned int size, drm_map_type_t type,
                           drm_map_flags_t flags, drm_map_list_t **maplist)
{
        drm_map_t *map;
        drm_map_list_t *list;
        drm_dma_handle_t *dmah;
        unsigned long user_token;
        int ret;

        map = drm_alloc(sizeof(*map), DRM_MEM_MAPS);
        if (!map)
                return -ENOMEM;

        map->offset = offset;
        map->size = size;
        map->flags = flags;
        map->type = type;

        /* Only allow shared memory to be removable since we only keep
         * enough bookkeeping information about shared memory to allow
         * for removal when processes fork.
         */
        if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                  map->offset, map->size, map->type);
        if ((map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        map->mtrr = -1;
        map->handle = NULL;

        switch (map->type) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
                if (map->offset + (map->size - 1) < map->offset ||
                    map->offset < virt_to_phys(high_memory)) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                /* Some drivers preinitialize some maps, without the X Server
                 * needing to be aware of it.  Therefore, we just return success
                 * when the server tries to create a duplicate map.
                 */
                list = drm_find_matching_map(dev, map);
                if (list != NULL) {
                        if (list->map->size != map->size) {
                                DRM_DEBUG("Matching maps of type %d with "
                                          "mismatched sizes, (%ld vs %ld)\n",
                                          map->type, map->size,
                                          list->map->size);
                                list->map->size = map->size;
                        }

                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        *maplist = list;
                        return 0;
                }

                if (drm_core_has_MTRR(dev)) {
                        if (map->type == _DRM_FRAME_BUFFER ||
                            (map->flags & _DRM_WRITE_COMBINING)) {
                                map->mtrr = mtrr_add(map->offset, map->size,
                                                     MTRR_TYPE_WRCOMB, 1);
                        }
                }
                if (map->type == _DRM_REGISTERS)
                        map->handle = ioremap(map->offset, map->size);
                break;
        case _DRM_SHM:
                list = drm_find_matching_map(dev, map);
                if (list != NULL) {
                        if (list->map->size != map->size) {
                                DRM_DEBUG("Matching maps of type %d with "
                                          "mismatched sizes, (%ld vs %ld)\n",
                                          map->type, map->size, list->map->size);
                                list->map->size = map->size;
                        }

                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        *maplist = list;
                        return 0;
                }
                map->handle = vmalloc_user(map->size);
                DRM_DEBUG("%lu %d %p\n",
                          map->size, drm_order(map->size), map->handle);
                if (!map->handle) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if (map->flags & _DRM_CONTAINS_LOCK) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                vfree(map->handle);
                                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                                return -EBUSY;
                        }
                        dev->sigdata.lock = dev->lock.hw_lock = map->handle;    /* Pointer to lock */
                }
                break;
        case _DRM_AGP: {
                drm_agp_mem_t *entry;
                int valid = 0;

                if (!drm_core_has_AGP(dev)) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                /* Note: dev->agp->base may actually be 0 when the DRM
                 * is not in control of AGP space.  But if user space is
                 * in control, it should already have added the AGP base
                 * itself.
                 */
                map->offset += dev->agp->base;
                map->mtrr = dev->agp->agp_mtrr; /* for getmap */

                /* This assumes the DRM is in total control of AGP space.
                 * It's not always the case as AGP can be in the control
                 * of user space (i.e. i810 driver).  So this loop will get
                 * skipped and we double check that dev->agp->memory is
                 * actually set as well as being invalid before EPERM'ing.
                 */
                for (entry = dev->agp->memory; entry; entry = entry->next) {
                        if ((map->offset >= entry->bound) &&
                            (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
                                valid = 1;
                                break;
                        }
                }
                if (dev->agp->memory && !valid) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EPERM;
                }
                DRM_DEBUG("AGP offset = 0x%08lx, size = 0x%08lx\n", map->offset, map->size);

                break;
        }
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += (unsigned long)dev->sg->virtual;
                break;
        case _DRM_CONSISTENT:
                /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
                 * Since we're limiting the address to 2^32-1 (or less),
                 * casting it down to 32 bits is no problem, but we
                 * need to point to a 64-bit variable first. */
                dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
                if (!dmah) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->handle = dmah->vaddr;
                map->offset = (unsigned long)dmah->busaddr;
                kfree(dmah);
                break;
        default:
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }

        list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
        if (!list) {
                if (map->type == _DRM_REGISTERS)
                        iounmap(map->handle);
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        mutex_lock(&dev->struct_mutex);
        list_add(&list->head, &dev->maplist->head);

        /* Assign a 32-bit handle */
        /* We do it here so that dev->struct_mutex protects the increment */
        user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
                map->offset;
        ret = drm_map_handle(dev, &list->hash, user_token, 0);
        if (ret) {
                if (map->type == _DRM_REGISTERS)
                        iounmap(map->handle);
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                drm_free(list, sizeof(*list), DRM_MEM_MAPS);
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }

        list->user_token = list->hash.key << PAGE_SHIFT;
        mutex_unlock(&dev->struct_mutex);

        *maplist = list;
        return 0;
}

int drm_addmap(drm_device_t *dev, unsigned int offset,
               unsigned int size, drm_map_type_t type,
               drm_map_flags_t flags, drm_local_map_t **map_ptr)
{
        drm_map_list_t *list;
        int rc;

        rc = drm_addmap_core(dev, offset, size, type, flags, &list);
        if (!rc)
                *map_ptr = list->map;
        return rc;
}
EXPORT_SYMBOL(drm_addmap);
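
/*
 * Illustrative only: a driver would typically map its register BAR at load
 * time roughly as below ("dev_priv->mmio" is a placeholder for the
 * driver's own map pointer):
 *
 *      ret = drm_addmap(dev, drm_get_resource_start(dev, 0),
 *                       drm_get_resource_len(dev, 0),
 *                       _DRM_REGISTERS, _DRM_READ_ONLY, &dev_priv->mmio);
 */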

int drm_addmap_ioctl(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t map;
        drm_map_list_t *maplist;
        drm_map_t __user *argp = (void __user *)arg;
        int err;

        if (!(filp->f_mode & 3))
                return -EACCES; /* Require read/write */

        if (copy_from_user(&map, argp, sizeof(map)))
                return -EFAULT;

        if (!(capable(CAP_SYS_ADMIN) || map.type == _DRM_AGP))
                return -EPERM;

        err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
                              &maplist);

        if (err)
                return err;

        if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
                return -EFAULT;

        /* Avoid a warning on 64-bit: the cast isn't very nice, but the
         * API is already set, so it's too late to change it. */
        if (put_user((void *)(unsigned long)maplist->user_token, &argp->handle))
                return -EFAULT;
        return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it is in use, and frees any associated resources (such
 * as MTRRs) if it is not.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_dma_handle_t dmah;

        /* Find the list entry for the map and remove it */
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if (r_list->map == map) {
                        list_del(list);
                        drm_ht_remove_key(&dev->map_hash,
                                          r_list->user_token >> PAGE_SHIFT);
                        drm_free(list, sizeof(*list), DRM_MEM_MAPS);
                        break;
                }
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if (list == (&dev->maplist->head)) {
                return -EINVAL;
        }

        switch (map->type) {
        case _DRM_REGISTERS:
                iounmap(map->handle);
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                        int retcode;
                        retcode = mtrr_del(map->mtrr, map->offset, map->size);
                        DRM_DEBUG("mtrr_del=%d\n", retcode);
                }
                break;
        case _DRM_SHM:
                vfree(map->handle);
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
                break;
        case _DRM_CONSISTENT:
                dmah.vaddr = map->handle;
                dmah.busaddr = map->offset;
                dmah.size = map->size;
                __drm_pci_free(dev, &dmah);
                break;
        }
        drm_free(map, sizeof(*map), DRM_MEM_MAPS);

        return 0;
}

int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
        int ret;

        mutex_lock(&dev->struct_mutex);
        ret = drm_rmmap_locked(dev, map);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}

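/*
 * Illustrative only: the matching user-space call is DRM_IOCTL_RM_MAP,
 * passing a drm_map_t whose handle field holds the user token that the
 * add-map ioctl returned:
 *
 *      map.handle = token;
 *      ioctl(fd, DRM_IOCTL_RM_MAP, &map);
 */
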
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t request;
        drm_local_map_t *map = NULL;
        struct list_head *list;
        int ret;

        if (copy_from_user(&request, (drm_map_t __user *) arg, sizeof(request)))
                return -EFAULT;

        mutex_lock(&dev->struct_mutex);
        list_for_each(list, &dev->maplist->head) {
                drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

                if (r_list->map &&
                    r_list->user_token == (unsigned long)request.handle &&
                    r_list->map->flags & _DRM_REMOVABLE) {
                        map = r_list->map;
                        break;
                }
        }

        /* List has wrapped around to the head pointer, or it's empty and
         * we didn't find anything.
         */
        if (list == (&dev->maplist->head)) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        if (!map) {
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        /* Register and framebuffer maps are permanent */
        if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
                mutex_unlock(&dev->struct_mutex);
                return 0;
        }

        ret = drm_rmmap_locked(dev, map);

        mutex_unlock(&dev->struct_mutex);

        return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                drm_pci_free(dev, entry->seglist[i]);
                        }
                }
                drm_free(entry->seglist,
                         entry->seg_count *
                         sizeof(*entry->seglist), DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                drm_free(entry->buflist[i].dev_private,
                                         entry->buflist[i].dev_priv_size,
                                         DRM_MEM_BUFS);
                        }
                }
                drm_free(entry->buflist,
                         entry->buf_count *
                         sizeof(*entry->buflist), DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer
 * and reallocates the buffer list of the same size order to accommodate
 * the new buffers.
 */
int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_agp_mem_t *agp_entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i, valid;
        drm_buf_t **temp_buflist;

        if (!dma)
                return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;
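        /* Worked example: request->size = 65536 gives order 16 and
         * size = 1 << 16; a non-power-of-two request such as 48000 also
         * yields order 16, since drm_order() returns the exponent of the
         * smallest power of two >= the requested size. */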

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %lx\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        /* Make sure buffers are located in AGP memory that we own */
        valid = 0;
        for (agp_entry = dev->agp->memory; agp_entry; agp_entry = agp_entry->next) {
                if ((agp_offset >= agp_entry->bound) &&
                    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
                        valid = 1;
                        break;
                }
        }
        if (dev->agp->memory && !valid) {
                DRM_DEBUG("zone invalid\n");
                return -EINVAL;
        }
        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif                          /* __OS_HAS_AGP */

int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        drm_dma_handle_t *dmah;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG("count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                  request->count, request->size, size, order, dev->queue_count);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->seglist = drm_alloc(count * sizeof(*entry->seglist),
                                   DRM_MEM_SEGS);
        if (!entry->seglist) {
                drm_free(entry->buflist,
                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->seglist, 0, count * sizeof(*entry->seglist));

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = drm_alloc((dma->page_count + (count << page_order))
                                  * sizeof(*dma->pagelist), DRM_MEM_PAGES);
        if (!temp_pagelist) {
                drm_free(entry->buflist,
                         count * sizeof(*entry->buflist), DRM_MEM_BUFS);
                drm_free(entry->seglist,
                         count * sizeof(*entry->seglist), DRM_MEM_SEGS);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG("pagelist: %d entries\n",
                  dma->page_count + (count << page_order));

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

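        /* Each loop iteration allocates one DMA segment of
         * PAGE_SIZE << page_order bytes; when the per-buffer alignment is
         * smaller than the segment, the inner offset loop carves several
         * buffers out of a single segment.  (Comment added for clarity.)
         */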
        while (entry->buf_count < count) {
                dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order,
                                     0x1000, 0xfffffffful);
                if (!dmah) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        drm_free(temp_pagelist,
                                 (dma->page_count + (count << page_order))
                                 * sizeof(*dma->pagelist), DRM_MEM_PAGES);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = dmah;
                for (i = 0; i < (1 << page_order); i++) {
                        DRM_DEBUG("page %d @ 0x%08lx\n",
                                  dma->page_count + page_count,
                                  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
                        temp_pagelist[dma->page_count + page_count++]
                                = (unsigned long)dmah->vaddr + PAGE_SIZE * i;
                }
                for (offset = 0;
                     offset + size <= total && entry->buf_count < count;
                     offset += alignment, ++entry->buf_count) {
                        buf = &entry->buflist[entry->buf_count];
                        buf->idx = dma->buf_count + entry->buf_count;
                        buf->total = alignment;
                        buf->order = order;
                        buf->used = 0;
                        buf->offset = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(dmah->vaddr + offset);
                        buf->bus_address = dmah->busaddr + offset;
                        buf->next = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head(&buf->dma_wait);
                        buf->filp = NULL;

                        buf->dev_priv_size = dev->driver->dev_priv_size;
                        buf->dev_private = drm_alloc(buf->dev_priv_size,
                                                     DRM_MEM_BUFS);
                        if (!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                drm_free(temp_pagelist,
                                         (dma->page_count +
                                          (count << page_order))
                                         * sizeof(*dma->pagelist),
                                         DRM_MEM_PAGES);
                                mutex_unlock(&dev->struct_mutex);
                                atomic_dec(&dev->buf_alloc);
                                return -ENOMEM;
                        }
                        memset(buf->dev_private, 0, buf->dev_priv_size);

                        DRM_DEBUG("buffer %d @ %p\n",
                                  entry->buf_count, buf->address);
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                drm_free(temp_pagelist,
                         (dma->page_count + (count << page_order))
                         * sizeof(*dma->pagelist), DRM_MEM_PAGES);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original
         * pagelist with the new one.
         */
        if (dma->page_count) {
                drm_free(dma->pagelist,
                         dma->page_count * sizeof(*dma->pagelist),
                         DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        if (request->flags & _DRM_PCI_BUFFER_RO)
                dma->flags = _DRM_DMA_USE_PCI_RO;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);

static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_SG))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset
                                        + (unsigned long)dev->sg->virtual);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }

                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec(&dev->buf_alloc);
        return 0;
}

static int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN))
                return -EPERM;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        mutex_lock(&dev->struct_mutex);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        mutex_unlock(&dev->struct_mutex);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                mutex_unlock(&dev->struct_mutex);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += byte_count >> PAGE_SHIFT;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        mutex_unlock(&dev->struct_mutex);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_FB;

        atomic_dec(&dev->buf_alloc);
        return 0;
}

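/*
 * Illustrative only: user space reaches drm_addbufs() below through the
 * DRM_IOCTL_ADD_BUFS ioctl, roughly as sketched here (the count and size
 * values are arbitrary examples; buffers_offset is a placeholder):
 *
 *      drm_buf_desc_t req = {
 *              .count = 32,
 *              .size  = 65536,
 *              .flags = _DRM_AGP_BUFFER,
 *              .agp_start = buffers_offset,
 *      };
 *      ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 *
 * On success, req.count and req.size report what was actually allocated.
 */
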
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg)
{
        drm_buf_desc_t request;
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if (copy_from_user(&request, (drm_buf_desc_t __user *) arg,
                           sizeof(request)))
                return -EFAULT;

#if __OS_HAS_AGP
        if (request.flags & _DRM_AGP_BUFFER)
                ret = drm_addbufs_agp(dev, &request);
        else
#endif
        if (request.flags & _DRM_SG_BUFFER)
                ret = drm_addbufs_sg(dev, &request);
        else if (request.flags & _DRM_FB_BUFFER)
                ret = drm_addbufs_fb(dev, &request);
        else
                ret = drm_addbufs_pci(dev, &request);

        if (ret == 0) {
                if (copy_to_user((void __user *)arg, &request, sizeof(request))) {
                        ret = -EFAULT;
                }
        }
        return ret;
}

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing the allocation of more buffers after this call.
 * Information about each requested buffer is then copied into user space.
 */
1346 int drm_infobufs(struct inode *inode, struct file *filp,
1347                  unsigned int cmd, unsigned long arg)
1348 {
1349         drm_file_t *priv = filp->private_data;
1350         drm_device_t *dev = priv->head->dev;
1351         drm_device_dma_t *dma = dev->dma;
1352         drm_buf_info_t request;
1353         drm_buf_info_t __user *argp = (void __user *)arg;
1354         int i;
1355         int count;
1356
1357         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1358                 return -EINVAL;
1359
1360         if (!dma)
1361                 return -EINVAL;
1362
1363         spin_lock(&dev->count_lock);
1364         if (atomic_read(&dev->buf_alloc)) {
1365                 spin_unlock(&dev->count_lock);
1366                 return -EBUSY;
1367         }
1368         ++dev->buf_use;         /* Can't allocate more after this call */
1369         spin_unlock(&dev->count_lock);
1370
1371         if (copy_from_user(&request, argp, sizeof(request)))
1372                 return -EFAULT;
1373
1374         for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1375                 if (dma->bufs[i].buf_count)
1376                         ++count;
1377         }
1378
1379         DRM_DEBUG("count = %d\n", count);
1380
1381         if (request.count >= count) {
1382                 for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
1383                         if (dma->bufs[i].buf_count) {
1384                                 drm_buf_desc_t __user *to =
1385                                     &request.list[count];
1386                                 drm_buf_entry_t *from = &dma->bufs[i];
1387                                 drm_freelist_t *list = &dma->bufs[i].freelist;
1388                                 if (copy_to_user(&to->count,
1389                                                  &from->buf_count,
1390                                                  sizeof(from->buf_count)) ||
1391                                     copy_to_user(&to->size,
1392                                                  &from->buf_size,
1393                                                  sizeof(from->buf_size)) ||
1394                                     copy_to_user(&to->low_mark,
1395                                                  &list->low_mark,
1396                                                  sizeof(list->low_mark)) ||
1397                                     copy_to_user(&to->high_mark,
1398                                                  &list->high_mark,
1399                                                  sizeof(list->high_mark)))
1400                                         return -EFAULT;
1401
1402                                 DRM_DEBUG("%d %d %d %d %d\n",
1403                                           i,
1404                                           dma->bufs[i].buf_count,
1405                                           dma->bufs[i].buf_size,
1406                                           dma->bufs[i].freelist.low_mark,
1407                                           dma->bufs[i].freelist.high_mark);
1408                                 ++count;
1409                         }
1410                 }
1411         }
1412         request.count = count;
1413
1414         if (copy_to_user(argp, &request, sizeof(request)))
1415                 return -EFAULT;
1416
1417         return 0;
1418 }
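/*
 * Usage sketch (editorial addition): because the handler only fills the
 * list when request.count is at least the number of populated size pools,
 * a client typically calls DRM_IOCTL_INFO_BUFS twice -- once with
 * count = 0 to learn the pool count, then again with a suitably sized
 * list.  fd, includes and error handling are assumed.
 *
 *	drm_buf_info_t info;
 *
 *	info.count = 0;
 *	info.list  = NULL;
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);      -- learn pool count
 *	info.list = malloc(info.count * sizeof(drm_buf_desc_t));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);      -- fill descriptors
 */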
1419
1420 /**
1421  * Specifies a low and high water mark for buffer allocation.
1422  *
1423  * \param inode device inode.
1424  * \param filp file pointer.
1425  * \param cmd command.
1426  * \param arg a pointer to a drm_buf_desc structure.
1427  * \return zero on success or a negative number on failure.
1428  *
1429  * Verifies that the size order is within the admissible range and updates
1430  * the low and high water marks of the matching drm_device_dma::bufs entry.
1431  *
1432  * \note This ioctl is deprecated and almost never used.
1433  */
1434 int drm_markbufs(struct inode *inode, struct file *filp,
1435                  unsigned int cmd, unsigned long arg)
1436 {
1437         drm_file_t *priv = filp->private_data;
1438         drm_device_t *dev = priv->head->dev;
1439         drm_device_dma_t *dma = dev->dma;
1440         drm_buf_desc_t request;
1441         int order;
1442         drm_buf_entry_t *entry;
1443
1444         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1445                 return -EINVAL;
1446
1447         if (!dma)
1448                 return -EINVAL;
1449
1450         if (copy_from_user(&request,
1451                            (drm_buf_desc_t __user *) arg, sizeof(request)))
1452                 return -EFAULT;
1453
1454         DRM_DEBUG("%d, %d, %d\n",
1455                   request.size, request.low_mark, request.high_mark);
1456         order = drm_order(request.size);
1457         if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
1458                 return -EINVAL;
1459         entry = &dma->bufs[order];
1460
1461         if (request.low_mark < 0 || request.low_mark > entry->buf_count)
1462                 return -EINVAL;
1463         if (request.high_mark < 0 || request.high_mark > entry->buf_count)
1464                 return -EINVAL;
1465
1466         entry->freelist.low_mark = request.low_mark;
1467         entry->freelist.high_mark = request.high_mark;
1468
1469         return 0;
1470 }
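/*
 * Usage sketch (editorial addition, and deprecated per the note above):
 * the pool is selected by the size field, from which the handler derives
 * the order.  fd and error handling are assumed.
 *
 *	drm_buf_desc_t req;
 *
 *	memset(&req, 0, sizeof(req));
 *	req.size      = 4096;            -- selects the 4 KB pool
 *	req.low_mark  = 4;
 *	req.high_mark = 16;
 *	ioctl(fd, DRM_IOCTL_MARK_BUFS, &req);
 */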
1471
1472 /**
1473  * Unreserves the buffers in the list, previously reserved using drmDMA.
1474  *
1475  * \param inode device inode.
1476  * \param filp file pointer.
1477  * \param cmd command.
1478  * \param arg pointer to a drm_buf_free structure.
1479  * \return zero on success or a negative number on failure.
1480  *
1481  * Calls drm_free_buffer() for each used buffer.
1482  * This function is primarily used for debugging.
1483  */
1484 int drm_freebufs(struct inode *inode, struct file *filp,
1485                  unsigned int cmd, unsigned long arg)
1486 {
1487         drm_file_t *priv = filp->private_data;
1488         drm_device_t *dev = priv->head->dev;
1489         drm_device_dma_t *dma = dev->dma;
1490         drm_buf_free_t request;
1491         int i;
1492         int idx;
1493         drm_buf_t *buf;
1494
1495         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1496                 return -EINVAL;
1497
1498         if (!dma)
1499                 return -EINVAL;
1500
1501         if (copy_from_user(&request,
1502                            (drm_buf_free_t __user *) arg, sizeof(request)))
1503                 return -EFAULT;
1504
1505         DRM_DEBUG("%d\n", request.count);
1506         for (i = 0; i < request.count; i++) {
1507                 if (copy_from_user(&idx, &request.list[i], sizeof(idx)))
1508                         return -EFAULT;
1509                 if (idx < 0 || idx >= dma->buf_count) {
1510                         DRM_ERROR("Index %d (of %d max)\n",
1511                                   idx, dma->buf_count - 1);
1512                         return -EINVAL;
1513                 }
1514                 buf = dma->buflist[idx];
1515                 if (buf->filp != filp) {
1516                         DRM_ERROR("Process %d freeing buffer not owned\n",
1517                                   current->pid);
1518                         return -EINVAL;
1519                 }
1520                 drm_free_buffer(dev, buf);
1521         }
1522
1523         return 0;
1524 }
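/*
 * Usage sketch (editorial addition): the list holds indices of buffers
 * previously reserved by this client (e.g. via the DMA ioctl); freeing a
 * buffer owned by another filp fails with -EINVAL, as enforced above.
 * fd and error handling are assumed.
 *
 *	int idx[2] = { 0, 1 };
 *	drm_buf_free_t req;
 *
 *	req.count = 2;
 *	req.list  = idx;
 *	ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
 */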
1525
1526 /**
1527  * Maps all of the DMA buffers into client-virtual space (ioctl).
1528  *
1529  * \param inode device inode.
1530  * \param filp file pointer.
1531  * \param cmd command.
1532  * \param arg pointer to a drm_buf_map structure.
1533  * \return zero on success or a negative number on failure.
1534  *
1535  * Maps the AGP, SG, FB or PCI buffer region with do_mmap(), and copies
1536  * information about each buffer into user space. For PCI buffers, it calls
1537  * do_mmap() with an offset of 0, which drm_mmap() interprets as a request to
1538  * map the PCI DMA buffers and dispatches to drm_mmap_dma().
1539  */
1540 int drm_mapbufs(struct inode *inode, struct file *filp,
1541                 unsigned int cmd, unsigned long arg)
1542 {
1543         drm_file_t *priv = filp->private_data;
1544         drm_device_t *dev = priv->head->dev;
1545         drm_device_dma_t *dma = dev->dma;
1546         drm_buf_map_t __user *argp = (void __user *)arg;
1547         int retcode = 0;
1548         const int zero = 0;
1549         unsigned long virtual;
1550         unsigned long address;
1551         drm_buf_map_t request;
1552         int i;
1553
1554         if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
1555                 return -EINVAL;
1556
1557         if (!dma)
1558                 return -EINVAL;
1559
1560         spin_lock(&dev->count_lock);
1561         if (atomic_read(&dev->buf_alloc)) {
1562                 spin_unlock(&dev->count_lock);
1563                 return -EBUSY;
1564         }
1565         dev->buf_use++;         /* Can't allocate more after this call */
1566         spin_unlock(&dev->count_lock);
1567
1568         if (copy_from_user(&request, argp, sizeof(request)))
1569                 return -EFAULT;
1570
1571         if (request.count >= dma->buf_count) {
1572                 if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
1573                     || (drm_core_check_feature(dev, DRIVER_SG)
1574                         && (dma->flags & _DRM_DMA_USE_SG))
1575                     || (drm_core_check_feature(dev, DRIVER_FB_DMA)
1576                         && (dma->flags & _DRM_DMA_USE_FB))) {
1577                         drm_map_t *map = dev->agp_buffer_map;
1578                         unsigned long token = dev->agp_buffer_token;
1579
1580                         if (!map) {
1581                                 retcode = -EINVAL;
1582                                 goto done;
1583                         }
1584
1585                         down_write(&current->mm->mmap_sem);
1586                         virtual = do_mmap(filp, 0, map->size,
1587                                           PROT_READ | PROT_WRITE,
1588                                           MAP_SHARED, token);
1589                         up_write(&current->mm->mmap_sem);
1590                 } else {
1591                         down_write(&current->mm->mmap_sem);
1592                         virtual = do_mmap(filp, 0, dma->byte_count,
1593                                           PROT_READ | PROT_WRITE,
1594                                           MAP_SHARED, 0);
1595                         up_write(&current->mm->mmap_sem);
1596                 }
1597                 if (virtual > -1024UL) {
1598                         /* do_mmap() failed: virtual holds a -errno value */
1599                         retcode = (signed long)virtual;
1600                         goto done;
1601                 }
1602                 request.virtual = (void __user *)virtual;
1603
1604                 for (i = 0; i < dma->buf_count; i++) {
1605                         if (copy_to_user(&request.list[i].idx,
1606                                          &dma->buflist[i]->idx,
1607                                          sizeof(request.list[0].idx))) {
1608                                 retcode = -EFAULT;
1609                                 goto done;
1610                         }
1611                         if (copy_to_user(&request.list[i].total,
1612                                          &dma->buflist[i]->total,
1613                                          sizeof(request.list[0].total))) {
1614                                 retcode = -EFAULT;
1615                                 goto done;
1616                         }
1617                         if (copy_to_user(&request.list[i].used,
1618                                          &zero, sizeof(zero))) {
1619                                 retcode = -EFAULT;
1620                                 goto done;
1621                         }
1622                         address = virtual + dma->buflist[i]->offset;    /* user address of this buffer */
1623                         if (copy_to_user(&request.list[i].address,
1624                                          &address, sizeof(address))) {
1625                                 retcode = -EFAULT;
1626                                 goto done;
1627                         }
1628                 }
1629         }
1630       done:
1631         request.count = dma->buf_count;
1632         DRM_DEBUG("%d buffers, retcode = %d\n", request.count, retcode);
1633
1634         if (copy_to_user(argp, &request, sizeof(request)))
1635                 return -EFAULT;
1636
1637         return retcode;
1638 }
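/*
 * Usage sketch (editorial addition): as with DRM_IOCTL_INFO_BUFS, the
 * mapping is only performed when request.count covers every buffer, so a
 * client calls DRM_IOCTL_MAP_BUFS twice -- first with count = 0 to learn
 * dma->buf_count, then with a list sized to match.  fd and error handling
 * are assumed.
 *
 *	drm_buf_map_t map;
 *
 *	map.count   = 0;
 *	map.list    = NULL;
 *	map.virtual = NULL;
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &map);        -- learn buffer count
 *	map.list = malloc(map.count * sizeof(drm_buf_pub_t));
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &map);        -- map and fill list
 */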
1639
1640 /**
1641  * Compute size order.  Returns the exponent of the smallest power of two
1642  * which is greater than or equal to the given number.
1643  *
1644  * \param size size.
1645  * \return order.
1646  *
1647  * \todo Can be made faster.
1648  */
1649 int drm_order(unsigned long size)
1650 {
1651         int order;
1652         unsigned long tmp;
1653
1654         for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++) ;     /* order = floor(log2(size)) */
1655
1656         if (size & (size - 1))
1657                 ++order;
1658
1659         return order;
1660 }
1661 EXPORT_SYMBOL(drm_order);
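/*
 * Editorial note on the \todo above: the loop could plausibly be replaced
 * with the kernel's bit helpers.  An untested sketch, assuming fls_long()
 * is available in this tree: for size >= 1, fls_long(size - 1) is the
 * exponent of the smallest power of two >= size, so the body could become:
 *
 *	int drm_order(unsigned long size)
 *	{
 *		if (size <= 1)
 *			return 0;        -- also keeps drm_order(0) == 0
 *		return fls_long(size - 1);
 *	}
 */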
1662
1663