/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

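/**
 * Get the bus address at which a PCI resource of the device starts.
 * Thin wrapper around pci_resource_start() on dev->pdev.
 */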
unsigned long drm_get_resource_start(drm_device_t *dev, unsigned int resource)
{
        return pci_resource_start(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_start);

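/**
 * Get the length of a PCI resource of the device.
 * Thin wrapper around pci_resource_len() on dev->pdev.
 */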
unsigned long drm_get_resource_len(drm_device_t *dev, unsigned int resource)
{
        return pci_resource_len(dev->pdev, resource);
}
EXPORT_SYMBOL(drm_get_resource_len);

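/**
 * Search dev->maplist for an entry whose map has the same type and offset as
 * \p map, so that duplicate mappings can be detected and reused.
 */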
static drm_map_list_t *drm_find_matching_map(drm_device_t *dev,
                                             drm_local_map_t *map)
{
        struct list_head *list;

        list_for_each(list, &dev->maplist->head) {
                drm_map_list_t *entry = list_entry(list, drm_map_list_t, head);
                if (entry->map && map->type == entry->map->type &&
                    entry->map->offset == map->offset) {
                        return entry;
                }
        }

        return NULL;
}

/*
 * Used to allocate 32-bit handles for mappings.
 */
#define START_RANGE 0x10000000
#define END_RANGE 0x40000000

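/*
 * On 64-bit kernels a map's kernel virtual address may not fit in the 32-bit
 * handle exposed to user space.  For such addresses, HandleID() hands out
 * tokens from [START_RANGE, END_RANGE] in PAGE_SIZE steps, then linearly
 * probes the map list until the token is not already in use.
 */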
#ifdef _LP64
static __inline__ unsigned int HandleID(unsigned long lhandle, drm_device_t *dev)
{
        static unsigned int map32_handle = START_RANGE;
        unsigned int hash;

        if (lhandle & 0xffffffff00000000) {
                hash = map32_handle;
                map32_handle += PAGE_SIZE;
                if (map32_handle > END_RANGE)
                        map32_handle = START_RANGE;
        } else
                hash = lhandle;

        while (1) {
                drm_map_list_t *_entry;
                list_for_each_entry(_entry, &dev->maplist->head, head) {
                        if (_entry->user_token == hash)
                                break;
                }
                if (&_entry->head == &dev->maplist->head)
                        return hash;

                hash += PAGE_SIZE;
                map32_handle += PAGE_SIZE;
        }
}
#else
# define HandleID(x,dev) (unsigned int)(x)
#endif

/**
 * Core function to specify a range of memory that is available for mapping
 * by a non-root process.
 *
 * \param dev DRM device.
 * \param offset offset of the memory range.
 * \param size size of the memory range.
 * \param type memory map type.
 * \param flags memory map flags.
 * \param maplist on success, filled with the newly created map list entry.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
int drm_addmap_core(drm_device_t * dev, unsigned int offset,
                    unsigned int size, drm_map_type_t type,
                    drm_map_flags_t flags, drm_map_list_t **maplist)
{
        drm_map_t *map;
        drm_map_list_t *list;
        drm_dma_handle_t *dmah;

        map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
        if ( !map )
                return -ENOMEM;

        map->offset = offset;
        map->size = size;
        map->flags = flags;
        map->type = type;

        /* Only allow shared memory to be removable since we only keep enough
         * book keeping information about shared memory to allow for removal
         * when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        map->mtrr   = -1;
        map->handle = NULL;

        switch ( map->type ) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__)
                if ( map->offset + map->size < map->offset ||
                     map->offset < virt_to_phys(high_memory) ) {
                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                /* Some drivers preinitialize some maps, without the X Server
                 * needing to be aware of it.  Therefore, we just return success
                 * when the server tries to create a duplicate map.
                 */
                list = drm_find_matching_map(dev, map);
                if (list != NULL) {
                        if (list->map->size != map->size) {
                                DRM_DEBUG("Matching maps of type %d with "
                                          "mismatched sizes, (%ld vs %ld)\n",
                                          map->type, map->size, list->map->size);
                                list->map->size = map->size;
                        }

                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        *maplist = list;
                        return 0;
                }

                if (drm_core_has_MTRR(dev)) {
                        if ( map->type == _DRM_FRAME_BUFFER ||
                             (map->flags & _DRM_WRITE_COMBINING) ) {
                                map->mtrr = mtrr_add( map->offset, map->size,
                                                      MTRR_TYPE_WRCOMB, 1 );
                        }
                }
                if (map->type == _DRM_REGISTERS)
                        map->handle = drm_ioremap( map->offset, map->size,
                                                   dev );
                break;

        case _DRM_SHM:
                map->handle = vmalloc_32(map->size);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, drm_order( map->size ), map->handle );
                if ( !map->handle ) {
                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                vfree( map->handle );
                                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                                return -EBUSY;
                        }
                        dev->sigdata.lock =
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
        case _DRM_AGP:
                if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
                        map->offset += dev->hose->mem_space->start;
#endif
                        map->offset += dev->agp->base;
                        map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
                }
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += (unsigned long)dev->sg->virtual;
                break;
        case _DRM_CONSISTENT:
                /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
                 * As we're limiting the address to 2^32-1 (or less),
                 * casting it down to 32 bits is no problem, but we
                 * need to point to a 64bit variable first. */
                dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
                if (!dmah) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->handle = dmah->vaddr;
                map->offset = (unsigned long)dmah->busaddr;
                kfree(dmah);
                break;
        default:
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }

        list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
        if (!list) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        down(&dev->struct_sem);
        list_add(&list->head, &dev->maplist->head);
        /* Assign a 32-bit handle */
        /* We do it here so that dev->struct_sem protects the increment */
        list->user_token = HandleID(map->type == _DRM_SHM
                                    ? (unsigned long)map->handle
                                    : map->offset, dev);
        up(&dev->struct_sem);

        *maplist = list;
        return 0;
}

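/**
 * Add a map; wraps drm_addmap_core() and returns the new drm_local_map_t
 * rather than the internal map list entry.
 */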
int drm_addmap(drm_device_t *dev, unsigned int offset,
               unsigned int size, drm_map_type_t type,
               drm_map_flags_t flags, drm_local_map_t **map_ptr)
{
        drm_map_list_t *list;
        int rc;

        rc = drm_addmap_core(dev, offset, size, type, flags, &list);
        if (!rc)
                *map_ptr = list->map;
        return rc;
}
EXPORT_SYMBOL(drm_addmap);

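/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */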
int drm_addmap_ioctl(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t map;
        drm_map_list_t *maplist;
        drm_map_t __user *argp = (void __user *)arg;
        int err;

        if (!(filp->f_mode & 3))
                return -EACCES; /* Require read/write */

        if (copy_from_user(&map, argp, sizeof(map))) {
                return -EFAULT;
        }

        err = drm_addmap_core(dev, map.offset, map.size, map.type, map.flags,
                              &maplist);

        if (err)
                return err;

        if (copy_to_user(argp, maplist->map, sizeof(drm_map_t)))
                return -EFAULT;
        if (put_user(maplist->user_token, &argp->handle))
                return -EFAULT;
        return 0;
}

/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param dev DRM device.
 * \param map map to be removed.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list, and
 * frees any associated resources (such as MTRR's) if the map is no longer in
 * use.  The caller must hold dev->struct_sem.
 *
 * \sa drm_addmap
 */
int drm_rmmap_locked(drm_device_t *dev, drm_local_map_t *map)
{
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_dma_handle_t dmah;

        /* Find the list entry for the map and remove it */
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if (r_list->map == map) {
                        list_del(list);
                        drm_free(list, sizeof(*list), DRM_MEM_MAPS);
                        break;
                }
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if (list == (&dev->maplist->head)) {
                return -EINVAL;
        }

        switch (map->type) {
        case _DRM_REGISTERS:
                drm_ioremapfree(map->handle, map->size, dev);
                /* FALLTHROUGH */
        case _DRM_FRAME_BUFFER:
                if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
                        int retcode;
                        retcode = mtrr_del(map->mtrr, map->offset,
                                           map->size);
                        DRM_DEBUG("mtrr_del=%d\n", retcode);
                }
                break;
        case _DRM_SHM:
                vfree(map->handle);
                break;
        case _DRM_AGP:
        case _DRM_SCATTER_GATHER:
                break;
        case _DRM_CONSISTENT:
                dmah.vaddr = map->handle;
                dmah.busaddr = map->offset;
                dmah.size = map->size;
                __drm_pci_free(dev, &dmah);
                break;
        }
        drm_free(map, sizeof(*map), DRM_MEM_MAPS);

        return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);

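/**
 * Remove a map, taking dev->struct_sem around drm_rmmap_locked().
 */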
int drm_rmmap(drm_device_t *dev, drm_local_map_t *map)
{
        int ret;

        down(&dev->struct_sem);
        ret = drm_rmmap_locked(dev, map);
        up(&dev->struct_sem);

        return ret;
}
EXPORT_SYMBOL(drm_rmmap);

/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 */
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t request;
        drm_local_map_t *map = NULL;
        struct list_head *list;
        int ret;

        if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
                return -EFAULT;
        }

        down(&dev->struct_sem);
        list_for_each(list, &dev->maplist->head) {
                drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);

                if (r_list->map &&
                    r_list->user_token == (unsigned long)request.handle &&
                    r_list->map->flags & _DRM_REMOVABLE) {
                        map = r_list->map;
                        break;
                }
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if (list == (&dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }

        if (!map) {
                up(&dev->struct_sem);
                return -EINVAL;
        }

        /* Register and framebuffer maps are permanent */
        if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
                up(&dev->struct_sem);
                return 0;
        }

        ret = drm_rmmap_locked(dev, map);

        up(&dev->struct_sem);

        return ret;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                drm_free_pages(entry->seglist[i],
                                               entry->page_order,
                                               DRM_MEM_DMA);
                        }
                }
                drm_free(entry->seglist,
                         entry->seg_count *
                         sizeof(*entry->seglist),
                         DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                drm_free(entry->buflist[i].dev_private,
                                         entry->buflist[i].dev_priv_size,
                                         DRM_MEM_BUFS);
                        }
                }
                drm_free(entry->buflist,
                         entry->buf_count *
                         sizeof(*entry->buflist),
                         DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc( buf->dev_priv_size,
                                              DRM_MEM_BUFS );
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif /* __OS_HAS_AGP */

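/**
 * Add PCI buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * Allocates pages with drm_alloc_pages() and carves them into buffers of the
 * requested size order, keeping the original pagelist around until every
 * allocation has succeeded.
 */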
int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        unsigned long page;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                   request->count, request->size, size,
                   order, dev->queue_count );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        alignment = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
                                    DRM_MEM_SEGS );
        if ( !entry->seglist ) {
                drm_free( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
                                   * sizeof(*dma->pagelist),
                                   DRM_MEM_PAGES );
        if (!temp_pagelist) {
                drm_free( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                drm_free( entry->seglist,
                          count * sizeof(*entry->seglist),
                          DRM_MEM_SEGS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist,
               dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                page = drm_alloc_pages( page_order, DRM_MEM_DMA );
                if ( !page ) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        drm_free( temp_pagelist,
                                  (dma->page_count + (count << page_order))
                                  * sizeof(*dma->pagelist),
                                  DRM_MEM_PAGES );
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = page;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   page + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head( &buf->dma_wait );
                        buf->filp    = NULL;

                        buf->dev_priv_size = dev->driver->dev_priv_size;
                        buf->dev_private = drm_alloc( buf->dev_priv_size,
                                                      DRM_MEM_BUFS );
                        if (!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                drm_free( temp_pagelist,
                                          (dma->page_count + (count << page_order))
                                          * sizeof(*dma->pagelist),
                                          DRM_MEM_PAGES );
                                up( &dev->struct_sem );
                                atomic_dec( &dev->buf_alloc );
                                return -ENOMEM;
                        }
                        memset( buf->dev_private, 0, buf->dev_priv_size );

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                drm_free( temp_pagelist,
                          (dma->page_count + (count << page_order))
                          * sizeof(*dma->pagelist),
                          DRM_MEM_PAGES );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                drm_free(dma->pagelist,
                         dma->page_count * sizeof(*dma->pagelist),
                         DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
EXPORT_SYMBOL(drm_addbufs_pci);

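/**
 * Add scatter-gather buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * Like drm_addbufs_agp(), except that the buffers describe offsets into the
 * previously allocated scatter-gather region based at dev->sg->virtual.
 */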
static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;

        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset
                                        + (unsigned long)dev->sg->virtual);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc( buf->dev_priv_size,
                                              DRM_MEM_BUFS );
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }

                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec( &dev->buf_alloc );
        return 0;
}

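/**
 * Add buffers in a framebuffer region for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * Like drm_addbufs_agp(), except that the buffers are placed at offsets from
 * request->agp_start and the DMA flags are set to _DRM_DMA_USE_FB.
 */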
static int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
            ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        down(&dev->struct_sem);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        up(&dev->struct_sem);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        up(&dev->struct_sem);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_FB;

        atomic_dec(&dev->buf_alloc);
        return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call to addbufs_agp(), addbufs_sg(),
 * addbufs_fb() or addbufs_pci() for AGP, scatter-gather, framebuffer or
 * consistent PCI memory respectively.
 */
int drm_addbufs( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_buf_desc_t request;
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

#if __OS_HAS_AGP
        if ( request.flags & _DRM_AGP_BUFFER )
                ret = drm_addbufs_agp(dev, &request);
        else
#endif
        if ( request.flags & _DRM_SG_BUFFER )
                ret = drm_addbufs_sg(dev, &request);
        else if ( request.flags & _DRM_FB_BUFFER )
                ret = drm_addbufs_fb(dev, &request);
        else
                ret = drm_addbufs_pci(dev, &request);

        if (ret == 0) {
                if (copy_to_user((void __user *)arg, &request,
                                 sizeof(request))) {
                        ret = -EFAULT;
                }
        }
        return ret;
}

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        drm_buf_info_t __user *argp = (void __user *)arg;
        int i;
        int count;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t __user *to = &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if ( copy_to_user( &to->count,
                                                   &from->buf_count,
                                                   sizeof(from->buf_count) ) ||
                                     copy_to_user( &to->size,
                                                   &from->buf_size,
                                                   sizeof(from->buf_size) ) ||
                                     copy_to_user( &to->low_mark,
                                                   &list->low_mark,
                                                   sizeof(list->low_mark) ) ||
                                     copy_to_user( &to->high_mark,
                                                   &list->high_mark,
                                                   sizeof(list->high_mark) ) )
                                        return -EFAULT;

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return 0;
}

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );
        order = drm_order( request.size );
        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        entry = &dma->bufs[order];

        if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
                return -EINVAL;
        if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}

/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_free_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d\n", request.count );
        for ( i = 0 ; i < request.count ; i++ ) {
                if ( copy_from_user( &idx,
                                     &request.list[i],
                                     sizeof(idx) ) )
                        return -EFAULT;
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR( "Process %d freeing buffer not owned\n",
                                   current->pid );
                        return -EINVAL;
                }
                drm_free_buffer( dev, buf );
        }

        return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or FB buffer region with do_mmap(), and copies information
 * about each buffer into user space.  The PCI buffers are already mapped on
 * the addbufs_pci() call.
 */
int drm_mapbufs( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_map_t __user *argp = (void __user *)arg;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        if ( request.count >= dma->buf_count ) {
                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
                    || (drm_core_check_feature(dev, DRIVER_SG)
                        && (dma->flags & _DRM_DMA_USE_SG))
                    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
                        && (dma->flags & _DRM_DMA_USE_FB))) {
                        drm_map_t *map = dev->agp_buffer_map;
                        unsigned long token = dev->agp_buffer_token;

                        if ( !map ) {
                                retcode = -EINVAL;
                                goto done;
                        }

                        down_write( &current->mm->mmap_sem );
                        virtual = do_mmap( filp, 0, map->size,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED,
                                           token );
                        up_write( &current->mm->mmap_sem );
                } else {
                        down_write( &current->mm->mmap_sem );
                        virtual = do_mmap( filp, 0, dma->byte_count,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED, 0 );
                        up_write( &current->mm->mmap_sem );
                }
                if ( virtual > -1024UL ) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void __user *)virtual;

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        if ( copy_to_user( &request.list[i].idx,
                                           &dma->buflist[i]->idx,
                                           sizeof(request.list[0].idx) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].total,
                                           &dma->buflist[i]->total,
                                           sizeof(request.list[0].total) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].used,
                                           &zero,
                                           sizeof(zero) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset; /* *** */
                        if ( copy_to_user( &request.list[i].address,
                                           &address,
                                           sizeof(address) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return retcode;
}

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
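 * For example, drm_order(4096) returns 12 and drm_order(4097) returns 13.
 *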
 * \todo Can be made faster.
 */
int drm_order( unsigned long size )
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
                ;

        if (size & (size - 1))
                ++order;

        return order;
}
EXPORT_SYMBOL(drm_order);