drm: update support for drm pci buffers
/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"

/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order( unsigned long size )
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
                ;

        if (size & (size - 1))
                ++order;

        return order;
}
EXPORT_SYMBOL(drm_order);
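
/*
 * Illustrative examples (comment only, not compiled): drm_order() returns
 * the exponent of the smallest power of two >= size, so
 *
 *      drm_order(4096) == 12   (1 << 12 == 4096)
 *      drm_order(4097) == 13   (rounds up to 8192)
 *      drm_order(1)    == 0
 *
 * The addbufs functions below use it to bucket buffer sizes into
 * dma->bufs[order].
 */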

#ifdef CONFIG_COMPAT
/*
 * Used to allocate 32-bit handles for _DRM_SHM regions
 * The 0x10000000 value is chosen to be out of the way of
 * FB/register and GART physical addresses.
 */
static unsigned int map32_handle = 0x10000000;
#endif

/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRRs where
 * applicable and if supported by the kernel.
 */
int drm_addmap( struct inode *inode, struct file *filp,
                unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t *map;
        drm_map_t __user *argp = (void __user *)arg;
        drm_map_list_t *list;
        drm_dma_handle_t *dmah;

        if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */

        map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
        if ( !map )
                return -ENOMEM;

        if ( copy_from_user( map, argp, sizeof(*map) ) ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EFAULT;
        }

        /* Only allow shared memory to be removable since we only keep
         * enough bookkeeping information about shared memory to allow
         * for removal when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        map->mtrr   = -1;
        map->handle = NULL;

        switch ( map->type ) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
                if ( map->offset + map->size < map->offset ||
                     map->offset < virt_to_phys(high_memory) ) {
                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                if (drm_core_has_MTRR(dev)) {
                        if ( map->type == _DRM_FRAME_BUFFER ||
                             (map->flags & _DRM_WRITE_COMBINING) ) {
                                map->mtrr = mtrr_add( map->offset, map->size,
                                                      MTRR_TYPE_WRCOMB, 1 );
                        }
                }
                if (map->type == _DRM_REGISTERS)
                        map->handle = drm_ioremap( map->offset, map->size,
                                                   dev );
                break;

        case _DRM_SHM:
                map->handle = vmalloc_32(map->size);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, drm_order( map->size ), map->handle );
                if ( !map->handle ) {
                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                vfree( map->handle );
                                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                                return -EBUSY;
                        }
                        dev->sigdata.lock =
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
        case _DRM_AGP:
                if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
                        map->offset += dev->hose->mem_space->start;
#endif
                        map->offset += dev->agp->base;
                        map->mtrr   = dev->agp->agp_mtrr; /* for getmap */
                }
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += dev->sg->handle;
                break;
        case _DRM_CONSISTENT:
                /* dma_addr_t is 64-bit on i386 with CONFIG_HIGHMEM64G.
                 * Since we're limiting the address to 2^32-1 (or less),
                 * casting it down to 32 bits is no problem, but we
                 * need to point to a 64-bit variable first. */
                dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
                if (!dmah) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->handle = dmah->vaddr;
                map->offset = (unsigned long)dmah->busaddr;
                kfree(dmah);
                break;
        default:
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }

        list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
        if(!list) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        down(&dev->struct_sem);
        list_add(&list->head, &dev->maplist->head);
#ifdef CONFIG_COMPAT
        /* Assign a 32-bit handle for _DRM_SHM mappings */
        /* We do it here so that dev->struct_sem protects the increment */
        if (map->type == _DRM_SHM)
                map->offset = map32_handle += PAGE_SIZE;
#endif
        up(&dev->struct_sem);

        if ( copy_to_user( argp, map, sizeof(*map) ) )
                return -EFAULT;
        if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset)))
                return -EFAULT;
        return 0;
}
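
/*
 * Userspace usage sketch for the ioctl above (illustrative only; assumes
 * the drm_map_t layout from drm.h and a register BAR already known to the
 * caller):
 *
 *      drm_map_t map;
 *      map.offset = regs_base;         physical address, page-aligned
 *      map.size   = regs_size;         page-aligned length
 *      map.type   = _DRM_REGISTERS;
 *      map.flags  = _DRM_READ_ONLY;
 *      ioctl(fd, DRM_IOCTL_ADD_MAP, &map);
 *
 * On return, map.handle holds the token that userspace passes as the
 * mmap() offset to actually map the region.
 */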

/**
 * Remove a private map from the list and deallocate resources if the
 * mapping isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it's being used, and frees any associated resources (such
 * as MTRRs) if it's not in use.
 *
 * \sa addmap().
 */
int drm_rmmap(struct inode *inode, struct file *filp,
              unsigned int cmd, unsigned long arg)
{
        drm_file_t      *priv   = filp->private_data;
        drm_device_t    *dev    = priv->head->dev;
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_vma_entry_t *pt, *prev;
        drm_map_t *map;
        drm_map_t request;
        int found_maps = 0;

        if (copy_from_user(&request, (drm_map_t __user *)arg,
                           sizeof(request))) {
                return -EFAULT;
        }

        down(&dev->struct_sem);
        list = &dev->maplist->head;
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if(r_list->map &&
                   r_list->map->offset == (unsigned long) request.handle &&
                   r_list->map->flags & _DRM_REMOVABLE) break;
        }

        /* List has wrapped around to the head pointer, or it's empty and
         * we didn't find anything.
         */
        if(list == (&dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        map = r_list->map;
        list_del(list);
        drm_free(list, sizeof(*list), DRM_MEM_MAPS);

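        /* Count how many VMAs still reference this map; the backing
         * resources may only be torn down once no mapping remains. */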
        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma->vm_private_data == map) found_maps++;
        }

        if(!found_maps) {
                drm_dma_handle_t dmah;

                switch (map->type) {
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
                        if (drm_core_has_MTRR(dev)) {
                                if (map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                        }
                        drm_ioremapfree(map->handle, map->size, dev);
                        break;
                case _DRM_SHM:
                        vfree(map->handle);
                        break;
                case _DRM_AGP:
                case _DRM_SCATTER_GATHER:
                        break;
                case _DRM_CONSISTENT:
                        dmah.vaddr = map->handle;
                        dmah.busaddr = map->offset;
                        dmah.size = map->size;
                        __drm_pci_free(dev, &dmah);
                        break;
                }
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
        }
        up(&dev->struct_sem);
        return 0;
}

/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                drm_free_pages(entry->seglist[i],
                                               entry->page_order,
                                               DRM_MEM_DMA);
                        }
                }
                drm_free(entry->seglist,
                         entry->seg_count *
                         sizeof(*entry->seglist),
                         DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                drm_free(entry->buflist[i].dev_private,
                                         entry->buflist[i].dev_priv_size,
                                         DRM_MEM_BUFS);
                        }
                }
                drm_free(entry->buflist,
                         entry->buf_count *
                         sizeof(*entry->buflist),
                         DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}

#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list to accommodate the new buffers.
 */
static int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;
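
        /* Worked example (illustrative): a 16 KiB request on a machine with
         * 4 KiB pages gives order = 14, so page_order = 14 - PAGE_SHIFT = 2
         * and total = PAGE_SIZE << 2 = 16 KiB, i.e. four pages per
         * allocation. */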

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc( buf->dev_priv_size,
                                              DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev,entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev,entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __OS_HAS_AGP */

static int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        unsigned long page;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                   request->count, request->size, size,
                   order, dev->queue_count );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        alignment = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
                                    DRM_MEM_SEGS );
        if ( !entry->seglist ) {
                drm_free( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
                                   * sizeof(*dma->pagelist),
                                   DRM_MEM_PAGES );
        if (!temp_pagelist) {
                drm_free( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                drm_free( entry->seglist,
                          count * sizeof(*entry->seglist),
                          DRM_MEM_SEGS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist,
               dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                page = drm_alloc_pages( page_order, DRM_MEM_DMA );
                if ( !page ) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        drm_free( temp_pagelist,
                                  (dma->page_count + (count << page_order))
                                  * sizeof(*dma->pagelist),
                                  DRM_MEM_PAGES );
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = page;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   page + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head( &buf->dma_wait );
                        buf->filp    = NULL;

                        buf->dev_priv_size = dev->driver->dev_priv_size;
                        buf->dev_private = drm_alloc( buf->dev_priv_size,
                                                      DRM_MEM_BUFS );
                        if(!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev,entry);
                                drm_free( temp_pagelist,
                                          (dma->page_count + (count << page_order))
                                          * sizeof(*dma->pagelist),
                                          DRM_MEM_PAGES );
                                up( &dev->struct_sem );
                                atomic_dec( &dev->buf_alloc );
                                return -ENOMEM;
                        }
                        memset( buf->dev_private, 0, buf->dev_priv_size );

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev,entry);
                drm_free( temp_pagelist,
                          (dma->page_count + (count << page_order))
                          * sizeof(*dma->pagelist),
                          DRM_MEM_PAGES );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                drm_free(dma->pagelist,
                         dma->page_count * sizeof(*dma->pagelist),
                         DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
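
/*
 * Note on the PCI path above (summary, not new behaviour): each
 * drm_alloc_pages() call returns one physically contiguous segment of
 * PAGE_SIZE << page_order bytes, which is carved into buffers at
 * `alignment' strides; every constituent page is also recorded in
 * dma->pagelist so that the DMA mmap path can later fault the pages into
 * a client's address space.
 */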

static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;

        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment  = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc( buf->dev_priv_size,
                                              DRM_MEM_BUFS );
                if(!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev,entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }

                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if(!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev,entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec( &dev->buf_alloc );
        return 0;
}

int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        down(&dev->struct_sem);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        up(&dev->struct_sem);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        up(&dev->struct_sem);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_FB;

        atomic_dec(&dev->buf_alloc);
        return 0;
}

/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call to addbufs_agp(), addbufs_sg(),
 * addbufs_fb() or addbufs_pci() for AGP, scatter-gather, framebuffer or
 * consistent PCI memory respectively.
 */
int drm_addbufs( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_buf_desc_t request;
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

#if __OS_HAS_AGP
        if ( request.flags & _DRM_AGP_BUFFER )
                ret = drm_addbufs_agp(dev, &request);
        else
#endif
        if ( request.flags & _DRM_SG_BUFFER )
                ret = drm_addbufs_sg(dev, &request);
        else if ( request.flags & _DRM_FB_BUFFER )
                ret = drm_addbufs_fb(dev, &request);
        else
                ret = drm_addbufs_pci(dev, &request);

        if (ret == 0) {
                if (copy_to_user((void __user *)arg, &request,
                                 sizeof(request))) {
                        ret = -EFAULT;
                }
        }
        return ret;
}
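
/*
 * Userspace usage sketch for the ioctl above (illustrative only; assumes
 * the drm_buf_desc_t layout from drm.h). Requesting 32 page-aligned 16 KiB
 * buffers from PCI consistent memory:
 *
 *      drm_buf_desc_t req;
 *      memset(&req, 0, sizeof(req));
 *      req.count = 32;
 *      req.size  = 16384;
 *      req.flags = _DRM_PAGE_ALIGN;    no AGP/SG/FB flag, so the PCI path
 *      ioctl(fd, DRM_IOCTL_ADD_BUFS, &req);
 *
 * On return, req.count and req.size reflect what was actually allocated.
 */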

/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call. Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        drm_buf_info_t __user *argp = (void __user *)arg;
        int i;
        int count;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t __user *to = &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if ( copy_to_user( &to->count,
                                                   &from->buf_count,
                                                   sizeof(from->buf_count) ) ||
                                     copy_to_user( &to->size,
                                                   &from->buf_size,
                                                   sizeof(from->buf_size) ) ||
                                     copy_to_user( &to->low_mark,
                                                   &list->low_mark,
                                                   sizeof(list->low_mark) ) ||
                                     copy_to_user( &to->high_mark,
                                                   &list->high_mark,
                                                   sizeof(list->high_mark) ) )
                                        return -EFAULT;

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return 0;
}
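
/*
 * Typical two-pass use of the ioctl above from a client (illustrative
 * only): first call with count = 0 to learn how many size buckets are
 * populated, then allocate the list and call again:
 *
 *      drm_buf_info_t info = { 0, NULL };
 *      ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 *      info.list = calloc(info.count, sizeof(drm_buf_desc_t));
 *      ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);
 */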

/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and rarely used.
 */
int drm_markbufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );
        order = drm_order( request.size );
        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        entry = &dma->bufs[order];

        if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
                return -EINVAL;
        if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}
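
/*
 * Illustrative sketch (the exact policy the marks drive lives in the DMA
 * core, not here): setting the water marks for the 16 KiB bucket might
 * look like
 *
 *      drm_buf_desc_t marks;
 *      memset(&marks, 0, sizeof(marks));
 *      marks.size      = 16384;
 *      marks.low_mark  = 4;
 *      marks.high_mark = 60;
 *      ioctl(fd, DRM_IOCTL_MARK_BUFS, &marks);
 */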

/**
 * Unreserve the buffers in the list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_free_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d\n", request.count );
        for ( i = 0 ; i < request.count ; i++ ) {
                if ( copy_from_user( &idx,
                                     &request.list[i],
                                     sizeof(idx) ) )
                        return -EFAULT;
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR( "Process %d freeing buffer not owned\n",
                                   current->pid );
                        return -EINVAL;
                }
                drm_free_buffer( dev, buf );
        }

        return 0;
}

/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or FB buffer region with do_mmap(), and copies
 * information about each buffer into user space. The PCI buffers are
 * already mapped on the addbufs_pci() call.
 */
int drm_mapbufs( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_map_t __user *argp = (void __user *)arg;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        if ( request.count >= dma->buf_count ) {
                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
                    || (drm_core_check_feature(dev, DRIVER_SG)
                        && (dma->flags & _DRM_DMA_USE_SG))
                    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
                        && (dma->flags & _DRM_DMA_USE_FB))) {
                        drm_map_t *map = dev->agp_buffer_map;

                        if ( !map ) {
                                retcode = -EINVAL;
                                goto done;
                        }

#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, map->size,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED,
                                           (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                } else {
#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, dma->byte_count,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                }
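                /* do_mmap() returns the mapped address on success; on
                 * failure it returns a negative errno cast to unsigned
                 * long, which lands within 1024 bytes of the top of the
                 * address space, hence the check below. */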
                if ( virtual > -1024UL ) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void __user *)virtual;

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        if ( copy_to_user( &request.list[i].idx,
                                           &dma->buflist[i]->idx,
                                           sizeof(request.list[0].idx) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].total,
                                           &dma->buflist[i]->total,
                                           sizeof(request.list[0].total) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].used,
                                           &zero,
                                           sizeof(zero) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset; /* *** */
                        if ( copy_to_user( &request.list[i].address,
                                           &address,
                                           sizeof(address) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return retcode;
}
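
/*
 * Userspace usage sketch for the ioctl above (illustrative only; assumes
 * the drm_buf_map_t/drm_buf_pub_t layouts from drm.h, with `total' the
 * overall buffer count summed across the buckets reported by
 * DRM_IOCTL_INFO_BUFS):
 *
 *      drm_buf_map_t req;
 *      memset(&req, 0, sizeof(req));
 *      req.count = total;
 *      req.list  = calloc(total, sizeof(drm_buf_pub_t));
 *      ioctl(fd, DRM_IOCTL_MAP_BUFS, &req);
 *
 * On return, req.virtual points at the mapped region and each
 * req.list[i].address at buffer i within it.
 */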