/**
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include "drmP.h"
/**
 * Compute the size order.  Returns the exponent of the smallest power of two
 * which is greater than or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order( unsigned long size )
{
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
		;

	if (size & (size - 1))
		++order;

	return order;
}
EXPORT_SYMBOL(drm_order);
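/*
 * Illustrative examples (not part of the original file): drm_order() rounds
 * up to the next power-of-two exponent, so
 *
 *	drm_order(1)             == 0
 *	drm_order(PAGE_SIZE)     == PAGE_SHIFT
 *	drm_order(PAGE_SIZE + 1) == PAGE_SHIFT + 1
 *
 * The addbufs_*() functions below rely on this to bucket buffers of a given
 * size into dma->bufs[order].
 */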
/*
 * Used to allocate 32-bit handles for _DRM_SHM regions.
 * The 0x10000000 value is chosen to be out of the way of
 * FB/register and GART physical addresses.
 */
static unsigned int map32_handle = 0x10000000;
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRRs where
 * applicable and if supported by the kernel.
 */
int drm_addmap( struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map;
	drm_map_t __user *argp = (void __user *)arg;
	drm_map_list_t *list;
	drm_dma_handle_t *dmah;

	if ( !(filp->f_mode & 3) ) return -EACCES; /* Require read/write */

	map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
	if ( !map )
		return -ENOMEM;

	if ( copy_from_user( map, argp, sizeof(*map) ) ) {
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EFAULT;
	}

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}
	DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
		   map->offset, map->size, map->type );
	if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}
	map->mtrr   = -1;
	map->handle = NULL;

	switch ( map->type ) {
	case _DRM_REGISTERS:
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
		if ( map->offset + map->size < map->offset ||
		     map->offset < virt_to_phys(high_memory) ) {
			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
			return -EINVAL;
		}
#endif
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		if (drm_core_has_MTRR(dev)) {
			if ( map->type == _DRM_FRAME_BUFFER ||
			     (map->flags & _DRM_WRITE_COMBINING) ) {
				map->mtrr = mtrr_add( map->offset, map->size,
						      MTRR_TYPE_WRCOMB, 1 );
			}
		}
		if (map->type == _DRM_REGISTERS)
			map->handle = drm_ioremap( map->offset, map->size,
						   dev );
		break;

	case _DRM_SHM:
		map->handle = vmalloc_32(map->size);
		DRM_DEBUG( "%lu %d %p\n",
			   map->size, drm_order( map->size ), map->handle );
		if ( !map->handle ) {
			drm_free( map, sizeof(*map), DRM_MEM_MAPS );
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if ( map->flags & _DRM_CONTAINS_LOCK ) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->lock.hw_lock != NULL) {
				vfree( map->handle );
				drm_free( map, sizeof(*map), DRM_MEM_MAPS );
				return -EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
		}
		break;

	case _DRM_AGP:
		if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
			map->offset += dev->hose->mem_space->start;
#endif
			map->offset += dev->agp->base;
			map->mtrr = dev->agp->agp_mtrr; /* for getmap */
		}
		break;

	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -EINVAL;
		}
		map->offset += dev->sg->handle;
		break;

	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
		if (!dmah) {
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;

	default:
		drm_free( map, sizeof(*map), DRM_MEM_MAPS );
		return -EINVAL;
	}

	list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
	if (!list) {
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		return -EINVAL;
	}
	memset(list, 0, sizeof(*list));
	list->map = map;

	down(&dev->struct_sem);
	list_add(&list->head, &dev->maplist->head);
	/* Assign a 32-bit handle for _DRM_SHM mappings */
	/* We do it here so that dev->struct_sem protects the increment */
	if (map->type == _DRM_SHM)
		map->offset = map32_handle += PAGE_SIZE;
	up(&dev->struct_sem);

	if ( copy_to_user( argp, map, sizeof(*map) ) )
		return -EFAULT;
	if (copy_to_user(&argp->handle, &map->offset, sizeof(map->offset)))
		return -EFAULT;
	return 0;
}
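/*
 * Usage sketch (illustrative only, not part of the driver): a client
 * typically reaches drm_addmap() through the DRM_IOCTL_ADD_MAP request
 * defined in drm.h, passing a page-aligned offset and size:
 *
 *	drm_map_t map;
 *	memset(&map, 0, sizeof(map));
 *	map.size  = getpagesize();
 *	map.type  = _DRM_SHM;
 *	map.flags = _DRM_CONTAINS_LOCK;
 *	if (ioctl(fd, DRM_IOCTL_ADD_MAP, &map) == 0)
 *		;	// map.handle now holds the token for mmap()/rmmap
 *
 * On return, the handle written back above is what user space passes as the
 * mmap() offset and to DRM_IOCTL_RM_MAP.
 */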
/**
 * Remove a map private from the list and deallocate resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list,
 * checks whether it's being used, and frees any associated resources (such
 * as MTRRs) if it's not.
 */
int drm_rmmap(struct inode *inode, struct file *filp,
	      unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	struct list_head *list;
	drm_map_list_t *r_list = NULL;
	drm_vma_entry_t *pt, *prev;
	drm_map_t *map;
	drm_map_t request;
	int found_maps = 0;

	if (copy_from_user(&request, (drm_map_t __user *)arg,
			   sizeof(request))) {
		return -EFAULT;
	}

	down(&dev->struct_sem);
	list = &dev->maplist->head;
	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);

		if (r_list->map &&
		    r_list->map->offset == (unsigned long) request.handle &&
		    r_list->map->flags & _DRM_REMOVABLE) break;
	}

	/* List has wrapped around to the head pointer, or it's empty and we
	 * didn't find anything.
	 */
	if (list == (&dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
	map = r_list->map;
	list_del(list);
	drm_free(list, sizeof(*list), DRM_MEM_MAPS);

	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma->vm_private_data == map) found_maps++;
	}

	if (!found_maps) {
		drm_dma_handle_t dmah;

		switch (map->type) {
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if (drm_core_has_MTRR(dev)) {
				if (map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
			}
			drm_ioremapfree(map->handle, map->size, dev);
			break;
		case _DRM_SHM:
			vfree(map->handle);
			break;
		case _DRM_AGP:
		case _DRM_SCATTER_GATHER:
			break;
		case _DRM_CONSISTENT:
			dmah.vaddr = map->handle;
			dmah.busaddr = map->offset;
			dmah.size = map->size;
			__drm_pci_free(dev, &dmah);
			break;
		}
		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
	}
	up(&dev->struct_sem);
	return 0;
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_free_pages(entry->seglist[i],
					       entry->page_order,
					       DRM_MEM_DMA);
			}
		}
		drm_free(entry->seglist,
			 entry->seg_count *
			 sizeof(*entry->seglist),
			 DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			if (entry->buflist[i].dev_private) {
				drm_free(entry->buflist[i].dev_private,
					 entry->buflist[i].dev_priv_size,
					 DRM_MEM_BUFS);
			}
		}
		drm_free(entry->buflist,
			 entry->buf_count *
			 sizeof(*entry->buflist),
			 DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks, creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
static int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG( "count:      %d\n", count );
	DRM_DEBUG( "order:      %d\n", order );
	DRM_DEBUG( "size:       %d\n", size );
	DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
	DRM_DEBUG( "alignment:  %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total:      %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc( buf->dev_priv_size,
					      DRM_MEM_BUFS );
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}
		memset( buf->dev_private, 0, buf->dev_priv_size );

		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
#endif /* __OS_HAS_AGP */
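/*
 * Worked example (illustrative): with PAGE_SIZE 4096, a request for
 * count = 32 buffers of size = 65536 gives order = 16, size = 1 << 16,
 * page_order = 4 and total = PAGE_SIZE << 4 = 65536, so each buffer spans
 * exactly one 16-page allocation unit.  With _DRM_PAGE_ALIGN requested,
 * alignment == PAGE_ALIGN(size) == size here, since size is already
 * page-aligned.
 */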
static int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	unsigned long page;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
		   request->count, request->size, size,
		   order, dev->queue_count );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
				    DRM_MEM_SEGS );
	if ( !entry->seglist ) {
		drm_free( entry->buflist,
			  count * sizeof(*entry->buflist),
			  DRM_MEM_BUFS );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
				   * sizeof(*dma->pagelist),
				   DRM_MEM_PAGES );
	if (!temp_pagelist) {
		drm_free( entry->buflist,
			  count * sizeof(*entry->buflist),
			  DRM_MEM_BUFS );
		drm_free( entry->seglist,
			  count * sizeof(*entry->seglist),
			  DRM_MEM_SEGS );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist,
	       dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG( "pagelist: %d entries\n",
		   dma->page_count + (count << page_order) );

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while ( entry->buf_count < count ) {
		page = drm_alloc_pages( page_order, DRM_MEM_DMA );
		if ( !page ) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			drm_free( temp_pagelist,
				  (dma->page_count + (count << page_order))
				  * sizeof(*dma->pagelist),
				  DRM_MEM_PAGES );
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = page;
		for ( i = 0 ; i < (1 << page_order) ; i++ ) {
			DRM_DEBUG( "page %d @ 0x%08lx\n",
				   dma->page_count + page_count,
				   page + PAGE_SIZE * i );
			temp_pagelist[dma->page_count + page_count++]
				= page + PAGE_SIZE * i;
		}
		for ( offset = 0 ;
		      offset + size <= total && entry->buf_count < count ;
		      offset += alignment, ++entry->buf_count ) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;
			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(page + offset);
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			init_waitqueue_head( &buf->dma_wait );
			buf->filp = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = drm_alloc( buf->dev_priv_size,
						      DRM_MEM_BUFS );
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				drm_free( temp_pagelist,
					  (dma->page_count + (count << page_order))
					  * sizeof(*dma->pagelist),
					  DRM_MEM_PAGES );
				up( &dev->struct_sem );
				atomic_dec( &dev->buf_alloc );
				return -ENOMEM;
			}
			memset( buf->dev_private, 0, buf->dev_priv_size );

			DRM_DEBUG( "buffer %d @ %p\n",
				   entry->buf_count, buf->address );
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		drm_free( temp_pagelist,
			  (dma->page_count + (count << page_order))
			  * sizeof(*dma->pagelist),
			  DRM_MEM_PAGES );
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		drm_free(dma->pagelist,
			 dma->page_count * sizeof(*dma->pagelist),
			 DRM_MEM_PAGES);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
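/*
 * Worked example (illustrative): for a PCI request of count = 8 buffers with
 * page_order = 1, each drm_alloc_pages() call above returns a 2-page segment,
 * so the loop adds 8 segments and 8 << 1 = 16 pagelist entries, and
 * dma->byte_count grows by PAGE_SIZE * 16.  temp_pagelist is only committed
 * to dma->pagelist once every allocation has succeeded.
 */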
static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;

	if ( !dma ) return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
		? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG( "count:      %d\n", count );
	DRM_DEBUG( "order:      %d\n", order );
	DRM_DEBUG( "size:       %d\n", size );
	DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
	DRM_DEBUG( "alignment:  %d\n", alignment );
	DRM_DEBUG( "page_order: %d\n", page_order );
	DRM_DEBUG( "total:      %d\n", total );

	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	if ( dev->queue_count ) return -EBUSY; /* Not while in use */

	spin_lock( &dev->count_lock );
	if ( dev->buf_use ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	atomic_inc( &dev->buf_alloc );
	spin_unlock( &dev->count_lock );

	down( &dev->struct_sem );
	entry = &dma->bufs[order];
	if ( entry->buf_count ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM; /* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -EINVAL;
	}

	entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
				    DRM_MEM_BUFS );
	if ( !entry->buflist ) {
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while ( entry->buf_count < count ) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head( &buf->dma_wait );
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc( buf->dev_priv_size,
					      DRM_MEM_BUFS );
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up( &dev->struct_sem );
			atomic_dec( &dev->buf_alloc );
			return -ENOMEM;
		}

		memset( buf->dev_private, 0, buf->dev_priv_size );

		DRM_DEBUG( "buffer %d @ %p\n",
			   entry->buf_count, buf->address );

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG( "byte_count: %d\n", byte_count );

	temp_buflist = drm_realloc( dma->buflist,
				    dma->buf_count * sizeof(*dma->buflist),
				    (dma->buf_count + entry->buf_count)
				    * sizeof(*dma->buflist),
				    DRM_MEM_BUFS );
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up( &dev->struct_sem );
		atomic_dec( &dev->buf_alloc );
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for ( i = 0 ; i < entry->buf_count ; i++ ) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
	DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

	up( &dev->struct_sem );

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec( &dev->buf_alloc );
	return 0;
}
int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	if (dev->queue_count)
		return -EBUSY;	/* Not while in use */

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	down(&dev->struct_sem);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
				   DRM_MEM_BUFS);
	if (!entry->buflist) {
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memset(entry->buflist, 0, count * sizeof(*entry->buflist));

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		init_waitqueue_head(&buf->dma_wait);
		buf->filp = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			up(&dev->struct_sem);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		memset(buf->dev_private, 0, buf->dev_priv_size);

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = drm_realloc(dma->buflist,
				   dma->buf_count * sizeof(*dma->buflist),
				   (dma->buf_count + entry->buf_count)
				   * sizeof(*dma->buflist), DRM_MEM_BUFS);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		up(&dev->struct_sem);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	up(&dev->struct_sem);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_FB;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, the call is dispatched to drm_addbufs_agp(),
 * drm_addbufs_sg(), drm_addbufs_fb() or drm_addbufs_pci() for AGP,
 * scatter-gather, framebuffer or consistent PCI memory, respectively.
 */
int drm_addbufs( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_buf_desc_t request;
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

#if __OS_HAS_AGP
	if ( request.flags & _DRM_AGP_BUFFER )
		ret = drm_addbufs_agp(dev, &request);
	else
#endif
	if ( request.flags & _DRM_SG_BUFFER )
		ret = drm_addbufs_sg(dev, &request);
	else if ( request.flags & _DRM_FB_BUFFER )
		ret = drm_addbufs_fb(dev, &request);
	else
		ret = drm_addbufs_pci(dev, &request);

	if (copy_to_user((void __user *)arg, &request,
			 sizeof(request)))
		return -EFAULT;

	return ret;
}
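/*
 * Usage sketch (illustrative only, not part of the driver): user space
 * reaches this entry point through DRM_IOCTL_ADD_BUFS (or the libdrm
 * drmAddBufs() wrapper), e.g. to request 32 AGP buffers of 64KB each:
 *
 *	drm_buf_desc_t desc;
 *	memset(&desc, 0, sizeof(desc));
 *	desc.count     = 32;
 *	desc.size      = 65536;
 *	desc.flags     = _DRM_AGP_BUFFER;
 *	desc.agp_start = aperture_offset;	// hypothetical value
 *	ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 *	// desc.count and desc.size now report what was actually allocated
 */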
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing the allocation of more buffers after this call.
 * Information about each requested buffer is then copied into user space.
 */
int drm_infobufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_info_t request;
	drm_buf_info_t __user *argp = (void __user *)arg;
	int i;
	int count;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
		if ( dma->bufs[i].buf_count ) ++count;
	}

	DRM_DEBUG( "count = %d\n", count );

	if ( request.count >= count ) {
		for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
			if ( dma->bufs[i].buf_count ) {
				drm_buf_desc_t __user *to = &request.list[count];
				drm_buf_entry_t *from = &dma->bufs[i];
				drm_freelist_t *list = &dma->bufs[i].freelist;
				if ( copy_to_user( &to->count,
						   &from->buf_count,
						   sizeof(from->buf_count) ) ||
				     copy_to_user( &to->size,
						   &from->buf_size,
						   sizeof(from->buf_size) ) ||
				     copy_to_user( &to->low_mark,
						   &list->low_mark,
						   sizeof(list->low_mark) ) ||
				     copy_to_user( &to->high_mark,
						   &list->high_mark,
						   sizeof(list->high_mark) ) )
					return -EFAULT;

				DRM_DEBUG( "%d %d %d %d %d\n",
					   i,
					   dma->bufs[i].buf_count,
					   dma->bufs[i].buf_size,
					   dma->bufs[i].freelist.low_mark,
					   dma->bufs[i].freelist.high_mark );
				++count;
			}
		}
	}
	request.count = count;

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	return 0;
}
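/*
 * Usage sketch (illustrative only): the list is typically sized by calling
 * the ioctl twice, first with count = 0 to learn how many entries exist,
 * then again with a large enough list:
 *
 *	drm_buf_info_t info = { 0, NULL };
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);		// info.count set
 *	info.list = calloc(info.count, sizeof(drm_buf_desc_t));
 *	ioctl(fd, DRM_IOCTL_INFO_BUFS, &info);		// list filled in
 */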
/**
 * Specifies a low and high water mark for buffer allocation.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the low and high water marks of the respective drm_device_dma::bufs
 * entry.
 *
 * \note This ioctl is deprecated and rarely, if ever, used.
 */
int drm_markbufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_desc_t request;
	int order;
	drm_buf_entry_t *entry;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request,
			     (drm_buf_desc_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d, %d, %d\n",
		   request.size, request.low_mark, request.high_mark );
	order = drm_order( request.size );
	if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
	entry = &dma->bufs[order];

	if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
		return -EINVAL;
	if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
		return -EINVAL;

	entry->freelist.low_mark = request.low_mark;
	entry->freelist.high_mark = request.high_mark;

	return 0;
}
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls drm_free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs( struct inode *inode, struct file *filp,
		  unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_free_t request;
	int i;
	int idx;
	drm_buf_t *buf;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	if ( copy_from_user( &request,
			     (drm_buf_free_t __user *)arg,
			     sizeof(request) ) )
		return -EFAULT;

	DRM_DEBUG( "%d\n", request.count );
	for ( i = 0 ; i < request.count ; i++ ) {
		if ( copy_from_user( &idx,
				     &request.list[i],
				     sizeof(idx) ) )
			return -EFAULT;
		if ( idx < 0 || idx >= dma->buf_count ) {
			DRM_ERROR( "Index %d (of %d max)\n",
				   idx, dma->buf_count - 1 );
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if ( buf->filp != filp ) {
			DRM_ERROR( "Process %d freeing buffer not owned\n",
				   current->pid );
			return -EINVAL;
		}
		drm_free_buffer( dev, buf );
	}

	return 0;
}
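/*
 * Usage sketch (illustrative only): a client returns buffers it reserved via
 * drmDMA()/the DMA ioctl by index:
 *
 *	int idx[2] = { buf0_idx, buf1_idx };	// hypothetical indices
 *	drm_buf_free_t req = { 2, idx };
 *	ioctl(fd, DRM_IOCTL_FREE_BUFS, &req);
 */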
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or FB buffer region with do_mmap(), and copies information
 * about each buffer into user space.  The PCI buffers are already mapped by
 * the addbufs_pci() call.
 */
int drm_mapbufs( struct inode *inode, struct file *filp,
		 unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_map_t __user *argp = (void __user *)arg;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	drm_buf_map_t request;
	int i;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if ( !dma ) return -EINVAL;

	spin_lock( &dev->count_lock );
	if ( atomic_read( &dev->buf_alloc ) ) {
		spin_unlock( &dev->count_lock );
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock( &dev->count_lock );

	if ( copy_from_user( &request, argp, sizeof(request) ) )
		return -EFAULT;

	if ( request.count >= dma->buf_count ) {
		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))
		    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
			&& (dma->flags & _DRM_DMA_USE_FB))) {
			drm_map_t *map = dev->agp_buffer_map;

			if ( !map ) {
				retcode = -EINVAL;
				goto done;
			}

#if LINUX_VERSION_CODE <= 0x020402
			down( &current->mm->mmap_sem );
#else
			down_write( &current->mm->mmap_sem );
#endif
			virtual = do_mmap( filp, 0, map->size,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED,
					   (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
			up( &current->mm->mmap_sem );
#else
			up_write( &current->mm->mmap_sem );
#endif
		} else {
#if LINUX_VERSION_CODE <= 0x020402
			down( &current->mm->mmap_sem );
#else
			down_write( &current->mm->mmap_sem );
#endif
			virtual = do_mmap( filp, 0, dma->byte_count,
					   PROT_READ | PROT_WRITE,
					   MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
			up( &current->mm->mmap_sem );
#else
			up_write( &current->mm->mmap_sem );
#endif
		}
		if ( virtual > -1024UL ) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request.virtual = (void __user *)virtual;

		for ( i = 0 ; i < dma->buf_count ; i++ ) {
			if ( copy_to_user( &request.list[i].idx,
					   &dma->buflist[i]->idx,
					   sizeof(request.list[0].idx) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].total,
					   &dma->buflist[i]->total,
					   sizeof(request.list[0].total) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			if ( copy_to_user( &request.list[i].used,
					   &zero,
					   sizeof(zero) ) ) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset; /* *** */
			if ( copy_to_user( &request.list[i].address,
					   &address,
					   sizeof(address) ) ) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
 done:
	request.count = dma->buf_count;
	DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

	if ( copy_to_user( argp, &request, sizeof(request) ) )
		return -EFAULT;

	return retcode;
}
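/*
 * Usage sketch (illustrative only): user space passes a drm_buf_map_t whose
 * list has room for at least dma->buf_count entries; on success, each
 * list[i].address points into the single mapping at request.virtual:
 *
 *	drm_buf_map_t req;
 *	req.count   = expected_count;	// e.g. from DRM_IOCTL_INFO_BUFS
 *	req.virtual = NULL;
 *	req.list    = calloc(expected_count, sizeof(drm_buf_pub_t));
 *	ioctl(fd, DRM_IOCTL_MAP_BUFS, &req);
 */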