static void *dma_direct_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flag,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
return consistent_alloc(flag, size, dma_handle);
static void dma_direct_free_coherent(struct device *dev, size_t size,
void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef NOT_COHERENT_CACHE
consistent_free(size, vaddr);
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
int nents, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
struct scatterlist *sg;
int i;
unsigned long offset,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
__dma_sync(page_to_phys(page) + offset, size, direction);
return page_to_phys(page) + offset;
dma_addr_t dma_address,
size_t size,
enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
/* It is not necessary to do cache cleanup
 *
static
int dma_direct_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t handle, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
#ifdef CONFIG_MMU
unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
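
Not part of the patch above, but for context, here is a minimal sketch of what the new calling convention looks like on the driver side, assuming a generic struct device and the standard dma_alloc_attrs()/dma_free_attrs() helpers: attributes are now passed as an unsigned long bit mask of DMA_ATTR_* flags instead of through a struct dma_attrs pointer, so the init_dma_attrs()/dma_set_attr() setup goes away. The example_alloc()/example_free() names are hypothetical, used only for illustration.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Illustrative only, not part of this patch: allocate a coherent,
 * write-combined buffer; attrs is now a plain unsigned long bit mask. */
static void *example_alloc(struct device *dev, size_t size,
			   dma_addr_t *handle)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL, attrs);
}

/* Matching free; the same attrs value is passed back on release. */
static void example_free(struct device *dev, size_t size, void *vaddr,
			 dma_addr_t handle)
{
	dma_free_attrs(dev, size, vaddr, handle, DMA_ATTR_WRITE_COMBINE);
}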