X-Git-Url: http://git.cascardo.info/?a=blobdiff_plain;f=sound%2Fcore%2Fpcm_native.c;h=ca224fa2a33a043b1006dbd6c4de36fd9ac5291e;hb=aa8edd8ca6a110349b00e360823c64e8cd106289;hp=bfe1cf6b492f08489a2f7a5dd7bc9e54ae61a4cc;hpb=811deedebab38f8360a700a52b0b75688c9a10f7;p=cascardo%2Flinux.git

diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index bfe1cf6b492f..ca224fa2a33a 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -35,9 +35,6 @@
 #include 
 #include 
 #include 
-#if defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT)
-#include 
-#endif
 
 /*
  * Compatibility
@@ -77,6 +74,14 @@ static int snd_pcm_open(struct file *file, struct snd_pcm *pcm, int stream);
 static DEFINE_RWLOCK(snd_pcm_link_rwlock);
 static DECLARE_RWSEM(snd_pcm_link_rwsem);
 
+/**
+ * snd_pcm_stream_lock - Lock the PCM stream
+ * @substream: PCM substream
+ *
+ * This locks the PCM stream's spinlock or mutex depending on the nonatomic
+ * flag of the given substream.  It also takes the global link rw lock
+ * (or rw sem) to avoid races with linked streams.
+ */
 void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
 {
        if (substream->pcm->nonatomic) {
@@ -89,6 +94,12 @@ void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
 }
 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock);
 
+/**
+ * snd_pcm_stream_unlock - Unlock the PCM stream
+ * @substream: PCM substream
+ *
+ * This unlocks the PCM stream that has been locked via snd_pcm_stream_lock().
+ */
 void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
 {
        if (substream->pcm->nonatomic) {
@@ -101,6 +112,14 @@ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream)
 }
 EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock);
 
+/**
+ * snd_pcm_stream_lock_irq - Lock the PCM stream
+ * @substream: PCM substream
+ *
+ * This locks the PCM stream like snd_pcm_stream_lock() and disables the
+ * local IRQ (only when nonatomic is false).  In the nonatomic case, this
+ * is identical to snd_pcm_stream_lock().
+ */
 void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
 {
        if (!substream->pcm->nonatomic)
@@ -109,6 +128,12 @@ void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
 }
 EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq);
 
+/**
+ * snd_pcm_stream_unlock_irq - Unlock the PCM stream
+ * @substream: PCM substream
+ *
+ * This is a counterpart of snd_pcm_stream_lock_irq().
+ */
 void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream)
 {
        snd_pcm_stream_unlock(substream);
@@ -127,6 +152,13 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream)
 }
 EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave);
 
+/**
+ * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream
+ * @substream: PCM substream
+ * @flags: irq flags
+ *
+ * This is a counterpart of snd_pcm_stream_lock_irqsave().
+ */
 void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream,
                                       unsigned long flags)
 {
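For reference, the locking API documented above is what drivers call when they
have to poke a running stream from a timer or interrupt context.  A minimal
sketch of the intended usage, assuming the snd_pcm_stream_lock_irqsave() macro
from <sound/pcm.h> (the wrapper around the _snd_pcm_stream_lock_irqsave()
exported above); my_chip_xrun() is a hypothetical helper, not part of this
patch:

        /* Hypothetical helper: stop a stream on an xrun condition.  The
         * irqsave variant picks the spinlock or the mutex according to
         * pcm->nonatomic, so the same driver code serves both cases.
         */
        static void my_chip_xrun(struct snd_pcm_substream *substream)
        {
                unsigned long flags;

                snd_pcm_stream_lock_irqsave(substream, flags);
                if (snd_pcm_running(substream))
                        snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
                snd_pcm_stream_unlock_irqrestore(substream, flags);
        }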
@@ -195,6 +227,21 @@ int snd_pcm_info_user(struct snd_pcm_substream *substream,
        return err;
 }
 
+static bool hw_support_mmap(struct snd_pcm_substream *substream)
+{
+       if (!(substream->runtime->hw.info & SNDRV_PCM_INFO_MMAP))
+               return false;
+       /* check architectures that return -EINVAL from dma_mmap_coherent() */
+       /* FIXME: this should be some global flag */
+#if defined(CONFIG_C6X) || defined(CONFIG_FRV) || defined(CONFIG_MN10300) ||\
+       defined(CONFIG_PARISC) || defined(CONFIG_XTENSA)
+       if (!substream->ops->mmap &&
+           substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
+               return false;
+#endif
+       return true;
+}
+
 #undef RULES_DEBUG
 
 #ifdef RULES_DEBUG
@@ -372,8 +419,12 @@ int snd_pcm_hw_refine(struct snd_pcm_substream *substream,
        }
 
        hw = &substream->runtime->hw;
-       if (!params->info)
+       if (!params->info) {
                params->info = hw->info & ~SNDRV_PCM_INFO_FIFO_IN_FRAMES;
+               if (!hw_support_mmap(substream))
+                       params->info &= ~(SNDRV_PCM_INFO_MMAP |
+                                         SNDRV_PCM_INFO_MMAP_VALID);
+       }
        if (!params->fifo_size) {
                m = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);
                i = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS);
@@ -781,16 +832,15 @@ static int snd_pcm_action_group(struct action_ops *ops,
 {
        struct snd_pcm_substream *s = NULL;
        struct snd_pcm_substream *s1;
-       int res = 0;
+       int res = 0, depth = 1;
 
        snd_pcm_group_for_each_entry(s, substream) {
                if (do_lock && s != substream) {
                        if (s->pcm->nonatomic)
-                               mutex_lock_nested(&s->self_group.mutex,
-                                                 SINGLE_DEPTH_NESTING);
+                               mutex_lock_nested(&s->self_group.mutex, depth);
                        else
-                               spin_lock_nested(&s->self_group.lock,
-                                                SINGLE_DEPTH_NESTING);
+                               spin_lock_nested(&s->self_group.lock, depth);
+                       depth++;
                }
                res = ops->pre_action(s, state);
                if (res < 0)
@@ -850,14 +900,19 @@ static int snd_pcm_action_single(struct action_ops *ops,
        return res;
 }
 
-/* call in mutex-protected context */
-static int snd_pcm_action_mutex(struct action_ops *ops,
-                               struct snd_pcm_substream *substream,
-                               int state)
+/*
+ * Note: call with stream lock
+ */
+static int snd_pcm_action(struct action_ops *ops,
+                         struct snd_pcm_substream *substream,
+                         int state)
 {
        int res;
 
-       if (snd_pcm_stream_linked(substream)) {
+       if (!snd_pcm_stream_linked(substream))
+               return snd_pcm_action_single(ops, substream, state);
+
+       if (substream->pcm->nonatomic) {
                if (!mutex_trylock(&substream->group->mutex)) {
                        mutex_unlock(&substream->self_group.mutex);
                        mutex_lock(&substream->group->mutex);
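The mutex_trylock() dance in the last context lines above is a lock-upgrade
pattern: the caller already holds the per-stream self_group lock but now needs
the group lock as well, and the established ordering is group before self.  A
standalone sketch of the pattern with plain spinlocks (illustrative names, not
from this file):

        /* Called with *self held; returns with *group and *self both held.
         * If the opportunistic trylock fails, drop *self and retake both
         * locks in the canonical group -> self order, avoiding an ABBA
         * deadlock against a thread that already holds the group lock.
         * The protected state may have changed while *self was dropped,
         * so the caller has to re-validate it afterwards.
         */
        static void upgrade_to_group_lock(spinlock_t *group, spinlock_t *self)
        {
                if (!spin_trylock(group)) {
                        spin_unlock(self);
                        spin_lock(group);
                        spin_lock(self);
                }
        }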
@@ -866,24 +921,6 @@ static int snd_pcm_action_mutex(struct action_ops *ops,
                res = snd_pcm_action_group(ops, substream, state, 1);
                mutex_unlock(&substream->group->mutex);
        } else {
-               res = snd_pcm_action_single(ops, substream, state);
-       }
-       return res;
-}
-
-/*
- * Note: call with stream lock
- */
-static int snd_pcm_action(struct action_ops *ops,
-                         struct snd_pcm_substream *substream,
-                         int state)
-{
-       int res;
-
-       if (substream->pcm->nonatomic)
-               return snd_pcm_action_mutex(ops, substream, state);
-
-       if (snd_pcm_stream_linked(substream)) {
                if (!spin_trylock(&substream->group->lock)) {
                        spin_unlock(&substream->self_group.lock);
                        spin_lock(&substream->group->lock);
@@ -891,35 +928,10 @@ static int snd_pcm_action(struct action_ops *ops,
                }
                res = snd_pcm_action_group(ops, substream, state, 1);
                spin_unlock(&substream->group->lock);
-       } else {
-               res = snd_pcm_action_single(ops, substream, state);
        }
        return res;
 }
 
-static int snd_pcm_action_lock_mutex(struct action_ops *ops,
-                                    struct snd_pcm_substream *substream,
-                                    int state)
-{
-       int res;
-
-       down_read(&snd_pcm_link_rwsem);
-       if (snd_pcm_stream_linked(substream)) {
-               mutex_lock(&substream->group->mutex);
-               mutex_lock_nested(&substream->self_group.mutex,
-                                 SINGLE_DEPTH_NESTING);
-               res = snd_pcm_action_group(ops, substream, state, 1);
-               mutex_unlock(&substream->self_group.mutex);
-               mutex_unlock(&substream->group->mutex);
-       } else {
-               mutex_lock(&substream->self_group.mutex);
-               res = snd_pcm_action_single(ops, substream, state);
-               mutex_unlock(&substream->self_group.mutex);
-       }
-       up_read(&snd_pcm_link_rwsem);
-       return res;
-}
-
 /*
  * Note: don't use any locks before
  */
@@ -929,22 +941,9 @@ static int snd_pcm_action_lock_irq(struct action_ops *ops,
 {
        int res;
 
-       if (substream->pcm->nonatomic)
-               return snd_pcm_action_lock_mutex(ops, substream, state);
-
-       read_lock_irq(&snd_pcm_link_rwlock);
-       if (snd_pcm_stream_linked(substream)) {
-               spin_lock(&substream->group->lock);
-               spin_lock(&substream->self_group.lock);
-               res = snd_pcm_action_group(ops, substream, state, 1);
-               spin_unlock(&substream->self_group.lock);
-               spin_unlock(&substream->group->lock);
-       } else {
-               spin_lock(&substream->self_group.lock);
-               res = snd_pcm_action_single(ops, substream, state);
-               spin_unlock(&substream->self_group.lock);
-       }
-       read_unlock_irq(&snd_pcm_link_rwlock);
+       snd_pcm_stream_lock_irq(substream);
+       res = snd_pcm_action(ops, substream, state);
+       snd_pcm_stream_unlock_irq(substream);
        return res;
 }
 
@@ -2072,7 +2071,7 @@ int snd_pcm_hw_constraints_complete(struct snd_pcm_substream *substream)
                mask |= 1 << SNDRV_PCM_ACCESS_RW_INTERLEAVED;
        if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
                mask |= 1 << SNDRV_PCM_ACCESS_RW_NONINTERLEAVED;
-       if (hw->info & SNDRV_PCM_INFO_MMAP) {
+       if (hw_support_mmap(substream)) {
                if (hw->info & SNDRV_PCM_INFO_INTERLEAVED)
                        mask |= 1 << SNDRV_PCM_ACCESS_MMAP_INTERLEAVED;
                if (hw->info & SNDRV_PCM_INFO_NONINTERLEAVED)
@@ -3251,20 +3250,6 @@ static inline struct page *
 snd_pcm_default_page_ops(struct snd_pcm_substream *substream, unsigned long ofs)
 {
        void *vaddr = substream->runtime->dma_area + ofs;
-#if defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT)
-       if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
-               return virt_to_page(CAC_ADDR(vaddr));
-#endif
-#if defined(CONFIG_PPC32) && defined(CONFIG_NOT_COHERENT_CACHE)
-       if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV) {
-               dma_addr_t addr = substream->runtime->dma_addr + ofs;
-               addr -= get_dma_offset(substream->dma_buffer.dev.dev);
-               /* assume dma_handle set via pfn_to_phys() in
-                * mm/dma-noncoherent.c
-                */
-               return pfn_to_page(addr >> PAGE_SHIFT);
-       }
-#endif
        return virt_to_page(vaddr);
 }
 
@@ -3309,16 +3294,18 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
        .fault =        snd_pcm_mmap_data_fault,
 };
 
-#ifndef ARCH_HAS_DMA_MMAP_COHERENT
-/* This should be defined / handled globally! */
-#ifdef CONFIG_ARM
-#define ARCH_HAS_DMA_MMAP_COHERENT
-#endif
-#endif
-
 /*
  * mmap the DMA buffer on RAM
  */
+
+/**
+ * snd_pcm_lib_default_mmap - Default PCM data mmap function
+ * @substream: PCM substream
+ * @area: VMA
+ *
+ * This is the default mmap handler for PCM data.  When the mmap pcm_ops is
+ * NULL, this function is invoked implicitly.
+ */
 int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
                             struct vm_area_struct *area)
 {
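As the new kerneldoc states, the core falls back to this handler whenever a
driver leaves the mmap field of its snd_pcm_ops NULL; a driver can also call
it explicitly after doing its own setup.  A hypothetical callback sketch
(my_pcm_mmap is a placeholder, not part of this patch):

        /* Hypothetical: perform driver-specific checks, then defer to the
         * core's default handler, the same path taken when .mmap is NULL.
         */
        static int my_pcm_mmap(struct snd_pcm_substream *substream,
                               struct vm_area_struct *area)
        {
                /* driver-specific adjustments could go here */
                return snd_pcm_lib_default_mmap(substream, area);
        }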
@@ -3331,7 +3318,7 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
                                         area->vm_end - area->vm_start,
                                         area->vm_page_prot);
        }
 #endif /* CONFIG_GENERIC_ALLOCATOR */
-#ifdef ARCH_HAS_DMA_MMAP_COHERENT
+#ifndef CONFIG_X86 /* for avoiding warnings arch/x86/mm/pat.c */
        if (!substream->ops->page &&
            substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV)
                return dma_mmap_coherent(substream->dma_buffer.dev.dev,
@@ -3339,11 +3326,7 @@ int snd_pcm_lib_default_mmap(struct snd_pcm_substream *substream,
                                         substream->runtime->dma_area,
                                         substream->runtime->dma_addr,
                                         area->vm_end - area->vm_start);
-#elif defined(CONFIG_MIPS) && defined(CONFIG_DMA_NONCOHERENT)
-       if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV &&
-           !plat_device_is_coherent(substream->dma_buffer.dev.dev))
-               area->vm_page_prot = pgprot_noncached(area->vm_page_prot);
-#endif /* ARCH_HAS_DMA_MMAP_COHERENT */
+#endif /* CONFIG_X86 */
        /* mmap with fault handler */
        area->vm_ops = &snd_pcm_vm_ops_data_fault;
        return 0;
@@ -3354,6 +3337,15 @@ EXPORT_SYMBOL_GPL(snd_pcm_lib_default_mmap);
  * mmap the DMA buffer on I/O memory area
  */
 #if SNDRV_PCM_INFO_MMAP_IOMEM
+/**
+ * snd_pcm_lib_mmap_iomem - Default PCM data mmap function for I/O mem
+ * @substream: PCM substream
+ * @area: VMA
+ *
+ * When your hardware uses iomapped pages as the hardware buffer and you want
+ * to mmap them, pass this function as the mmap pcm_ops.  Note that this is
+ * supposed to work only on limited architectures.
+ */
 int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream,
                           struct vm_area_struct *area)
 {
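Following the kerneldoc above, a driver whose ring buffer lives in device I/O
memory wires this helper into its ops table.  A sketch; every my_pcm_* handler
is hypothetical, while snd_pcm_lib_ioctl and snd_pcm_lib_mmap_iomem are the
real library helpers:

        /* Hypothetical ops table: .mmap points at the iomem helper that the
         * new kerneldoc documents; the other callbacks are driver-specific.
         */
        static struct snd_pcm_ops my_pcm_ops = {
                .open =         my_pcm_open,
                .close =        my_pcm_close,
                .ioctl =        snd_pcm_lib_ioctl,
                .hw_params =    my_pcm_hw_params,
                .hw_free =      my_pcm_hw_free,
                .prepare =      my_pcm_prepare,
                .trigger =      my_pcm_trigger,
                .pointer =      my_pcm_pointer,
                .mmap =         snd_pcm_lib_mmap_iomem,
        };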