/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <linux/kthread.h>

#include <rdma/ib_fmr_pool.h>

#include "core_priv.h"

#define PFX "fmr_pool: "

enum {
        IB_FMR_MAX_REMAPS = 32,

        IB_FMR_HASH_BITS  = 8,
        IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
        IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
};

/*
 * If an FMR is not in use, then the list member will point to either
 * its pool's free_list (if the FMR can be mapped again; that is,
 * remap_count < pool->max_remaps) or its pool's dirty_list (if the
 * FMR needs to be unmapped before being remapped).  In either of
 * these cases it is a bug if the ref_count is not 0.  In other words,
 * if ref_count is > 0, then the list member must not be linked into
 * either free_list or dirty_list.
 *
 * The cache_node member is used to link the FMR into a cache bucket
 * (if caching is enabled).  This is independent of the reference
 * count of the FMR.  When a valid FMR is released, its ref_count is
 * decremented, and if ref_count reaches 0, the FMR is placed in
 * either free_list or dirty_list as appropriate.  However, it is not
 * removed from the cache and may be "revived" if a call to
71  * this case we just increment the ref_count and remove the FMR from
72  * free_list/dirty_list.
73  *
74  * Before we remap an FMR from free_list, we remove it from the cache
75  * (to prevent another user from obtaining a stale FMR).  When an FMR
76  * is released, we add it to the tail of the free list, so that our
77  * cache eviction policy is "least recently used."
78  *
79  * All manipulation of ref_count, list and cache_node is protected by
80  * pool_lock to maintain consistency.
81  */
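
/*
 * A rough sketch of the resulting lifecycle (the state labels are
 * illustrative only; they are not structures in this file):
 *
 *	free_list --ib_fmr_pool_map_phys()----> in use (ref_count > 0)
 *	in use ----ib_fmr_pool_unmap()--------> free_list
 *	                                        (remap_count < max_remaps)
 *	in use ----ib_fmr_pool_unmap()--------> dirty_list
 *	                                        (remap_count == max_remaps)
 *	dirty_list --ib_fmr_batch_release()---> free_list (remap_count = 0)
 *
 * A cached FMR stays in its hash bucket across these transitions until
 * it is about to be remapped or batch-released.
 */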

struct ib_fmr_pool {
        spinlock_t                pool_lock;

        int                       pool_size;
        int                       max_pages;
        int                       max_remaps;
        int                       dirty_watermark;
        int                       dirty_len;
        struct list_head          free_list;
        struct list_head          dirty_list;
        struct hlist_head        *cache_bucket;

        void                     (*flush_function)(struct ib_fmr_pool *pool,
                                                   void *              arg);
        void                     *flush_arg;

        struct task_struct       *thread;

        atomic_t                  req_ser;
        atomic_t                  flush_ser;

        wait_queue_head_t         force_wait;
};

static inline u32 ib_fmr_hash(u64 first_page)
{
        return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) &
                IB_FMR_HASH_MASK;
}

/* Caller must hold pool_lock */
static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool,
                                                      u64 *page_list,
                                                      int  page_list_len,
                                                      u64  io_virtual_address)
{
        struct hlist_head *bucket;
        struct ib_pool_fmr *fmr;

        if (!pool->cache_bucket)
                return NULL;

        bucket = pool->cache_bucket + ib_fmr_hash(*page_list);

        hlist_for_each_entry(fmr, bucket, cache_node)
                if (io_virtual_address == fmr->io_virtual_address &&
                    page_list_len      == fmr->page_list_len      &&
                    !memcmp(page_list, fmr->page_list,
                            page_list_len * sizeof *page_list))
                        return fmr;

        return NULL;
}

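/*
 * Reap the pool's dirty list: every dirty FMR is unmapped in a single
 * ib_unmap_fmr() call and then returned to free_list.  Two local lists
 * are built under pool_lock: fmr_list chains the underlying struct
 * ib_fmr entries (what ib_unmap_fmr() consumes), while unmap_list takes
 * the struct ib_pool_fmr wrappers so they can be spliced back onto
 * free_list after the unmap completes.
 */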
static void ib_fmr_batch_release(struct ib_fmr_pool *pool)
{
        int                 ret;
        struct ib_pool_fmr *fmr;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);

        spin_lock_irq(&pool->pool_lock);

        list_for_each_entry(fmr, &pool->dirty_list, list) {
                hlist_del_init(&fmr->cache_node);
                fmr->remap_count = 0;
                list_add_tail(&fmr->fmr->list, &fmr_list);

#ifdef DEBUG
                if (fmr->ref_count != 0) {
                        printk(KERN_WARNING PFX "Unmapping FMR %p with ref count %d\n",
                               fmr, fmr->ref_count);
                }
#endif
        }

        list_splice_init(&pool->dirty_list, &unmap_list);
        pool->dirty_len = 0;

        spin_unlock_irq(&pool->pool_lock);

        if (list_empty(&unmap_list))
                return;

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING PFX "ib_unmap_fmr returned %d\n", ret);

        spin_lock_irq(&pool->pool_lock);
        list_splice(&unmap_list, &pool->free_list);
        spin_unlock_irq(&pool->pool_lock);
}

static int ib_fmr_cleanup_thread(void *pool_ptr)
{
        struct ib_fmr_pool *pool = pool_ptr;

        do {
                /*
                 * req_ser counts flush requests and flush_ser counts
                 * completed flushes; the subtraction is wraparound-safe,
                 * so a negative difference means work is pending.
                 */
                if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
                        ib_fmr_batch_release(pool);

                        atomic_inc(&pool->flush_ser);
                        wake_up_interruptible(&pool->force_wait);

                        if (pool->flush_function)
                                pool->flush_function(pool, pool->flush_arg);
                }

                /* Sleep until new requests arrive or we are asked to stop. */
                set_current_state(TASK_INTERRUPTIBLE);
                if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
                    !kthread_should_stop())
                        schedule();
                __set_current_state(TASK_RUNNING);
        } while (!kthread_should_stop());

        return 0;
}

/**
 * ib_create_fmr_pool - Create an FMR pool
 * @pd:Protection domain for FMRs
 * @params:FMR pool parameters
 *
 * Create a pool of FMRs.  Return value is pointer to new pool or
 * error code if creation failed.
 */
struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd             *pd,
                                       struct ib_fmr_pool_param *params)
{
        struct ib_device   *device;
        struct ib_fmr_pool *pool;
        int i;
        int ret;
        int max_remaps;

        if (!params)
                return ERR_PTR(-EINVAL);

        device = pd->device;
        if (!device->alloc_fmr    || !device->dealloc_fmr  ||
            !device->map_phys_fmr || !device->unmap_fmr) {
                printk(KERN_INFO PFX "Device %s does not support FMRs\n",
                       device->name);
                return ERR_PTR(-ENOSYS);
        }

        if (!device->attrs.max_map_per_fmr)
                max_remaps = IB_FMR_MAX_REMAPS;
        else
                max_remaps = device->attrs.max_map_per_fmr;

        pool = kmalloc(sizeof *pool, GFP_KERNEL);
        if (!pool) {
                printk(KERN_WARNING PFX "couldn't allocate pool struct\n");
                return ERR_PTR(-ENOMEM);
        }

        pool->cache_bucket   = NULL;

        pool->flush_function = params->flush_function;
        pool->flush_arg      = params->flush_arg;

        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->dirty_list);

        if (params->cache) {
                pool->cache_bucket =
                        kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket,
                                GFP_KERNEL);
                if (!pool->cache_bucket) {
                        printk(KERN_WARNING PFX "Failed to allocate cache in pool\n");
                        ret = -ENOMEM;
                        goto out_free_pool;
                }

                for (i = 0; i < IB_FMR_HASH_SIZE; ++i)
                        INIT_HLIST_HEAD(pool->cache_bucket + i);
        }

        pool->pool_size       = 0;
        pool->max_pages       = params->max_pages_per_fmr;
        pool->max_remaps      = max_remaps;
        pool->dirty_watermark = params->dirty_watermark;
        pool->dirty_len       = 0;
        spin_lock_init(&pool->pool_lock);
        atomic_set(&pool->req_ser,   0);
        atomic_set(&pool->flush_ser, 0);
        init_waitqueue_head(&pool->force_wait);

        pool->thread = kthread_run(ib_fmr_cleanup_thread,
                                   pool,
                                   "ib_fmr(%s)",
                                   device->name);
        if (IS_ERR(pool->thread)) {
                printk(KERN_WARNING PFX "couldn't start cleanup thread\n");
                ret = PTR_ERR(pool->thread);
                goto out_free_pool;
        }

        {
                struct ib_pool_fmr *fmr;
                struct ib_fmr_attr fmr_attr = {
                        .max_pages  = params->max_pages_per_fmr,
                        .max_maps   = pool->max_remaps,
                        .page_shift = params->page_shift
                };
                int bytes_per_fmr = sizeof *fmr;

                if (pool->cache_bucket)
                        bytes_per_fmr += params->max_pages_per_fmr * sizeof(u64);

                for (i = 0; i < params->pool_size; ++i) {
                        fmr = kmalloc(bytes_per_fmr, GFP_KERNEL);
                        if (!fmr) {
                                printk(KERN_WARNING PFX
                                       "failed to allocate fmr struct for FMR %d\n",
                                       i);
                                goto out_fail;
                        }

                        fmr->pool             = pool;
                        fmr->remap_count      = 0;
                        fmr->ref_count        = 0;
                        INIT_HLIST_NODE(&fmr->cache_node);

                        fmr->fmr = ib_alloc_fmr(pd, params->access, &fmr_attr);
                        if (IS_ERR(fmr->fmr)) {
                                printk(KERN_WARNING PFX
                                       "fmr_create failed for FMR %d\n", i);
                                kfree(fmr);
                                goto out_fail;
                        }

                        list_add_tail(&fmr->list, &pool->free_list);
                        ++pool->pool_size;
                }
        }

        return pool;

 out_free_pool:
        kfree(pool->cache_bucket);
        kfree(pool);

        return ERR_PTR(ret);

 out_fail:
        ib_destroy_fmr_pool(pool);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ib_create_fmr_pool);
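
/*
 * A minimal creation/teardown sketch, assuming the caller already has a
 * protection domain "pd"; the parameter values below are illustrative,
 * not recommendations:
 *
 *	struct ib_fmr_pool_param params = {
 *		.max_pages_per_fmr = 64,
 *		.page_shift        = PAGE_SHIFT,
 *		.access            = IB_ACCESS_LOCAL_WRITE,
 *		.pool_size         = 32,
 *		.dirty_watermark   = 8,
 *		.cache             = 1,
 *	};
 *	struct ib_fmr_pool *pool = ib_create_fmr_pool(pd, &params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	...
 *	ib_destroy_fmr_pool(pool);
 */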

/**
 * ib_destroy_fmr_pool - Free FMR pool
 * @pool:FMR pool to free
 *
 * Destroy an FMR pool and free all associated resources.
 */
void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
{
        struct ib_pool_fmr *fmr;
        struct ib_pool_fmr *tmp;
        LIST_HEAD(fmr_list);
        int                 i;

        kthread_stop(pool->thread);
        ib_fmr_batch_release(pool);

        i = 0;
        list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
                if (fmr->remap_count) {
                        INIT_LIST_HEAD(&fmr_list);
                        list_add_tail(&fmr->fmr->list, &fmr_list);
                        ib_unmap_fmr(&fmr_list);
                }
                ib_dealloc_fmr(fmr->fmr);
                list_del(&fmr->list);
                kfree(fmr);
                ++i;
        }

        if (i < pool->pool_size)
                printk(KERN_WARNING PFX "pool still has %d regions registered\n",
                       pool->pool_size - i);

        kfree(pool->cache_bucket);
        kfree(pool);
}
EXPORT_SYMBOL(ib_destroy_fmr_pool);

/**
 * ib_flush_fmr_pool - Invalidate all unmapped FMRs
 * @pool:FMR pool to flush
 *
 * Ensure that all unmapped FMRs are fully invalidated.
 */
int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
{
        int serial;
        struct ib_pool_fmr *fmr, *next;

        /*
         * The free_list holds FMRs that may have been used
         * but have not been remapped enough times to be dirty.
         * Put them on the dirty list now so that the cleanup
         * thread will reap them too.
         */
        spin_lock_irq(&pool->pool_lock);
        list_for_each_entry_safe(fmr, next, &pool->free_list, list) {
                if (fmr->remap_count > 0)
                        list_move(&fmr->list, &pool->dirty_list);
        }
        spin_unlock_irq(&pool->pool_lock);

        serial = atomic_inc_return(&pool->req_ser);
        wake_up_process(pool->thread);

        if (wait_event_interruptible(pool->force_wait,
                                     atomic_read(&pool->flush_ser) - serial >= 0))
                return -EINTR;

        return 0;
}
EXPORT_SYMBOL(ib_flush_fmr_pool);
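
/*
 * A minimal flush sketch; the surrounding context is an assumption.  A
 * consumer would typically force a flush before reusing or releasing
 * memory that old, not-yet-invalidated mappings might still cover:
 *
 *	int ret = ib_flush_fmr_pool(pool);
 *	if (ret)
 *		return ret;
 *
 * The only error returned here is -EINTR, when the wait for the
 * cleanup thread is interrupted by a signal.
 */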

/**
 * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
 * @pool_handle:FMR pool to allocate FMR from
 * @page_list:List of pages to map
 * @list_len:Number of pages in @page_list
 * @io_virtual_address:I/O virtual address for new FMR
 *
 * Map an FMR from an FMR pool.
 */
struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
                                         u64                *page_list,
                                         int                 list_len,
                                         u64                 io_virtual_address)
{
        struct ib_fmr_pool *pool = pool_handle;
        struct ib_pool_fmr *fmr;
        unsigned long       flags;
        int                 result;

        if (list_len < 1 || list_len > pool->max_pages)
                return ERR_PTR(-EINVAL);

        spin_lock_irqsave(&pool->pool_lock, flags);
        fmr = ib_fmr_cache_lookup(pool,
                                  page_list,
                                  list_len,
                                  io_virtual_address);
        if (fmr) {
                /* found in cache */
                ++fmr->ref_count;
                if (fmr->ref_count == 1)
                        list_del(&fmr->list);

                spin_unlock_irqrestore(&pool->pool_lock, flags);

                return fmr;
        }

        if (list_empty(&pool->free_list)) {
                spin_unlock_irqrestore(&pool->pool_lock, flags);
                return ERR_PTR(-EAGAIN);
        }

        fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
        list_del(&fmr->list);
        hlist_del_init(&fmr->cache_node);
        spin_unlock_irqrestore(&pool->pool_lock, flags);

        result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
                                 io_virtual_address);

        if (result) {
                spin_lock_irqsave(&pool->pool_lock, flags);
                list_add(&fmr->list, &pool->free_list);
                spin_unlock_irqrestore(&pool->pool_lock, flags);

                printk(KERN_WARNING PFX "fmr_map returns %d\n", result);

                return ERR_PTR(result);
        }

        ++fmr->remap_count;
        fmr->ref_count = 1;

        if (pool->cache_bucket) {
                fmr->io_virtual_address = io_virtual_address;
                fmr->page_list_len      = list_len;
                memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));

                spin_lock_irqsave(&pool->pool_lock, flags);
                hlist_add_head(&fmr->cache_node,
                               pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
                spin_unlock_irqrestore(&pool->pool_lock, flags);
        }

        return fmr;
}
EXPORT_SYMBOL(ib_fmr_pool_map_phys);
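
/*
 * A minimal mapping sketch; the DMA addresses, page count and I/O
 * virtual address are illustrative assumptions:
 *
 *	u64 pages[2] = { dma_addr0, dma_addr1 };
 *	struct ib_pool_fmr *fmr;
 *
 *	fmr = ib_fmr_pool_map_phys(pool, pages, 2, iova);
 *	if (IS_ERR(fmr))
 *		return PTR_ERR(fmr);
 *
 *	... post work requests using fmr->fmr->lkey / fmr->fmr->rkey ...
 *
 *	ib_fmr_pool_unmap(fmr);
 *
 * ERR_PTR(-EAGAIN) means no free FMR was available; callers commonly
 * retry after a flush or after in-flight work completes.
 */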

/**
 * ib_fmr_pool_unmap - Unmap FMR
 * @fmr:FMR to unmap
 *
 * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
 * reused (or until ib_flush_fmr_pool() is called).
 */
int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
{
        struct ib_fmr_pool *pool;
        unsigned long flags;

        pool = fmr->pool;

        spin_lock_irqsave(&pool->pool_lock, flags);

        --fmr->ref_count;
        if (!fmr->ref_count) {
                if (fmr->remap_count < pool->max_remaps) {
                        list_add_tail(&fmr->list, &pool->free_list);
                } else {
                        list_add_tail(&fmr->list, &pool->dirty_list);
                        if (++pool->dirty_len >= pool->dirty_watermark) {
                                atomic_inc(&pool->req_ser);
                                wake_up_process(pool->thread);
                        }
                }
        }

#ifdef DEBUG
        if (fmr->ref_count < 0)
                printk(KERN_WARNING PFX "FMR %p has ref count %d < 0\n",
                       fmr, fmr->ref_count);
#endif

        spin_unlock_irqrestore(&pool->pool_lock, flags);

        return 0;
}
EXPORT_SYMBOL(ib_fmr_pool_unmap);