/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */
/* Simple list-based uncached page pool:
 * - Pool collects recently freed pages for reuse.
 * - Uses page->lru to keep a free list.
 * - Doesn't track pages currently in use.
 */
#define pr_fmt(fmt) "[TTM] " fmt
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <linux/atomic.h>

#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
#define NUM_PAGES_TO_ALLOC	(PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION	16
#define FREE_ALL_PAGES		(~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL	1000
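/*
 * NUM_PAGES_TO_ALLOC is sized so one batch of page pointers fills exactly
 * one page: with 4 KiB pages and 8-byte pointers that is 4096 / 8 = 512
 * pages per batch.
 */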
/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 */
struct ttm_page_pool {
	spinlock_t		lock;
	bool			fill_lock;
	struct list_head	list;
	gfp_t			gfp_flags;
	unsigned		npages;
	char			*name;
	unsigned long		nfrees;
	unsigned long		nrefills;
};
/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is in sysfs store. They won't have an immediate effect
 * anyway, so forcing serialization to access them is pointless.
 */
struct ttm_pool_opts {
	unsigned	alloc_size;
	unsigned	max_size;
	unsigned	small;
};

#define NUM_POOLS 4
/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read-only object for pool code, so it doesn't need locking.
 *
 * @free_interval: minimum number of jiffies between freeing pages from pool.
 * @page_alloc_inited: reference counting for pool allocation.
 * @work: Work that is used to shrink the pool. Work is only run when there
 * are pages to free.
 * @small_allocation: Limit in number of pages below which an allocation
 * counts as small.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
	struct kobject		kobj;
	struct shrinker		mm_shrink;
	struct ttm_pool_opts	options;

	union {
		struct ttm_page_pool	pools[NUM_POOLS];
		struct {
			struct ttm_page_pool	wc_pool;
			struct ttm_page_pool	uc_pool;
			struct ttm_page_pool	wc_pool_dma32;
			struct ttm_page_pool	uc_pool_dma32;
		};
	};
};
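/*
 * The anonymous union above lets the same four pools be reached either by
 * name (wc_pool, uc_pool, ...) or by index through pools[]. Iteration code
 * such as the shrinker uses the array view, while ttm_page_alloc_init()
 * fills the pools by name.
 */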
static struct attribute ttm_page_pool_max = {
	.name = "pool_max_size",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_page_pool_small = {
	.name = "pool_small_allocation",
	.mode = S_IRUGO | S_IWUSR
};

static struct attribute ttm_page_pool_alloc_size = {
	.name = "pool_allocation_size",
	.mode = S_IRUGO | S_IWUSR
};
static struct attribute *ttm_pool_attrs[] = {
	&ttm_page_pool_max,
	&ttm_page_pool_small,
	&ttm_page_pool_alloc_size,
	NULL
};
static void ttm_pool_kobj_release(struct kobject *kobj)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	kfree(m);
}
static ssize_t ttm_pool_store(struct kobject *kobj,
		struct attribute *attr, const char *buffer, size_t size)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	int chars;
	unsigned val;

	chars = sscanf(buffer, "%u", &val);
	if (chars == 0)
		return size;

	/* Convert kb to number of pages */
	val = val / (PAGE_SIZE >> 10);

	if (attr == &ttm_page_pool_max)
		m->options.max_size = val;
	else if (attr == &ttm_page_pool_small)
		m->options.small = val;
	else if (attr == &ttm_page_pool_alloc_size) {
		if (val > NUM_PAGES_TO_ALLOC*8) {
			pr_err("Setting allocation size to %lu is not allowed. Recommended size is %lu\n",
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
			       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
			return size;
		} else if (val > NUM_PAGES_TO_ALLOC) {
			pr_warn("Setting allocation size to larger than %lu is not recommended\n",
				NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
		}
		m->options.alloc_size = val;
	}

	return size;
}
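/*
 * The sysfs files are expressed in KiB while the options are kept in pages,
 * hence the "val / (PAGE_SIZE >> 10)" conversion above: with 4 KiB pages,
 * writing 8192 (KiB) stores 8192 / 4 = 2048 pages.
 */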
static ssize_t ttm_pool_show(struct kobject *kobj,
		struct attribute *attr, char *buffer)
{
	struct ttm_pool_manager *m =
		container_of(kobj, struct ttm_pool_manager, kobj);
	unsigned val = 0;

	if (attr == &ttm_page_pool_max)
		val = m->options.max_size;
	else if (attr == &ttm_page_pool_small)
		val = m->options.small;
	else if (attr == &ttm_page_pool_alloc_size)
		val = m->options.alloc_size;

	val = val * (PAGE_SIZE >> 10);

	return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}
static const struct sysfs_ops ttm_pool_sysfs_ops = {
	.show = &ttm_pool_show,
	.store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
	.release = &ttm_pool_kobj_release,
	.sysfs_ops = &ttm_pool_sysfs_ops,
	.default_attrs = ttm_pool_attrs,
};
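/*
 * With this kobj_type, kobject_init_and_add() in ttm_page_alloc_init()
 * exposes pool_max_size, pool_small_allocation and pool_allocation_size as
 * read/write sysfs files; ttm_pool_show() and ttm_pool_store() dispatch on
 * the attribute pointer to pick the option.
 */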
static struct ttm_pool_manager *_manager;
/* On architectures without native set_pages_array_*() these fall back to
 * the AGP mapping helpers. */
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
	int i;

	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
	return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
	return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
	int i;

	for (i = 0; i < addrinarray; i++)
		map_page_into_agp(pages[i]);
	return 0;
}
/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
					  enum ttm_caching_state cstate)
{
	int pool_index;

	if (cstate == tt_cached)
		return NULL;

	if (cstate == tt_wc)
		pool_index = 0x0;
	else
		pool_index = 0x1;

	if (flags & TTM_PAGE_FLAG_DMA32)
		pool_index |= 0x2;

	return &_manager->pools[pool_index];
}
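/*
 * The resulting index matches the union layout in struct ttm_pool_manager:
 * bit 0 selects uncached over write-combined and bit 1 selects the DMA32
 * variants, so 0 = wc, 1 = uc, 2 = wc dma32, 3 = uc dma32.
 */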
/* Set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
	unsigned i;

	if (set_pages_array_wb(pages, npages))
		pr_err("Failed to set %d pages to wb!\n", npages);
	for (i = 0; i < npages; ++i)
		__free_page(pages[i]);
}
static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
					unsigned freed_pages)
{
	pool->npages -= freed_pages;
	pool->nfrees += freed_pages;
}
/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: If set to FREE_ALL_PAGES, frees all pages in the pool
 * @gfp: GFP flags to use for the temporary page array allocation
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
			      gfp_t gfp)
{
	unsigned long irq_flags;
	struct page *p;
	struct page **pages_to_free;
	unsigned freed_pages = 0,
		 npages_to_free = nr_free;

	if (NUM_PAGES_TO_ALLOC < nr_free)
		npages_to_free = NUM_PAGES_TO_ALLOC;

	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
	if (!pages_to_free) {
		pr_err("Failed to allocate memory for pool free operation\n");
		return 0;
	}

restart:
	spin_lock_irqsave(&pool->lock, irq_flags);

	list_for_each_entry_reverse(p, &pool->list, lru) {
		if (freed_pages >= npages_to_free)
			break;

		pages_to_free[freed_pages++] = p;
		/* We can only remove NUM_PAGES_TO_ALLOC at a time. */
		if (freed_pages >= NUM_PAGES_TO_ALLOC) {
			/* remove range of pages from the pool */
			__list_del(p->lru.prev, &pool->list);

			ttm_pool_update_free_locked(pool, freed_pages);
			/**
			 * Because changing page caching is costly
			 * we unlock the pool to prevent stalling.
			 */
			spin_unlock_irqrestore(&pool->lock, irq_flags);

			ttm_pages_put(pages_to_free, freed_pages);
			if (likely(nr_free != FREE_ALL_PAGES))
				nr_free -= freed_pages;

			if (NUM_PAGES_TO_ALLOC >= nr_free)
				npages_to_free = nr_free;
			else
				npages_to_free = NUM_PAGES_TO_ALLOC;

			freed_pages = 0;

			/* free all so restart the processing */
			if (nr_free)
				goto restart;

			/* Not allowed to fall through or break because
			 * following context is inside spinlock while we are
			 * outside here.
			 */
			goto out;
		}
	}

	/* remove range of pages from the pool */
	if (freed_pages) {
		__list_del(&p->lru, &pool->list);

		ttm_pool_update_free_locked(pool, freed_pages);
		nr_free -= freed_pages;
	}

	spin_unlock_irqrestore(&pool->lock, irq_flags);

	if (freed_pages)
		ttm_pages_put(pages_to_free, freed_pages);
out:
	kfree(pages_to_free);
	return nr_free;
}
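/*
 * A rough sketch of the flow above: pages are collected from the cold end of
 * the pool's LRU list into pages_to_free[], the pool lock is dropped for the
 * costly set_pages_array_wb() call inside ttm_pages_put(), and the loop
 * restarts until nr_free pages have been released or the pool is empty.
 */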
/**
 * Callback for mm to request pool to reduce number of pages held.
 *
 * XXX: (dchinner) Deadlock warning!
 *
 * We need to pass sc->gfp_mask to ttm_page_pool_free().
 *
 * This code is crying out for a shrinker per pool....
 */
static unsigned long
ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	static DEFINE_MUTEX(lock);
	static unsigned start_pool;
	unsigned i;
	unsigned pool_offset;
	struct ttm_page_pool *pool;
	int shrink_pages = sc->nr_to_scan;
	unsigned long freed = 0;

	if (!mutex_trylock(&lock))
		return SHRINK_STOP;
	pool_offset = ++start_pool % NUM_POOLS;
	/* select start pool in round robin fashion */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned nr_free = shrink_pages;

		if (shrink_pages == 0)
			break;
		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
		shrink_pages = ttm_page_pool_free(pool, nr_free,
						  sc->gfp_mask);
		freed += nr_free - shrink_pages;
	}
	mutex_unlock(&lock);
	return freed;
}
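/*
 * mutex_trylock() above keeps the scan single-threaded without risking a
 * deadlock under memory pressure, and the static start_pool counter rotates
 * which pool gets shrunk first so one pool is not always drained before the
 * others are touched.
 */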
static unsigned long
ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned i;
	unsigned long count = 0;

	for (i = 0; i < NUM_POOLS; ++i)
		count += _manager->pools[i].npages;

	return count;
}
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
	manager->mm_shrink.count_objects = ttm_pool_shrink_count;
	manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
	manager->mm_shrink.seeks = 1;
	register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
	unregister_shrinker(&manager->mm_shrink);
}
static int ttm_set_pages_caching(struct page **pages,
		enum ttm_caching_state cstate, unsigned cpages)
{
	int r = 0;

	/* Set page caching */
	switch (cstate) {
	case tt_uncached:
		r = set_pages_array_uc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to uc!\n", cpages);
		break;
	case tt_wc:
		r = set_pages_array_wc(pages, cpages);
		if (r)
			pr_err("Failed to set %d pages to wc!\n", cpages);
		break;
	default:
		break;
	}
	return r;
}
/**
 * Free the pages that failed to change their caching state. Pages that did
 * change caching state successfully remain on the @pages list for the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
		int ttm_flags, enum ttm_caching_state cstate,
		struct page **failed_pages, unsigned cpages)
{
	unsigned i;

	/* Failed pages have to be freed */
	for (i = 0; i < cpages; ++i) {
		list_del(&failed_pages[i]->lru);
		__free_page(failed_pages[i]);
	}
}
/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
	struct page **caching_array;
	struct page *p;
	int r = 0;
	unsigned i, cpages;
	unsigned max_cpages = min(count,
			(unsigned)(PAGE_SIZE/sizeof(struct page *)));

	/* allocate array for page caching change */
	caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
	if (!caching_array) {
		pr_err("Unable to allocate table for new pages\n");
		return -ENOMEM;
	}

	for (i = 0, cpages = 0; i < count; ++i) {
		p = alloc_page(gfp_flags);
		if (!p) {
			pr_err("Unable to get page %u\n", i);
			/* store already allocated pages in the pool after
			 * setting the caching state */
			if (cpages) {
				r = ttm_set_pages_caching(caching_array,
							  cstate, cpages);
				if (r)
					ttm_handle_caching_state_failure(pages,
							ttm_flags, cstate,
							caching_array, cpages);
			}
			r = -ENOMEM;
			goto out;
		}

#ifdef CONFIG_HIGHMEM
		/* gfp flags of highmem page should never be dma32 so we
		 * should be fine in such case
		 */
		if (!PageHighMem(p))
#endif
		{
			caching_array[cpages++] = p;
			if (cpages == max_cpages) {
				r = ttm_set_pages_caching(caching_array,
						cstate, cpages);
				if (r) {
					ttm_handle_caching_state_failure(pages,
						ttm_flags, cstate,
						caching_array, cpages);
					goto out;
				}
				cpages = 0;
			}
		}

		list_add(&p->lru, pages);
	}

	if (cpages) {
		r = ttm_set_pages_caching(caching_array, cstate, cpages);
		if (r)
			ttm_handle_caching_state_failure(pages,
					ttm_flags, cstate,
					caching_array, cpages);
	}
out:
	kfree(caching_array);
	return r;
}
/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
		int ttm_flags, enum ttm_caching_state cstate, unsigned count,
		unsigned long *irq_flags)
{
	struct page *p;
	int r;
	unsigned cpages = 0;
	/**
	 * Only allow one pool fill operation at a time.
	 * If pool doesn't have enough pages for the allocation new pages are
	 * allocated from outside of pool.
	 */
	if (pool->fill_lock)
		return;

	pool->fill_lock = true;

	/* If allocation request is small and there are not enough
	 * pages in a pool we fill the pool up first. */
	if (count < _manager->options.small
		&& count > pool->npages) {
		struct list_head new_pages;
		unsigned alloc_size = _manager->options.alloc_size;

		/**
		 * Can't change page caching if in irqsave context. We have to
		 * drop the pool->lock.
		 */
		spin_unlock_irqrestore(&pool->lock, *irq_flags);

		INIT_LIST_HEAD(&new_pages);
		r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
					cstate, alloc_size);
		spin_lock_irqsave(&pool->lock, *irq_flags);

		if (!r) {
			list_splice(&new_pages, &pool->list);
			++pool->nrefills;
			pool->npages += alloc_size;
		} else {
			pr_err("Failed to fill pool (%p)\n", pool);
			/* If we have any pages left put them to the pool.
			 * Count only the pages actually allocated, not the
			 * whole pool list, to keep npages accurate. */
			list_for_each_entry(p, &new_pages, lru) {
				++cpages;
			}
			list_splice(&new_pages, &pool->list);
			pool->npages += cpages;
		}
	}
	pool->fill_lock = false;
}
/**
 * Cut 'count' number of pages from the pool and put them on the return list.
 *
 * @return count of pages still required to fulfill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
					struct list_head *pages,
					int ttm_flags,
					enum ttm_caching_state cstate,
					unsigned count)
{
	unsigned long irq_flags;
	struct list_head *p;
	unsigned i;

	spin_lock_irqsave(&pool->lock, irq_flags);
	ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

	if (count >= pool->npages) {
		/* take all pages from the pool */
		list_splice_init(&pool->list, pages);
		count -= pool->npages;
		pool->npages = 0;
		goto out;
	}
	/* Find the last page to include in the requested number of pages.
	 * Walk from whichever end of the list is closer to halve the search
	 * space. */
	if (count <= pool->npages/2) {
		i = 0;
		list_for_each(p, &pool->list) {
			if (++i == count)
				break;
		}
	} else {
		i = pool->npages + 1;
		list_for_each_prev(p, &pool->list) {
			if (--i == count)
				break;
		}
	}
	/* Cut 'count' number of pages from the pool */
	list_cut_position(pages, &pool->list, p);
	pool->npages -= count;
out:
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	return count;
}
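/*
 * list_cut_position() moves the entries from the head of pool->list up to
 * and including *p onto the pages list in one splice, so taking count pages
 * costs one list walk to find the cut point plus O(1) pointer surgery.
 */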
/* Put all pages in pages list to correct pool to wait for reuse */
static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
			  enum ttm_caching_state cstate)
{
	unsigned long irq_flags;
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	unsigned i;

	if (pool == NULL) {
		/* No pool for this memory type so free the pages */
		for (i = 0; i < npages; i++) {
			if (pages[i]) {
				if (page_count(pages[i]) != 1)
					pr_err("Erroneous page count. Leaking pages.\n");
				__free_page(pages[i]);
				pages[i] = NULL;
			}
		}
		return;
	}

	spin_lock_irqsave(&pool->lock, irq_flags);
	for (i = 0; i < npages; i++) {
		if (pages[i]) {
			if (page_count(pages[i]) != 1)
				pr_err("Erroneous page count. Leaking pages.\n");
			list_add_tail(&pages[i]->lru, &pool->list);
			pages[i] = NULL;
			pool->npages++;
		}
	}
	/* Check that we don't go over the pool limit */
	npages = 0;
	if (pool->npages > _manager->options.max_size) {
		npages = pool->npages - _manager->options.max_size;
		/* free at least NUM_PAGES_TO_ALLOC number of pages
		 * to reduce calls to set_memory_wb */
		if (npages < NUM_PAGES_TO_ALLOC)
			npages = NUM_PAGES_TO_ALLOC;
	}
	spin_unlock_irqrestore(&pool->lock, irq_flags);
	if (npages)
		ttm_page_pool_free(pool, npages, GFP_KERNEL);
}
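/*
 * Example of the trimming above: with pool_max_size set to 2048 pages and
 * 2100 pages in the pool after a put, npages becomes 52, which is rounded
 * up to NUM_PAGES_TO_ALLOC (512 with 4 KiB pages) so the expensive
 * set_pages_array_wb() call is amortized over a full batch.
 */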
/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
			 enum ttm_caching_state cstate)
{
	struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
	struct list_head plist;
	struct page *p = NULL;
	gfp_t gfp_flags = GFP_USER;
	unsigned count;
	int r;

	/* set zero flag for page allocation if required */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	/* No pool for cached pages */
	if (pool == NULL) {
		if (flags & TTM_PAGE_FLAG_DMA32)
			gfp_flags |= GFP_DMA32;
		else
			gfp_flags |= GFP_HIGHUSER;

		for (r = 0; r < npages; ++r) {
			p = alloc_page(gfp_flags);
			if (!p) {
				pr_err("Unable to allocate page\n");
				return -ENOMEM;
			}

			pages[r] = p;
		}
		return 0;
	}

	/* combine zero flag to pool flags */
	gfp_flags |= pool->gfp_flags;

	/* First we take pages from the pool */
	INIT_LIST_HEAD(&plist);
	npages = ttm_page_pool_get_pages(pool, &plist, flags, cstate, npages);
	count = 0;
	list_for_each_entry(p, &plist, lru) {
		pages[count++] = p;
	}

	/* clear the pages coming from the pool if requested */
	if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
		list_for_each_entry(p, &plist, lru) {
			clear_page(page_address(p));
		}
	}

	/* If pool didn't have enough pages allocate new one. */
	if (npages > 0) {
		/* ttm_alloc_new_pages doesn't reference pool so we can run
		 * multiple requests in parallel.
		 **/
		INIT_LIST_HEAD(&plist);
		r = ttm_alloc_new_pages(&plist, gfp_flags, flags, cstate, npages);
		list_for_each_entry(p, &plist, lru) {
			pages[count++] = p;
		}
		if (r) {
			/* If there are any pages in the list, put them back
			 * to the pool. */
			pr_err("Failed to allocate extra pages for large request\n");
			ttm_put_pages(pages, count, flags, cstate);
			return r;
		}
	}

	return 0;
}
static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
				      char *name)
{
	spin_lock_init(&pool->lock);
	pool->fill_lock = false;
	INIT_LIST_HEAD(&pool->list);
	pool->npages = pool->nfrees = 0;
	pool->gfp_flags = flags;
	pool->name = name;
}
int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
	int ret;

	pr_info("Initializing pool allocator\n");

	_manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
	if (!_manager)
		return -ENOMEM;

	ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

	ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

	ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
				  GFP_USER | GFP_DMA32, "wc dma");

	ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
				  GFP_USER | GFP_DMA32, "uc dma");

	_manager->options.max_size = max_pages;
	_manager->options.small = SMALL_ALLOCATION;
	_manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

	ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
				   &glob->kobj, "pool");
	if (unlikely(ret != 0)) {
		kobject_put(&_manager->kobj);
		_manager = NULL;
		return ret;
	}

	ttm_pool_mm_shrink_init(_manager);

	return 0;
}
void ttm_page_alloc_fini(void)
{
	int i;

	pr_info("Finalizing pool allocator\n");
	ttm_pool_mm_shrink_fini(_manager);

	for (i = 0; i < NUM_POOLS; ++i)
		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
				   GFP_KERNEL);

	kobject_put(&_manager->kobj);
	_manager = NULL;
}
int ttm_pool_populate(struct ttm_tt *ttm)
{
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	unsigned i;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	for (i = 0; i < ttm->num_pages; ++i) {
		ret = ttm_get_pages(&ttm->pages[i], 1,
				    ttm->page_flags,
				    ttm->caching_state);
		if (ret != 0) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}

		ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i],
						false, false);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return -ENOMEM;
		}
	}

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0)) {
			ttm_pool_unpopulate(ttm);
			return ret;
		}
	}

	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_pool_populate);
void ttm_pool_unpopulate(struct ttm_tt *ttm)
{
	unsigned i;

	for (i = 0; i < ttm->num_pages; ++i) {
		if (ttm->pages[i]) {
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 ttm->pages[i]);
			ttm_put_pages(&ttm->pages[i], 1,
				      ttm->page_flags,
				      ttm->caching_state);
		}
	}
	ttm->state = tt_unpopulated;
}
EXPORT_SYMBOL(ttm_pool_unpopulate);
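/*
 * Typical usage (a sketch, assuming a driver with no special DMA needs): the
 * driver's struct ttm_bo_driver can point its ttm_tt_populate and
 * ttm_tt_unpopulate hooks straight at these helpers, e.g.
 *
 *	static struct ttm_bo_driver foo_bo_driver = {
 *		.ttm_tt_populate   = ttm_pool_populate,
 *		.ttm_tt_unpopulate = ttm_pool_unpopulate,
 *		...
 *	};
 *
 * where foo_bo_driver is a hypothetical driver object.
 */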
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
	struct ttm_page_pool *p;
	unsigned i;
	char *h[] = {"pool", "refills", "pages freed", "size"};

	if (!_manager) {
		seq_printf(m, "No pool allocator running.\n");
		return 0;
	}
	seq_printf(m, "%6s %12s %13s %8s\n",
		   h[0], h[1], h[2], h[3]);
	for (i = 0; i < NUM_POOLS; ++i) {
		p = &_manager->pools[i];

		seq_printf(m, "%6s %12ld %13ld %8d\n",
			   p->name, p->nrefills,
			   p->nfrees, p->npages);
	}
	return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);