1 /*
2  * linux/kernel/power/snapshot.c
3  *
4  * This file provides system snapshot/restore functionality for swsusp.
5  *
6  * Copyright (C) 1998-2005 Pavel Machek <pavel@ucw.cz>
7  * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
8  *
9  * This file is released under the GPLv2.
10  *
11  */
12
13 #include <linux/version.h>
14 #include <linux/module.h>
15 #include <linux/mm.h>
16 #include <linux/suspend.h>
17 #include <linux/delay.h>
18 #include <linux/bitops.h>
19 #include <linux/spinlock.h>
20 #include <linux/kernel.h>
21 #include <linux/pm.h>
22 #include <linux/device.h>
23 #include <linux/init.h>
24 #include <linux/bootmem.h>
25 #include <linux/syscalls.h>
26 #include <linux/console.h>
27 #include <linux/highmem.h>
28 #include <linux/list.h>
29 #include <linux/slab.h>
30 #include <linux/compiler.h>
31 #include <linux/ktime.h>
32
33 #include <asm/uaccess.h>
34 #include <asm/mmu_context.h>
35 #include <asm/pgtable.h>
36 #include <asm/tlbflush.h>
37 #include <asm/io.h>
38
39 #include "power.h"
40
41 static int swsusp_page_is_free(struct page *);
42 static void swsusp_set_page_forbidden(struct page *);
43 static void swsusp_unset_page_forbidden(struct page *);
44
45 /*
46  * Number of bytes to reserve for memory allocations made by device drivers
47  * from their ->freeze() and ->freeze_noirq() callbacks so that they don't
48  * cause image creation to fail (tunable via /sys/power/reserved_size).
49  */
50 unsigned long reserved_size;
51
52 void __init hibernate_reserved_size_init(void)
53 {
54         reserved_size = SPARE_PAGES * PAGE_SIZE;
55 }
56
57 /*
58  * Preferred image size in bytes (tunable via /sys/power/image_size).
59  * When it is set to N, swsusp will do its best to ensure the image
60  * size will not exceed N bytes, but if that is impossible, it will
61  * try to create the smallest image possible.
62  */
63 unsigned long image_size;
64
65 void __init hibernate_image_size_init(void)
66 {
67         image_size = ((totalram_pages * 2) / 5) * PAGE_SIZE;
68 }
69
70 /* List of PBEs needed for restoring the pages that were allocated before
71  * the suspend and included in the suspend image, but have also been
72  * allocated by the "resume" kernel, so their contents cannot be written
73  * directly to their "original" page frames.
74  */
75 struct pbe *restore_pblist;
76
77 /* struct linked_page is used to build chains of pages */
78
79 #define LINKED_PAGE_DATA_SIZE   (PAGE_SIZE - sizeof(void *))
80
81 struct linked_page {
82         struct linked_page *next;
83         char data[LINKED_PAGE_DATA_SIZE];
84 } __packed;
85
86 /*
 87  * List of "safe" pages (i.e. pages that were not used by the image kernel
88  * before hibernation) that may be used as temporary storage for image kernel
89  * memory contents.
90  */
91 static struct linked_page *safe_pages_list;
92
93 /* Pointer to an auxiliary buffer (1 page) */
94 static void *buffer;
95
 96 /*
 97  *      @safe_needed - on resume, for storing the PBE list and the image,
 98  *      we can only use memory pages that do not conflict with the pages
 99  *      used before suspend.  The unsafe pages have PageNosaveFree set
 100  *      and we count them using allocated_unsafe_pages.
101  *
102  *      Each allocated image page is marked as PageNosave and PageNosaveFree
103  *      so that swsusp_free() can release it.
104  */
105
106 #define PG_ANY          0
107 #define PG_SAFE         1
108 #define PG_UNSAFE_CLEAR 1
109 #define PG_UNSAFE_KEEP  0
110
111 static unsigned int allocated_unsafe_pages;
112
113 static void *get_image_page(gfp_t gfp_mask, int safe_needed)
114 {
115         void *res;
116
117         res = (void *)get_zeroed_page(gfp_mask);
118         if (safe_needed)
119                 while (res && swsusp_page_is_free(virt_to_page(res))) {
120                         /* The page is unsafe, mark it for swsusp_free() */
121                         swsusp_set_page_forbidden(virt_to_page(res));
122                         allocated_unsafe_pages++;
123                         res = (void *)get_zeroed_page(gfp_mask);
124                 }
125         if (res) {
126                 swsusp_set_page_forbidden(virt_to_page(res));
127                 swsusp_set_page_free(virt_to_page(res));
128         }
129         return res;
130 }
131
132 static void *__get_safe_page(gfp_t gfp_mask)
133 {
134         if (safe_pages_list) {
135                 void *ret = safe_pages_list;
136
137                 safe_pages_list = safe_pages_list->next;
138                 memset(ret, 0, PAGE_SIZE);
139                 return ret;
140         }
141         return get_image_page(gfp_mask, PG_SAFE);
142 }
143
144 unsigned long get_safe_page(gfp_t gfp_mask)
145 {
146         return (unsigned long)__get_safe_page(gfp_mask);
147 }
148
149 static struct page *alloc_image_page(gfp_t gfp_mask)
150 {
151         struct page *page;
152
153         page = alloc_page(gfp_mask);
154         if (page) {
155                 swsusp_set_page_forbidden(page);
156                 swsusp_set_page_free(page);
157         }
158         return page;
159 }
160
161 static void recycle_safe_page(void *page_address)
162 {
163         struct linked_page *lp = page_address;
164
165         lp->next = safe_pages_list;
166         safe_pages_list = lp;
167 }
168
169 /**
170  *      free_image_page - free page represented by @addr, allocated with
171  *      get_image_page (page flags set by it must be cleared)
172  */
173
174 static inline void free_image_page(void *addr, int clear_nosave_free)
175 {
176         struct page *page;
177
178         BUG_ON(!virt_addr_valid(addr));
179
180         page = virt_to_page(addr);
181
182         swsusp_unset_page_forbidden(page);
183         if (clear_nosave_free)
184                 swsusp_unset_page_free(page);
185
186         __free_page(page);
187 }
188
189 static inline void free_list_of_pages(struct linked_page *list,
190                                       int clear_page_nosave)
191 {
192         while (list) {
193                 struct linked_page *lp = list->next;
194
195                 free_image_page(list, clear_page_nosave);
196                 list = lp;
197         }
198 }
199
 200 /**
 201  *     struct chain_allocator is used for allocating small objects out of
 202  *     a linked list of pages called 'the chain'.
 203  *
 204  *     The chain grows each time there is no room for a new object in
 205  *     the current page.  The allocated objects cannot be freed individually.
 206  *     It is only possible to free them all at once, by freeing the entire
 207  *     chain.
 208  *
 209  *     NOTE: The chain allocator may be inefficient if the allocated objects
 210  *     are not much smaller than PAGE_SIZE.
 211  */
212
213 struct chain_allocator {
214         struct linked_page *chain;      /* the chain */
215         unsigned int used_space;        /* total size of objects allocated out
216                                          * of the current page
217                                          */
218         gfp_t gfp_mask;         /* mask for allocating pages */
219         int safe_needed;        /* if set, only "safe" pages are allocated */
220 };
221
222 static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
223                        int safe_needed)
224 {
225         ca->chain = NULL;
226         ca->used_space = LINKED_PAGE_DATA_SIZE;
227         ca->gfp_mask = gfp_mask;
228         ca->safe_needed = safe_needed;
229 }
230
231 static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
232 {
233         void *ret;
234
235         if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
236                 struct linked_page *lp;
237
238                 lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
239                                         get_image_page(ca->gfp_mask, PG_ANY);
240                 if (!lp)
241                         return NULL;
242
243                 lp->next = ca->chain;
244                 ca->chain = lp;
245                 ca->used_space = 0;
246         }
247         ret = ca->chain->data + ca->used_space;
248         ca->used_space += size;
249         return ret;
250 }
251
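/*
 * Illustrative sketch (not part of the original file): driving the chain
 * allocator above.  Objects are carved out of the data area of linked
 * pages (LINKED_PAGE_DATA_SIZE bytes each, i.e. PAGE_SIZE minus the
 * 'next' pointer) and can only be freed all at once by releasing the
 * whole chain.
 */
static void __maybe_unused chain_alloc_sketch(void)
{
	struct chain_allocator ca;
	struct pbe *p;

	chain_init(&ca, GFP_KERNEL, PG_ANY);
	p = chain_alloc(&ca, sizeof(struct pbe));
	if (p)
		p->orig_address = NULL;	/* use the small object */
	free_list_of_pages(ca.chain, PG_UNSAFE_CLEAR);	/* free everything */
}
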
252 /**
253  *      Data types related to memory bitmaps.
254  *
 255  *      Memory bitmap is a structure consisting of many linked lists of
 256  *      objects.  The main list's elements are of type struct
 257  *      mem_zone_bm_rtree and each of them corresponds to one populated
 258  *      memory zone.  For each zone there is a radix tree whose leaves
 259  *      are the blocks of the bitmap in which the information is stored.
260  *
 261  *      struct memory_bitmap contains the head of the main list of zone
 262  *      bitmap objects, a struct bm_position used for browsing the bitmap,
263  *      and a pointer to the list of pages used for allocating all of the
264  *      zone bitmap objects and bitmap block objects.
265  *
266  *      NOTE: It has to be possible to lay out the bitmap in memory
267  *      using only allocations of order 0.  Additionally, the bitmap is
 268  *      designed to work with an arbitrary number of zones (this is over the
269  *      top for now, but let's avoid making unnecessary assumptions ;-).
270  *
 271  *      struct mem_zone_bm_rtree contains a pointer to the root node of
 272  *      the zone's radix tree together with lists of its inner and leaf
 273  *      nodes.  Additionally, it contains the
 274  *      pfns that correspond to the start and end of the represented zone.
 275  *
 276  *      struct rtree_node wraps the memory page in which
 277  *      information is stored (either one block of the bitmap or one
 278  *      level of radix tree pointers).  It also links the node into the
 279  *      zone's list of inner or leaf nodes for easy iteration and freeing.
280  *
281  *      The memory bitmap is organized as a radix tree to guarantee fast random
282  *      access to the bits. There is one radix tree for each zone (as returned
283  *      from create_mem_extents).
284  *
285  *      One radix tree is represented by one struct mem_zone_bm_rtree. There are
286  *      two linked lists for the nodes of the tree, one for the inner nodes and
 287  *      one for the leaf nodes. The linked leaf nodes are used for fast linear
 288  *      access to the memory bitmap.
289  *
290  *      The struct rtree_node represents one node of the radix tree.
291  */
292
293 #define BM_END_OF_MAP   (~0UL)
294
295 #define BM_BITS_PER_BLOCK       (PAGE_SIZE * BITS_PER_BYTE)
296 #define BM_BLOCK_SHIFT          (PAGE_SHIFT + 3)
297 #define BM_BLOCK_MASK           ((1UL << BM_BLOCK_SHIFT) - 1)
298
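/*
 * Worked example (assuming 4 KiB pages, i.e. PAGE_SHIFT == 12): one
 * bitmap block is a full page of bits, so BM_BITS_PER_BLOCK == 32768
 * and BM_BLOCK_SHIFT == 15.  A pfn offset of 40000 within a zone lands
 * in block 40000 >> BM_BLOCK_SHIFT == 1, at bit
 * 40000 & BM_BLOCK_MASK == 7232 within that block.
 */
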
299 /*
300  * struct rtree_node is a wrapper struct to link the nodes
301  * of the rtree together for easy linear iteration over
302  * bits and easy freeing
303  */
304 struct rtree_node {
305         struct list_head list;
306         unsigned long *data;
307 };
308
309 /*
310  * struct mem_zone_bm_rtree represents a bitmap used for one
311  * populated memory zone.
312  */
313 struct mem_zone_bm_rtree {
314         struct list_head list;          /* Link Zones together         */
315         struct list_head nodes;         /* Radix Tree inner nodes      */
316         struct list_head leaves;        /* Radix Tree leaves           */
317         unsigned long start_pfn;        /* Zone start page frame       */
318         unsigned long end_pfn;          /* Zone end page frame + 1     */
319         struct rtree_node *rtree;       /* Radix Tree Root             */
320         int levels;                     /* Number of Radix Tree Levels */
321         unsigned int blocks;            /* Number of Bitmap Blocks     */
322 };
323
 324 /* struct bm_position is used for browsing memory bitmaps */
325
326 struct bm_position {
327         struct mem_zone_bm_rtree *zone;
328         struct rtree_node *node;
329         unsigned long node_pfn;
330         int node_bit;
331 };
332
333 struct memory_bitmap {
334         struct list_head zones;
335         struct linked_page *p_list;     /* list of pages used to store zone
336                                          * bitmap objects and bitmap block
337                                          * objects
338                                          */
339         struct bm_position cur; /* most recently used bit position */
340 };
341
342 /* Functions that operate on memory bitmaps */
343
344 #define BM_ENTRIES_PER_LEVEL    (PAGE_SIZE / sizeof(unsigned long))
345 #if BITS_PER_LONG == 32
346 #define BM_RTREE_LEVEL_SHIFT    (PAGE_SHIFT - 2)
347 #else
348 #define BM_RTREE_LEVEL_SHIFT    (PAGE_SHIFT - 3)
349 #endif
350 #define BM_RTREE_LEVEL_MASK     ((1UL << BM_RTREE_LEVEL_SHIFT) - 1)
351
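/*
 * Worked example (assuming a 64-bit kernel with 4 KiB pages): a node
 * page holds BM_ENTRIES_PER_LEVEL == 512 unsigned longs, so
 * BM_RTREE_LEVEL_SHIFT == 9.  In a two-level tree, block number 1000
 * is reached via index (1000 >> 9) & BM_RTREE_LEVEL_MASK == 1 at the
 * root and 1000 & BM_RTREE_LEVEL_MASK == 488 at the leaf level; this
 * is exactly the walk done by add_rtree_block() and
 * memory_bm_find_bit() below.
 */
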
352 /*
353  *      alloc_rtree_node - Allocate a new node and add it to the radix tree.
354  *
 355  *      This function is used to allocate inner nodes as well as the
 356  *      leaf nodes of the radix tree. It also adds the node to the
 357  *      corresponding linked list passed in via the @list parameter.
358  */
359 static struct rtree_node *alloc_rtree_node(gfp_t gfp_mask, int safe_needed,
360                                            struct chain_allocator *ca,
361                                            struct list_head *list)
362 {
363         struct rtree_node *node;
364
365         node = chain_alloc(ca, sizeof(struct rtree_node));
366         if (!node)
367                 return NULL;
368
369         node->data = get_image_page(gfp_mask, safe_needed);
370         if (!node->data)
371                 return NULL;
372
373         list_add_tail(&node->list, list);
374
375         return node;
376 }
377
378 /*
 379  *      add_rtree_block - Add a new leaf node to the radix tree
 380  *
 381  *      The leaf nodes need to be allocated in ascending order to keep the
 382  *      leaves linked list sorted. This is guaranteed by the zone->blocks
 383  *      counter.
384  */
385 static int add_rtree_block(struct mem_zone_bm_rtree *zone, gfp_t gfp_mask,
386                            int safe_needed, struct chain_allocator *ca)
387 {
388         struct rtree_node *node, *block, **dst;
389         unsigned int levels_needed, block_nr;
390         int i;
391
392         block_nr = zone->blocks;
393         levels_needed = 0;
394
395         /* How many levels do we need for this block nr? */
396         while (block_nr) {
397                 levels_needed += 1;
398                 block_nr >>= BM_RTREE_LEVEL_SHIFT;
399         }
400
401         /* Make sure the rtree has enough levels */
402         for (i = zone->levels; i < levels_needed; i++) {
403                 node = alloc_rtree_node(gfp_mask, safe_needed, ca,
404                                         &zone->nodes);
405                 if (!node)
406                         return -ENOMEM;
407
408                 node->data[0] = (unsigned long)zone->rtree;
409                 zone->rtree = node;
410                 zone->levels += 1;
411         }
412
413         /* Allocate new block */
414         block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
415         if (!block)
416                 return -ENOMEM;
417
418         /* Now walk the rtree to insert the block */
419         node = zone->rtree;
420         dst = &zone->rtree;
421         block_nr = zone->blocks;
422         for (i = zone->levels; i > 0; i--) {
423                 int index;
424
425                 if (!node) {
426                         node = alloc_rtree_node(gfp_mask, safe_needed, ca,
427                                                 &zone->nodes);
428                         if (!node)
429                                 return -ENOMEM;
430                         *dst = node;
431                 }
432
433                 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
434                 index &= BM_RTREE_LEVEL_MASK;
435                 dst = (struct rtree_node **)&((*dst)->data[index]);
436                 node = *dst;
437         }
438
439         zone->blocks += 1;
440         *dst = block;
441
442         return 0;
443 }
444
445 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
446                                int clear_nosave_free);
447
448 /*
449  *      create_zone_bm_rtree - create a radix tree for one zone
450  *
 451  *      Allocates the mem_zone_bm_rtree structure and initializes it.
 452  *      This function also allocates and builds the radix tree for the
453  *      zone.
454  */
455 static struct mem_zone_bm_rtree *create_zone_bm_rtree(gfp_t gfp_mask,
456                                                       int safe_needed,
457                                                       struct chain_allocator *ca,
458                                                       unsigned long start,
459                                                       unsigned long end)
460 {
461         struct mem_zone_bm_rtree *zone;
462         unsigned int i, nr_blocks;
463         unsigned long pages;
464
465         pages = end - start;
466         zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
467         if (!zone)
468                 return NULL;
469
470         INIT_LIST_HEAD(&zone->nodes);
471         INIT_LIST_HEAD(&zone->leaves);
472         zone->start_pfn = start;
473         zone->end_pfn = end;
474         nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);
475
476         for (i = 0; i < nr_blocks; i++) {
477                 if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
478                         free_zone_bm_rtree(zone, PG_UNSAFE_CLEAR);
479                         return NULL;
480                 }
481         }
482
483         return zone;
484 }
485
486 /*
487  *      free_zone_bm_rtree - Free the memory of the radix tree
488  *
489  *      Free all node pages of the radix tree. The mem_zone_bm_rtree
490  *      structure itself is not freed here nor are the rtree_node
491  *      structs.
492  */
493 static void free_zone_bm_rtree(struct mem_zone_bm_rtree *zone,
494                                int clear_nosave_free)
495 {
496         struct rtree_node *node;
497
498         list_for_each_entry(node, &zone->nodes, list)
499                 free_image_page(node->data, clear_nosave_free);
500
501         list_for_each_entry(node, &zone->leaves, list)
502                 free_image_page(node->data, clear_nosave_free);
503 }
504
505 static void memory_bm_position_reset(struct memory_bitmap *bm)
506 {
507         bm->cur.zone = list_entry(bm->zones.next, struct mem_zone_bm_rtree,
508                                   list);
509         bm->cur.node = list_entry(bm->cur.zone->leaves.next,
510                                   struct rtree_node, list);
511         bm->cur.node_pfn = 0;
512         bm->cur.node_bit = 0;
513 }
514
515 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free);
516
517 struct mem_extent {
518         struct list_head hook;
519         unsigned long start;
520         unsigned long end;
521 };
522
523 /**
524  *      free_mem_extents - free a list of memory extents
525  *      @list - list of extents to empty
526  */
527 static void free_mem_extents(struct list_head *list)
528 {
529         struct mem_extent *ext, *aux;
530
531         list_for_each_entry_safe(ext, aux, list, hook) {
532                 list_del(&ext->hook);
533                 kfree(ext);
534         }
535 }
536
537 /**
538  *      create_mem_extents - create a list of memory extents representing
539  *                           contiguous ranges of PFNs
540  *      @list - list to put the extents into
541  *      @gfp_mask - mask to use for memory allocations
542  */
543 static int create_mem_extents(struct list_head *list, gfp_t gfp_mask)
544 {
545         struct zone *zone;
546
547         INIT_LIST_HEAD(list);
548
549         for_each_populated_zone(zone) {
550                 unsigned long zone_start, zone_end;
551                 struct mem_extent *ext, *cur, *aux;
552
553                 zone_start = zone->zone_start_pfn;
554                 zone_end = zone_end_pfn(zone);
555
556                 list_for_each_entry(ext, list, hook)
557                         if (zone_start <= ext->end)
558                                 break;
559
560                 if (&ext->hook == list || zone_end < ext->start) {
561                         /* New extent is necessary */
562                         struct mem_extent *new_ext;
563
564                         new_ext = kzalloc(sizeof(struct mem_extent), gfp_mask);
565                         if (!new_ext) {
566                                 free_mem_extents(list);
567                                 return -ENOMEM;
568                         }
569                         new_ext->start = zone_start;
570                         new_ext->end = zone_end;
571                         list_add_tail(&new_ext->hook, &ext->hook);
572                         continue;
573                 }
574
575                 /* Merge this zone's range of PFNs with the existing one */
576                 if (zone_start < ext->start)
577                         ext->start = zone_start;
578                 if (zone_end > ext->end)
579                         ext->end = zone_end;
580
581                 /* More merging may be possible */
582                 cur = ext;
583                 list_for_each_entry_safe_continue(cur, aux, list, hook) {
584                         if (zone_end < cur->start)
585                                 break;
586                         if (zone_end < cur->end)
587                                 ext->end = cur->end;
588                         list_del(&cur->hook);
589                         kfree(cur);
590                 }
591         }
592
593         return 0;
594 }
595
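/*
 * Example of the merging above (hypothetical pfn ranges): zones
 * spanning pfns [0, 4096), [4096, 8192) and [10000, 12000) collapse
 * into two extents, [0, 8192) and [10000, 12000).  The first two
 * ranges touch (zone_start <= ext->end), so they are merged; the third
 * is disjoint and gets an extent of its own.
 */
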
596 /**
 597  *     memory_bm_create - allocate memory for a memory bitmap
 598  */
599 static int memory_bm_create(struct memory_bitmap *bm, gfp_t gfp_mask,
600                             int safe_needed)
601 {
602         struct chain_allocator ca;
603         struct list_head mem_extents;
604         struct mem_extent *ext;
605         int error;
606
607         chain_init(&ca, gfp_mask, safe_needed);
608         INIT_LIST_HEAD(&bm->zones);
609
610         error = create_mem_extents(&mem_extents, gfp_mask);
611         if (error)
612                 return error;
613
614         list_for_each_entry(ext, &mem_extents, hook) {
615                 struct mem_zone_bm_rtree *zone;
616
617                 zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
618                                             ext->start, ext->end);
619                 if (!zone) {
620                         error = -ENOMEM;
621                         goto Error;
622                 }
623                 list_add_tail(&zone->list, &bm->zones);
624         }
625
626         bm->p_list = ca.chain;
627         memory_bm_position_reset(bm);
628  Exit:
629         free_mem_extents(&mem_extents);
630         return error;
631
632  Error:
633         bm->p_list = ca.chain;
634         memory_bm_free(bm, PG_UNSAFE_CLEAR);
635         goto Exit;
636 }
637
638 /**
 639  *     memory_bm_free - free memory occupied by the memory bitmap @bm
 640  */
641 static void memory_bm_free(struct memory_bitmap *bm, int clear_nosave_free)
642 {
643         struct mem_zone_bm_rtree *zone;
644
645         list_for_each_entry(zone, &bm->zones, list)
646                 free_zone_bm_rtree(zone, clear_nosave_free);
647
648         free_list_of_pages(bm->p_list, clear_nosave_free);
649
650         INIT_LIST_HEAD(&bm->zones);
651 }
652
653 /**
654  *      memory_bm_find_bit - Find the bit for pfn in the memory
655  *                           bitmap
656  *
 657  *      Find the bit in the bitmap @bm that corresponds to the given pfn.
 658  *      The cur.zone, cur.node and cur.node_pfn members of @bm are
 659  *      updated.
 660  *      It walks the radix tree to find the page which contains the bit for
 661  *      pfn and returns the page address in *addr and the bit's offset in *bit_nr.
662  */
663 static int memory_bm_find_bit(struct memory_bitmap *bm, unsigned long pfn,
664                               void **addr, unsigned int *bit_nr)
665 {
666         struct mem_zone_bm_rtree *curr, *zone;
667         struct rtree_node *node;
668         int i, block_nr;
669
670         zone = bm->cur.zone;
671
672         if (pfn >= zone->start_pfn && pfn < zone->end_pfn)
673                 goto zone_found;
674
675         zone = NULL;
676
677         /* Find the right zone */
678         list_for_each_entry(curr, &bm->zones, list) {
679                 if (pfn >= curr->start_pfn && pfn < curr->end_pfn) {
680                         zone = curr;
681                         break;
682                 }
683         }
684
685         if (!zone)
686                 return -EFAULT;
687
688 zone_found:
689         /*
 690          * We have a zone. Now walk the radix tree to find the leaf
691          * node for our pfn.
692          */
693
694         node = bm->cur.node;
695         if (((pfn - zone->start_pfn) & ~BM_BLOCK_MASK) == bm->cur.node_pfn)
696                 goto node_found;
697
698         node      = zone->rtree;
699         block_nr  = (pfn - zone->start_pfn) >> BM_BLOCK_SHIFT;
700
701         for (i = zone->levels; i > 0; i--) {
702                 int index;
703
704                 index = block_nr >> ((i - 1) * BM_RTREE_LEVEL_SHIFT);
705                 index &= BM_RTREE_LEVEL_MASK;
706                 BUG_ON(node->data[index] == 0);
707                 node = (struct rtree_node *)node->data[index];
708         }
709
710 node_found:
711         /* Update last position */
712         bm->cur.zone = zone;
713         bm->cur.node = node;
714         bm->cur.node_pfn = (pfn - zone->start_pfn) & ~BM_BLOCK_MASK;
715
716         /* Set return values */
717         *addr = node->data;
718         *bit_nr = (pfn - zone->start_pfn) & BM_BLOCK_MASK;
719
720         return 0;
721 }
722
723 static void memory_bm_set_bit(struct memory_bitmap *bm, unsigned long pfn)
724 {
725         void *addr;
726         unsigned int bit;
727         int error;
728
729         error = memory_bm_find_bit(bm, pfn, &addr, &bit);
730         BUG_ON(error);
731         set_bit(bit, addr);
732 }
733
734 static int mem_bm_set_bit_check(struct memory_bitmap *bm, unsigned long pfn)
735 {
736         void *addr;
737         unsigned int bit;
738         int error;
739
740         error = memory_bm_find_bit(bm, pfn, &addr, &bit);
741         if (!error)
742                 set_bit(bit, addr);
743
744         return error;
745 }
746
747 static void memory_bm_clear_bit(struct memory_bitmap *bm, unsigned long pfn)
748 {
749         void *addr;
750         unsigned int bit;
751         int error;
752
753         error = memory_bm_find_bit(bm, pfn, &addr, &bit);
754         BUG_ON(error);
755         clear_bit(bit, addr);
756 }
757
758 static void memory_bm_clear_current(struct memory_bitmap *bm)
759 {
760         int bit;
761
762         bit = max(bm->cur.node_bit - 1, 0);
763         clear_bit(bit, bm->cur.node->data);
764 }
765
766 static int memory_bm_test_bit(struct memory_bitmap *bm, unsigned long pfn)
767 {
768         void *addr;
769         unsigned int bit;
770         int error;
771
772         error = memory_bm_find_bit(bm, pfn, &addr, &bit);
773         BUG_ON(error);
774         return test_bit(bit, addr);
775 }
776
777 static bool memory_bm_pfn_present(struct memory_bitmap *bm, unsigned long pfn)
778 {
779         void *addr;
780         unsigned int bit;
781
782         return !memory_bm_find_bit(bm, pfn, &addr, &bit);
783 }
784
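/*
 * Illustrative sketch (not part of the original file): the single-bit
 * operations above all funnel through memory_bm_find_bit().  For a
 * bitmap set up with memory_bm_create(), they combine like this:
 */
static void __maybe_unused memory_bm_bit_sketch(struct memory_bitmap *bm,
						unsigned long pfn)
{
	if (!memory_bm_pfn_present(bm, pfn))
		return;				/* pfn outside all zones */

	memory_bm_set_bit(bm, pfn);		/* mark the page frame */
	if (memory_bm_test_bit(bm, pfn))	/* reads back as set */
		memory_bm_clear_bit(bm, pfn);	/* and can be cleared again */
}
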
785 /*
 786  *      rtree_next_node - Jumps to the next leaf node
787  *
788  *      Sets the position to the beginning of the next node in the
789  *      memory bitmap. This is either the next node in the current
790  *      zone's radix tree or the first node in the radix tree of the
791  *      next zone.
792  *
793  *      Returns true if there is a next node, false otherwise.
794  */
795 static bool rtree_next_node(struct memory_bitmap *bm)
796 {
797         bm->cur.node = list_entry(bm->cur.node->list.next,
798                                   struct rtree_node, list);
799         if (&bm->cur.node->list != &bm->cur.zone->leaves) {
800                 bm->cur.node_pfn += BM_BITS_PER_BLOCK;
801                 bm->cur.node_bit  = 0;
802                 touch_softlockup_watchdog();
803                 return true;
804         }
805
806         /* No more nodes, goto next zone */
807         bm->cur.zone = list_entry(bm->cur.zone->list.next,
808                                   struct mem_zone_bm_rtree, list);
809         if (&bm->cur.zone->list != &bm->zones) {
810                 bm->cur.node = list_entry(bm->cur.zone->leaves.next,
811                                           struct rtree_node, list);
812                 bm->cur.node_pfn = 0;
813                 bm->cur.node_bit = 0;
814                 return true;
815         }
816
817         /* No more zones */
818         return false;
819 }
820
821 /**
 822  *      memory_bm_next_pfn - Find the next set bit in the bitmap @bm
823  *
824  *      Starting from the last returned position this function searches
825  *      for the next set bit in the memory bitmap and returns its
 826  *      number. If no more bits are set, BM_END_OF_MAP is returned.
827  *
828  *      It is required to run memory_bm_position_reset() before the
829  *      first call to this function.
830  */
831 static unsigned long memory_bm_next_pfn(struct memory_bitmap *bm)
832 {
833         unsigned long bits, pfn, pages;
834         int bit;
835
836         do {
837                 pages     = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;
838                 bits      = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);
839                 bit       = find_next_bit(bm->cur.node->data, bits,
840                                           bm->cur.node_bit);
841                 if (bit < bits) {
842                         pfn = bm->cur.zone->start_pfn + bm->cur.node_pfn + bit;
843                         bm->cur.node_bit = bit + 1;
844                         return pfn;
845                 }
846         } while (rtree_next_node(bm));
847
848         return BM_END_OF_MAP;
849 }
850
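/*
 * Illustrative sketch (not part of the original file): the canonical
 * way to walk all set bits, as copy_data_pages() and swsusp_free()
 * below do.  memory_bm_position_reset() must run before the first
 * call to memory_bm_next_pfn().
 */
static void __maybe_unused memory_bm_walk_sketch(struct memory_bitmap *bm)
{
	unsigned long pfn;

	memory_bm_position_reset(bm);
	for (pfn = memory_bm_next_pfn(bm); pfn != BM_END_OF_MAP;
	     pfn = memory_bm_next_pfn(bm))
		;	/* every pfn returned here has its bit set in @bm */
}
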
851 /**
852  *      This structure represents a range of page frames the contents of which
853  *      should not be saved during the suspend.
854  */
855
856 struct nosave_region {
857         struct list_head list;
858         unsigned long start_pfn;
859         unsigned long end_pfn;
860 };
861
862 static LIST_HEAD(nosave_regions);
863
864 static void recycle_zone_bm_rtree(struct mem_zone_bm_rtree *zone)
865 {
866         struct rtree_node *node;
867
868         list_for_each_entry(node, &zone->nodes, list)
869                 recycle_safe_page(node->data);
870
871         list_for_each_entry(node, &zone->leaves, list)
872                 recycle_safe_page(node->data);
873 }
874
875 static void memory_bm_recycle(struct memory_bitmap *bm)
876 {
877         struct mem_zone_bm_rtree *zone;
878         struct linked_page *p_list;
879
880         list_for_each_entry(zone, &bm->zones, list)
881                 recycle_zone_bm_rtree(zone);
882
883         p_list = bm->p_list;
884         while (p_list) {
885                 struct linked_page *lp = p_list;
886
887                 p_list = lp->next;
888                 recycle_safe_page(lp);
889         }
890 }
891
892 /**
 893  *      __register_nosave_region - register a range of page frames the contents
 894  *      of which should not be saved during suspend (to be used in the early
 895  *      initialization code)
896  */
897
898 void __init __register_nosave_region(unsigned long start_pfn,
899                                      unsigned long end_pfn, int use_kmalloc)
900 {
901         struct nosave_region *region;
902
903         if (start_pfn >= end_pfn)
904                 return;
905
906         if (!list_empty(&nosave_regions)) {
907                 /* Try to extend the previous region (they should be sorted) */
908                 region = list_entry(nosave_regions.prev,
909                                         struct nosave_region, list);
910                 if (region->end_pfn == start_pfn) {
911                         region->end_pfn = end_pfn;
912                         goto Report;
913                 }
914         }
915         if (use_kmalloc) {
916                 /* during init, this shouldn't fail */
917                 region = kmalloc(sizeof(struct nosave_region), GFP_KERNEL);
918                 BUG_ON(!region);
919         } else
920                 /* This allocation cannot fail */
921                 region = memblock_virt_alloc(sizeof(struct nosave_region), 0);
922         region->start_pfn = start_pfn;
923         region->end_pfn = end_pfn;
924         list_add_tail(&region->list, &nosave_regions);
925  Report:
926         printk(KERN_INFO "PM: Registered nosave memory: [mem %#010llx-%#010llx]\n",
927                 (unsigned long long) start_pfn << PAGE_SHIFT,
928                 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
929 }
930
931 /*
932  * Set bits in this map correspond to the page frames the contents of which
933  * should not be saved during the suspend.
934  */
935 static struct memory_bitmap *forbidden_pages_map;
936
937 /* Set bits in this map correspond to free page frames. */
938 static struct memory_bitmap *free_pages_map;
939
940 /*
941  * Each page frame allocated for creating the image is marked by setting the
942  * corresponding bits in forbidden_pages_map and free_pages_map simultaneously
943  */
944
945 void swsusp_set_page_free(struct page *page)
946 {
947         if (free_pages_map)
948                 memory_bm_set_bit(free_pages_map, page_to_pfn(page));
949 }
950
951 static int swsusp_page_is_free(struct page *page)
952 {
953         return free_pages_map ?
954                 memory_bm_test_bit(free_pages_map, page_to_pfn(page)) : 0;
955 }
956
957 void swsusp_unset_page_free(struct page *page)
958 {
959         if (free_pages_map)
960                 memory_bm_clear_bit(free_pages_map, page_to_pfn(page));
961 }
962
963 static void swsusp_set_page_forbidden(struct page *page)
964 {
965         if (forbidden_pages_map)
966                 memory_bm_set_bit(forbidden_pages_map, page_to_pfn(page));
967 }
968
969 int swsusp_page_is_forbidden(struct page *page)
970 {
971         return forbidden_pages_map ?
972                 memory_bm_test_bit(forbidden_pages_map, page_to_pfn(page)) : 0;
973 }
974
975 static void swsusp_unset_page_forbidden(struct page *page)
976 {
977         if (forbidden_pages_map)
978                 memory_bm_clear_bit(forbidden_pages_map, page_to_pfn(page));
979 }
980
981 /**
 982  *      mark_nosave_pages - set bits, in a given bitmap, corresponding to
 983  *      the page frames the contents of which should not be saved.
984  */
985
986 static void mark_nosave_pages(struct memory_bitmap *bm)
987 {
988         struct nosave_region *region;
989
990         if (list_empty(&nosave_regions))
991                 return;
992
993         list_for_each_entry(region, &nosave_regions, list) {
994                 unsigned long pfn;
995
996                 pr_debug("PM: Marking nosave pages: [mem %#010llx-%#010llx]\n",
997                          (unsigned long long) region->start_pfn << PAGE_SHIFT,
998                          ((unsigned long long) region->end_pfn << PAGE_SHIFT)
999                                 - 1);
1000
1001                 for (pfn = region->start_pfn; pfn < region->end_pfn; pfn++)
1002                         if (pfn_valid(pfn)) {
1003                                 /*
1004                                  * It is safe to ignore the result of
1005                                  * mem_bm_set_bit_check() here, since we won't
1006                                  * touch the PFNs for which the error is
1007                                  * returned anyway.
1008                                  */
1009                                 mem_bm_set_bit_check(bm, pfn);
1010                         }
1011         }
1012 }
1013
1014 /**
1015  *      create_basic_memory_bitmaps - create bitmaps needed for marking page
1016  *      frames that should not be saved and free page frames.  The pointers
1017  *      forbidden_pages_map and free_pages_map are only modified if everything
1018  *      goes well, because we don't want the bits to be used before both bitmaps
1019  *      are set up.
1020  */
1021
1022 int create_basic_memory_bitmaps(void)
1023 {
1024         struct memory_bitmap *bm1, *bm2;
1025         int error = 0;
1026
1027         if (forbidden_pages_map && free_pages_map)
1028                 return 0;
1029         else
1030                 BUG_ON(forbidden_pages_map || free_pages_map);
1031
1032         bm1 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1033         if (!bm1)
1034                 return -ENOMEM;
1035
1036         error = memory_bm_create(bm1, GFP_KERNEL, PG_ANY);
1037         if (error)
1038                 goto Free_first_object;
1039
1040         bm2 = kzalloc(sizeof(struct memory_bitmap), GFP_KERNEL);
1041         if (!bm2)
1042                 goto Free_first_bitmap;
1043
1044         error = memory_bm_create(bm2, GFP_KERNEL, PG_ANY);
1045         if (error)
1046                 goto Free_second_object;
1047
1048         forbidden_pages_map = bm1;
1049         free_pages_map = bm2;
1050         mark_nosave_pages(forbidden_pages_map);
1051
1052         pr_debug("PM: Basic memory bitmaps created\n");
1053
1054         return 0;
1055
1056  Free_second_object:
1057         kfree(bm2);
1058  Free_first_bitmap:
1059         memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1060  Free_first_object:
1061         kfree(bm1);
1062         return -ENOMEM;
1063 }
1064
1065 /**
1066  *      free_basic_memory_bitmaps - free memory bitmaps allocated by
1067  *      create_basic_memory_bitmaps().  The auxiliary pointers are necessary
1068  *      so that the bitmaps themselves are not referred to while they are being
1069  *      freed.
1070  */
1071
1072 void free_basic_memory_bitmaps(void)
1073 {
1074         struct memory_bitmap *bm1, *bm2;
1075
1076         if (WARN_ON(!(forbidden_pages_map && free_pages_map)))
1077                 return;
1078
1079         bm1 = forbidden_pages_map;
1080         bm2 = free_pages_map;
1081         forbidden_pages_map = NULL;
1082         free_pages_map = NULL;
1083         memory_bm_free(bm1, PG_UNSAFE_CLEAR);
1084         kfree(bm1);
1085         memory_bm_free(bm2, PG_UNSAFE_CLEAR);
1086         kfree(bm2);
1087
1088         pr_debug("PM: Basic memory bitmaps freed\n");
1089 }
1090
1091 /**
1092  *      snapshot_additional_pages - estimate the number of additional pages
 1093  *      needed for setting up the suspend image data structures for a given
 1094  *      zone (usually the returned value is greater than the exact number)
1095  */
1096
1097 unsigned int snapshot_additional_pages(struct zone *zone)
1098 {
1099         unsigned int rtree, nodes;
1100
1101         rtree = nodes = DIV_ROUND_UP(zone->spanned_pages, BM_BITS_PER_BLOCK);
1102         rtree += DIV_ROUND_UP(rtree * sizeof(struct rtree_node),
1103                               LINKED_PAGE_DATA_SIZE);
1104         while (nodes > 1) {
1105                 nodes = DIV_ROUND_UP(nodes, BM_ENTRIES_PER_LEVEL);
1106                 rtree += nodes;
1107         }
1108
1109         return 2 * rtree;
1110 }
1111
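/*
 * Worked example (assuming 4 KiB pages and a 64-bit kernel, where
 * struct rtree_node is 24 bytes): a zone spanning 262144 pages (1 GiB)
 * needs DIV_ROUND_UP(262144, 32768) == 8 bitmap blocks per bitmap, one
 * linked page for the eight struct rtree_node descriptors
 * (DIV_ROUND_UP(8 * 24, LINKED_PAGE_DATA_SIZE) == 1) and one inner
 * node (DIV_ROUND_UP(8, 512) == 1), so rtree == 10 and the function
 * returns 2 * 10 == 20 pages to cover both memory bitmaps.
 */
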
1112 #ifdef CONFIG_HIGHMEM
1113 /**
1114  *      count_free_highmem_pages - compute the total number of free highmem
1115  *      pages, system-wide.
1116  */
1117
1118 static unsigned int count_free_highmem_pages(void)
1119 {
1120         struct zone *zone;
1121         unsigned int cnt = 0;
1122
1123         for_each_populated_zone(zone)
1124                 if (is_highmem(zone))
1125                         cnt += zone_page_state(zone, NR_FREE_PAGES);
1126
1127         return cnt;
1128 }
1129
1130 /**
1131  *      saveable_highmem_page - Determine whether a highmem page should be
1132  *      included in the suspend image.
1133  *
 1134  *      We should save the page unless it is Nosave, NosaveFree or Reserved,
 1135  *      or it is a part of a free chunk of pages.
1136  */
1137 static struct page *saveable_highmem_page(struct zone *zone, unsigned long pfn)
1138 {
1139         struct page *page;
1140
1141         if (!pfn_valid(pfn))
1142                 return NULL;
1143
1144         page = pfn_to_page(pfn);
1145         if (page_zone(page) != zone)
1146                 return NULL;
1147
1148         BUG_ON(!PageHighMem(page));
1149
 1150         if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page) ||
1151             PageReserved(page))
1152                 return NULL;
1153
1154         if (page_is_guard(page))
1155                 return NULL;
1156
1157         return page;
1158 }
1159
1160 /**
1161  *      count_highmem_pages - compute the total number of saveable highmem
1162  *      pages.
1163  */
1164
1165 static unsigned int count_highmem_pages(void)
1166 {
1167         struct zone *zone;
1168         unsigned int n = 0;
1169
1170         for_each_populated_zone(zone) {
1171                 unsigned long pfn, max_zone_pfn;
1172
1173                 if (!is_highmem(zone))
1174                         continue;
1175
1176                 mark_free_pages(zone);
1177                 max_zone_pfn = zone_end_pfn(zone);
1178                 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1179                         if (saveable_highmem_page(zone, pfn))
1180                                 n++;
1181         }
1182         return n;
1183 }
1184 #else
1185 static inline void *saveable_highmem_page(struct zone *z, unsigned long p)
1186 {
1187         return NULL;
1188 }
1189 #endif /* CONFIG_HIGHMEM */
1190
1191 /**
1192  *      saveable_page - Determine whether a non-highmem page should be included
1193  *      in the suspend image.
1194  *
1195  *      We should save the page if it isn't Nosave, and is not in the range
1196  *      of pages statically defined as 'unsaveable', and it isn't a part of
1197  *      a free chunk of pages.
1198  */
1199 static struct page *saveable_page(struct zone *zone, unsigned long pfn)
1200 {
1201         struct page *page;
1202
1203         if (!pfn_valid(pfn))
1204                 return NULL;
1205
1206         page = pfn_to_page(pfn);
1207         if (page_zone(page) != zone)
1208                 return NULL;
1209
1210         BUG_ON(PageHighMem(page));
1211
1212         if (swsusp_page_is_forbidden(page) || swsusp_page_is_free(page))
1213                 return NULL;
1214
1215         if (PageReserved(page)
1216             && (!kernel_page_present(page) || pfn_is_nosave(pfn)))
1217                 return NULL;
1218
1219         if (page_is_guard(page))
1220                 return NULL;
1221
1222         return page;
1223 }
1224
1225 /**
1226  *      count_data_pages - compute the total number of saveable non-highmem
1227  *      pages.
1228  */
1229
1230 static unsigned int count_data_pages(void)
1231 {
1232         struct zone *zone;
1233         unsigned long pfn, max_zone_pfn;
1234         unsigned int n = 0;
1235
1236         for_each_populated_zone(zone) {
1237                 if (is_highmem(zone))
1238                         continue;
1239
1240                 mark_free_pages(zone);
1241                 max_zone_pfn = zone_end_pfn(zone);
1242                 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1243                         if (saveable_page(zone, pfn))
1244                                 n++;
1245         }
1246         return n;
1247 }
1248
 1249 /* This is needed because copy_page and memcpy are not usable for copying
1250  * task structs.
1251  */
1252 static inline void do_copy_page(long *dst, long *src)
1253 {
1254         int n;
1255
1256         for (n = PAGE_SIZE / sizeof(long); n; n--)
1257                 *dst++ = *src++;
1258 }
1259
1260
1261 /**
1262  *      safe_copy_page - check if the page we are going to copy is marked as
1263  *              present in the kernel page tables (this always is the case if
1264  *              CONFIG_DEBUG_PAGEALLOC is not set and in that case
1265  *              kernel_page_present() always returns 'true').
1266  */
1267 static void safe_copy_page(void *dst, struct page *s_page)
1268 {
1269         if (kernel_page_present(s_page)) {
1270                 do_copy_page(dst, page_address(s_page));
1271         } else {
1272                 kernel_map_pages(s_page, 1, 1);
1273                 do_copy_page(dst, page_address(s_page));
1274                 kernel_map_pages(s_page, 1, 0);
1275         }
1276 }
1277
1278
1279 #ifdef CONFIG_HIGHMEM
1280 static inline struct page *page_is_saveable(struct zone *zone, unsigned long pfn)
1281 {
1282         return is_highmem(zone) ?
1283                 saveable_highmem_page(zone, pfn) : saveable_page(zone, pfn);
1284 }
1285
1286 static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1287 {
1288         struct page *s_page, *d_page;
1289         void *src, *dst;
1290
1291         s_page = pfn_to_page(src_pfn);
1292         d_page = pfn_to_page(dst_pfn);
1293         if (PageHighMem(s_page)) {
1294                 src = kmap_atomic(s_page);
1295                 dst = kmap_atomic(d_page);
1296                 do_copy_page(dst, src);
1297                 kunmap_atomic(dst);
1298                 kunmap_atomic(src);
1299         } else {
1300                 if (PageHighMem(d_page)) {
1301                         /* Page pointed to by src may contain some kernel
1302                          * data modified by kmap_atomic()
1303                          */
1304                         safe_copy_page(buffer, s_page);
1305                         dst = kmap_atomic(d_page);
1306                         copy_page(dst, buffer);
1307                         kunmap_atomic(dst);
1308                 } else {
1309                         safe_copy_page(page_address(d_page), s_page);
1310                 }
1311         }
1312 }
1313 #else
1314 #define page_is_saveable(zone, pfn)     saveable_page(zone, pfn)
1315
1316 static inline void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1317 {
1318         safe_copy_page(page_address(pfn_to_page(dst_pfn)),
1319                                 pfn_to_page(src_pfn));
1320 }
1321 #endif /* CONFIG_HIGHMEM */
1322
1323 static void copy_data_pages(struct memory_bitmap *copy_bm,
1324                             struct memory_bitmap *orig_bm)
1325 {
1326         struct zone *zone;
1327         unsigned long pfn;
1328
1329         for_each_populated_zone(zone) {
1330                 unsigned long max_zone_pfn;
1331
1332                 mark_free_pages(zone);
1333                 max_zone_pfn = zone_end_pfn(zone);
1334                 for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
1335                         if (page_is_saveable(zone, pfn))
1336                                 memory_bm_set_bit(orig_bm, pfn);
1337         }
1338         memory_bm_position_reset(orig_bm);
1339         memory_bm_position_reset(copy_bm);
1340         for(;;) {
1341                 pfn = memory_bm_next_pfn(orig_bm);
1342                 if (unlikely(pfn == BM_END_OF_MAP))
1343                         break;
1344                 copy_data_page(memory_bm_next_pfn(copy_bm), pfn);
1345         }
1346 }
1347
1348 /* Total number of image pages */
1349 static unsigned int nr_copy_pages;
1350 /* Number of pages needed for saving the original pfns of the image pages */
1351 static unsigned int nr_meta_pages;
1352 /*
1353  * Numbers of normal and highmem page frames allocated for hibernation image
1354  * before suspending devices.
1355  */
1356 unsigned int alloc_normal, alloc_highmem;
1357 /*
1358  * Memory bitmap used for marking saveable pages (during hibernation) or
1359  * hibernation image pages (during restore)
1360  */
1361 static struct memory_bitmap orig_bm;
1362 /*
1363  * Memory bitmap used during hibernation for marking allocated page frames that
1364  * will contain copies of saveable pages.  During restore it is initially used
1365  * for marking hibernation image pages, but then the set bits from it are
1366  * duplicated in @orig_bm and it is released.  On highmem systems it is next
1367  * used for marking "safe" highmem pages, but it has to be reinitialized for
1368  * this purpose.
1369  */
1370 static struct memory_bitmap copy_bm;
1371
1372 /**
1373  *      swsusp_free - free pages allocated for the suspend.
1374  *
 1375  *      Suspend pages are allocated before the atomic copy is made, so we
1376  *      need to release them after the resume.
1377  */
1378
1379 void swsusp_free(void)
1380 {
1381         unsigned long fb_pfn, fr_pfn;
1382
1383         if (!forbidden_pages_map || !free_pages_map)
1384                 goto out;
1385
1386         memory_bm_position_reset(forbidden_pages_map);
1387         memory_bm_position_reset(free_pages_map);
1388
1389 loop:
1390         fr_pfn = memory_bm_next_pfn(free_pages_map);
1391         fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1392
1393         /*
1394          * Find the next bit set in both bitmaps. This is guaranteed to
1395          * terminate when fb_pfn == fr_pfn == BM_END_OF_MAP.
1396          */
1397         do {
1398                 if (fb_pfn < fr_pfn)
1399                         fb_pfn = memory_bm_next_pfn(forbidden_pages_map);
1400                 if (fr_pfn < fb_pfn)
1401                         fr_pfn = memory_bm_next_pfn(free_pages_map);
1402         } while (fb_pfn != fr_pfn);
1403
1404         if (fr_pfn != BM_END_OF_MAP && pfn_valid(fr_pfn)) {
1405                 struct page *page = pfn_to_page(fr_pfn);
1406
1407                 memory_bm_clear_current(forbidden_pages_map);
1408                 memory_bm_clear_current(free_pages_map);
1409                 __free_page(page);
1410                 goto loop;
1411         }
1412
1413 out:
1414         nr_copy_pages = 0;
1415         nr_meta_pages = 0;
1416         restore_pblist = NULL;
1417         buffer = NULL;
1418         alloc_normal = 0;
1419         alloc_highmem = 0;
1420 }
1421
1422 /* Helper functions used for the shrinking of memory. */
1423
1424 #define GFP_IMAGE       (GFP_KERNEL | __GFP_NOWARN)
1425
1426 /**
1427  * preallocate_image_pages - Allocate a number of pages for hibernation image
1428  * @nr_pages: Number of page frames to allocate.
1429  * @mask: GFP flags to use for the allocation.
1430  *
1431  * Return value: Number of page frames actually allocated
1432  */
1433 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)
1434 {
1435         unsigned long nr_alloc = 0;
1436
1437         while (nr_pages > 0) {
1438                 struct page *page;
1439
1440                 page = alloc_image_page(mask);
1441                 if (!page)
1442                         break;
1443                 memory_bm_set_bit(&copy_bm, page_to_pfn(page));
1444                 if (PageHighMem(page))
1445                         alloc_highmem++;
1446                 else
1447                         alloc_normal++;
1448                 nr_pages--;
1449                 nr_alloc++;
1450         }
1451
1452         return nr_alloc;
1453 }
1454
1455 static unsigned long preallocate_image_memory(unsigned long nr_pages,
1456                                               unsigned long avail_normal)
1457 {
1458         unsigned long alloc;
1459
1460         if (avail_normal <= alloc_normal)
1461                 return 0;
1462
1463         alloc = avail_normal - alloc_normal;
1464         if (nr_pages < alloc)
1465                 alloc = nr_pages;
1466
1467         return preallocate_image_pages(alloc, GFP_IMAGE);
1468 }
1469
1470 #ifdef CONFIG_HIGHMEM
1471 static unsigned long preallocate_image_highmem(unsigned long nr_pages)
1472 {
1473         return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);
1474 }
1475
1476 /**
1477  *  __fraction - Compute (an approximation of) x * (multiplier / base)
1478  */
1479 static unsigned long __fraction(u64 x, u64 multiplier, u64 base)
1480 {
1481         x *= multiplier;
1482         do_div(x, base);
1483         return (unsigned long)x;
1484 }
1485
1486 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1487                                                   unsigned long highmem,
1488                                                   unsigned long total)
1489 {
1490         unsigned long alloc = __fraction(nr_pages, highmem, total);
1491
1492         return preallocate_image_pages(alloc, GFP_IMAGE | __GFP_HIGHMEM);
1493 }
1494 #else /* CONFIG_HIGHMEM */
1495 static inline unsigned long preallocate_image_highmem(unsigned long nr_pages)
1496 {
1497         return 0;
1498 }
1499
1500 static inline unsigned long preallocate_highmem_fraction(unsigned long nr_pages,
1501                                                          unsigned long highmem,
1502                                                          unsigned long total)
1503 {
1504         return 0;
1505 }
1506 #endif /* CONFIG_HIGHMEM */
1507
1508 /**
1509  * free_unnecessary_pages - Release preallocated pages not needed for the image
1510  */
1511 static unsigned long free_unnecessary_pages(void)
1512 {
1513         unsigned long save, to_free_normal, to_free_highmem, free;
1514
1515         save = count_data_pages();
1516         if (alloc_normal >= save) {
1517                 to_free_normal = alloc_normal - save;
1518                 save = 0;
1519         } else {
1520                 to_free_normal = 0;
1521                 save -= alloc_normal;
1522         }
1523         save += count_highmem_pages();
1524         if (alloc_highmem >= save) {
1525                 to_free_highmem = alloc_highmem - save;
1526         } else {
1527                 to_free_highmem = 0;
1528                 save -= alloc_highmem;
1529                 if (to_free_normal > save)
1530                         to_free_normal -= save;
1531                 else
1532                         to_free_normal = 0;
1533         }
1534         free = to_free_normal + to_free_highmem;
1535
1536         memory_bm_position_reset(&copy_bm);
1537
1538         while (to_free_normal > 0 || to_free_highmem > 0) {
1539                 unsigned long pfn = memory_bm_next_pfn(&copy_bm);
1540                 struct page *page = pfn_to_page(pfn);
1541
1542                 if (PageHighMem(page)) {
1543                         if (!to_free_highmem)
1544                                 continue;
1545                         to_free_highmem--;
1546                         alloc_highmem--;
1547                 } else {
1548                         if (!to_free_normal)
1549                                 continue;
1550                         to_free_normal--;
1551                         alloc_normal--;
1552                 }
1553                 memory_bm_clear_bit(&copy_bm, pfn);
1554                 swsusp_unset_page_forbidden(page);
1555                 swsusp_unset_page_free(page);
1556                 __free_page(page);
1557         }
1558
1559         return free;
1560 }
1561
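/*
 * Worked example (hypothetical numbers): with alloc_normal == 1000,
 * alloc_highmem == 500, 700 saveable lowmem pages and 400 saveable
 * highmem pages, the lowmem surplus is 1000 - 700 == 300 and the
 * highmem surplus is 500 - 400 == 100, so 400 preallocated page
 * frames are returned to the system.
 */
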
1562 /**
1563  * minimum_image_size - Estimate the minimum acceptable size of an image
1564  * @saveable: Number of saveable pages in the system.
1565  *
1566  * We want to avoid attempting to free too much memory too hard, so estimate the
1567  * minimum acceptable size of a hibernation image to use as the lower limit for
1568  * preallocating memory.
1569  *
1570  * We assume that the minimum image size should be proportional to
1571  *
1572  * [number of saveable pages] - [number of pages that can be freed in theory]
1573  *
1574  * where the second term is the sum of (1) reclaimable slab pages, (2) active
1575  * and (3) inactive anonymous pages, (4) active and (5) inactive file pages,
1576  * minus mapped file pages.
1577  */
1578 static unsigned long minimum_image_size(unsigned long saveable)
1579 {
1580         unsigned long size;
1581
1582         size = global_page_state(NR_SLAB_RECLAIMABLE)
1583                 + global_page_state(NR_ACTIVE_ANON)
1584                 + global_page_state(NR_INACTIVE_ANON)
1585                 + global_page_state(NR_ACTIVE_FILE)
1586                 + global_page_state(NR_INACTIVE_FILE)
1587                 - global_page_state(NR_FILE_MAPPED);
1588
1589         return saveable <= size ? 0 : saveable - size;
1590 }
1591
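/*
 * Worked example (hypothetical numbers): with 200000 saveable pages
 * and 150000 pages counted as freeable by the sum above, the image
 * cannot reasonably shrink below 200000 - 150000 == 50000 pages, so
 * that is the lower limit used by hibernate_preallocate_memory().
 */
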
1592 /**
1593  * hibernate_preallocate_memory - Preallocate memory for hibernation image
1594  *
1595  * To create a hibernation image it is necessary to make a copy of every page
1596  * frame in use.  We also need a number of page frames to be free during
1597  * hibernation for allocations made while saving the image and for device
1598  * drivers, in case they need to allocate memory from their hibernation
1599  * callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
1600  * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
1601  * /sys/power/reserved_size), respectively).  To make this happen, we compute the
1602  * total number of available page frames and allocate at least
1603  *
1604  * ([page frames total] + PAGES_FOR_IO + [metadata pages]) / 2
1605  *  + 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE)
1606  *
1607  * of them, which corresponds to the maximum size of a hibernation image.
1608  *
1609  * If image_size is set below the number following from the above formula,
1610  * the preallocation of memory is continued until the total number of saveable
1611  * pages in the system is below the requested image size or the minimum
1612  * acceptable image size returned by minimum_image_size(), whichever is greater.
1613  */
1614 int hibernate_preallocate_memory(void)
1615 {
1616         struct zone *zone;
1617         unsigned long saveable, size, max_size, count, highmem, pages = 0;
1618         unsigned long alloc, save_highmem, pages_highmem, avail_normal;
1619         ktime_t start, stop;
1620         int error;
1621
1622         printk(KERN_INFO "PM: Preallocating image memory... ");
1623         start = ktime_get();
1624
1625         error = memory_bm_create(&orig_bm, GFP_IMAGE, PG_ANY);
1626         if (error)
1627                 goto err_out;
1628
1629         error = memory_bm_create(&copy_bm, GFP_IMAGE, PG_ANY);
1630         if (error)
1631                 goto err_out;
1632
1633         alloc_normal = 0;
1634         alloc_highmem = 0;
1635
1636         /* Count the number of saveable data pages. */
1637         save_highmem = count_highmem_pages();
1638         saveable = count_data_pages();
1639
1640         /*
1641          * Compute the total number of page frames we can use (count) and the
1642          * number of pages needed for image metadata (size).
1643          */
1644         count = saveable;
1645         saveable += save_highmem;
1646         highmem = save_highmem;
1647         size = 0;
1648         for_each_populated_zone(zone) {
1649                 size += snapshot_additional_pages(zone);
1650                 if (is_highmem(zone))
1651                         highmem += zone_page_state(zone, NR_FREE_PAGES);
1652                 else
1653                         count += zone_page_state(zone, NR_FREE_PAGES);
1654         }
1655         avail_normal = count;
1656         count += highmem;
1657         count -= totalreserve_pages;
1658
1659         /* Add number of pages required for page keys (s390 only). */
1660         size += page_key_additional_pages(saveable);
1661
1662         /* Compute the maximum number of saveable pages to leave in memory. */
1663         max_size = (count - (size + PAGES_FOR_IO)) / 2
1664                         - 2 * DIV_ROUND_UP(reserved_size, PAGE_SIZE);
1665         /* Compute the desired number of image pages specified by image_size. */
1666         size = DIV_ROUND_UP(image_size, PAGE_SIZE);
1667         if (size > max_size)
1668                 size = max_size;
1669         /*
1670          * If the desired number of image pages is at least as large as the
1671          * current number of saveable pages in memory, allocate page frames for
1672          * the image and we're done.
1673          */
1674         if (size >= saveable) {
1675                 pages = preallocate_image_highmem(save_highmem);
1676                 pages += preallocate_image_memory(saveable - pages, avail_normal);
1677                 goto out;
1678         }
1679
1680         /* Estimate the minimum size of the image. */
1681         pages = minimum_image_size(saveable);
1682         /*
1683          * To avoid excessive pressure on the normal zone, leave room in it to
1684          * accommodate an image of the minimum size (unless it's already too
1685          * small, in which case don't preallocate pages from it at all).
1686          */
1687         if (avail_normal > pages)
1688                 avail_normal -= pages;
1689         else
1690                 avail_normal = 0;
1691         if (size < pages)
1692                 size = min_t(unsigned long, pages, max_size);
1693
1694         /*
1695          * Let the memory management subsystem know that we're going to need a
1696          * large number of page frames to allocate and make it free some memory.
1697          * NOTE: If this is not done, performance will be hurt badly in some
1698          * test cases.
1699          */
1700         shrink_all_memory(saveable - size);
1701
1702         /*
1703          * The number of saveable pages in memory was too high, so apply some
1704          * pressure to decrease it.  First, make room for the largest possible
1705          * image and fail if that doesn't work.  Next, try to decrease the size
1706          * of the image as much as indicated by 'size' using allocations from
1707          * highmem and non-highmem zones separately.
1708          */
1709         pages_highmem = preallocate_image_highmem(highmem / 2);
1710         alloc = count - max_size;
1711         if (alloc > pages_highmem)
1712                 alloc -= pages_highmem;
1713         else
1714                 alloc = 0;
1715         pages = preallocate_image_memory(alloc, avail_normal);
1716         if (pages < alloc) {
1717                 /* We have exhausted non-highmem pages, try highmem. */
1718                 alloc -= pages;
1719                 pages += pages_highmem;
1720                 pages_highmem = preallocate_image_highmem(alloc);
1721                 if (pages_highmem < alloc)
1722                         goto err_out;
1723                 pages += pages_highmem;
1724                 /*
1725                  * size is the desired number of saveable pages to leave in
1726                  * memory, so try to preallocate (all memory - size) pages.
1727                  */
1728                 alloc = (count - pages) - size;
1729                 pages += preallocate_image_highmem(alloc);
1730         } else {
1731                 /*
1732                  * There are approximately max_size saveable pages at this point
1733                  * and we want to reduce this number down to size.
1734                  */
1735                 alloc = max_size - size;
1736                 size = preallocate_highmem_fraction(alloc, highmem, count);
1737                 pages_highmem += size;
1738                 alloc -= size;
1739                 size = preallocate_image_memory(alloc, avail_normal);
1740                 pages_highmem += preallocate_image_highmem(alloc - size);
1741                 pages += pages_highmem + size;
1742         }
1743
1744         /*
1745          * We only need as many page frames for the image as there are saveable
1746          * pages in memory, but we have allocated more.  Release the excessive
1747          * ones now.
1748          */
1749         pages -= free_unnecessary_pages();
1750
1751  out:
1752         stop = ktime_get();
1753         printk(KERN_CONT "done (allocated %lu pages)\n", pages);
1754         swsusp_show_speed(start, stop, pages, "Allocated");
1755
1756         return 0;
1757
1758  err_out:
1759         printk(KERN_CONT "\n");
1760         swsusp_free();
1761         return -ENOMEM;
1762 }
1763
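/*
 * Worked example of the formula above (hypothetical numbers, assuming
 * 4 KiB pages and PAGES_FOR_IO = 1024): with count = 250000 usable page
 * frames, size = 500 metadata pages and 256 reserved pages,
 * max_size = (250000 - 1524) / 2 - 512 = 123726, so at least
 * count - max_size = 126274 page frames are preallocated, which matches
 * (250000 + 1024 + 500) / 2 + 2 * 256 = 126274.
 */
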
1764 #ifdef CONFIG_HIGHMEM
1765 /**
1766  *      count_pages_for_highmem - compute the number of non-highmem pages
1767  *      that will be necessary for creating copies of highmem pages.
1768  */
1769
1770 static unsigned int count_pages_for_highmem(unsigned int nr_highmem)
1771 {
1772         unsigned int free_highmem = count_free_highmem_pages() + alloc_highmem;
1773
1774         if (free_highmem >= nr_highmem)
1775                 nr_highmem = 0;
1776         else
1777                 nr_highmem -= free_highmem;
1778
1779         return nr_highmem;
1780 }
1781 #else
1782 static unsigned int count_pages_for_highmem(unsigned int nr_highmem) { return 0; }
1783 #endif /* CONFIG_HIGHMEM */
1784
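/*
 * Example for count_pages_for_highmem() (hypothetical numbers): if 1000
 * highmem pages have to be copied but only 600 highmem page frames are
 * available (free frames plus the ones already allocated for the image),
 * copies of the remaining 400 pages must be placed in normal memory, so
 * the function returns 400.
 */
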
1785 /**
1786  *      enough_free_mem - Make sure we have enough free memory for the
1787  *      snapshot image.
1788  */
1789
1790 static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
1791 {
1792         struct zone *zone;
1793         unsigned int free = alloc_normal;
1794
1795         for_each_populated_zone(zone)
1796                 if (!is_highmem(zone))
1797                         free += zone_page_state(zone, NR_FREE_PAGES);
1798
1799         nr_pages += count_pages_for_highmem(nr_highmem);
1800         pr_debug("PM: Normal pages needed: %u + %u, available pages: %u\n",
1801                 nr_pages, PAGES_FOR_IO, free);
1802
1803         return free > nr_pages + PAGES_FOR_IO;
1804 }
1805
1806 #ifdef CONFIG_HIGHMEM
1807 /**
1808  *      get_highmem_buffer - if there are some highmem pages in the suspend
1809  *      image, we may need the buffer to copy them and/or load their data.
1810  */
1811
1812 static inline int get_highmem_buffer(int safe_needed)
1813 {
1814         buffer = get_image_page(GFP_ATOMIC | __GFP_COLD, safe_needed);
1815         return buffer ? 0 : -ENOMEM;
1816 }
1817
1818 /**
1819  *      alloc_highmem_pages - allocate some highmem pages for the image.
1820  *      Try to allocate as many pages as needed, but if the number of free
1821  *      highmem pages is less than that, allocate them all.
1822  */
1823
1824 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1825                                                unsigned int nr_highmem)
1826 {
1827         unsigned int to_alloc = count_free_highmem_pages();
1828
1829         if (to_alloc > nr_highmem)
1830                 to_alloc = nr_highmem;
1831
1832         nr_highmem -= to_alloc;
1833         while (to_alloc-- > 0) {
1834                 struct page *page;
1835
1836                 page = alloc_image_page(__GFP_HIGHMEM|__GFP_KSWAPD_RECLAIM);
1837                 memory_bm_set_bit(bm, page_to_pfn(page));
1838         }
1839         return nr_highmem;
1840 }
1841 #else
1842 static inline int get_highmem_buffer(int safe_needed) { return 0; }
1843
1844 static inline unsigned int alloc_highmem_pages(struct memory_bitmap *bm,
1845                                                unsigned int n) { return 0; }
1846 #endif /* CONFIG_HIGHMEM */
1847
1848 /**
1849  *      swsusp_alloc - allocate memory for the suspend image
1850  *
1851  *      We first try to allocate as many highmem pages as there are
1852  *      saveable highmem pages in the system.  If that fails, we allocate
1853  *      non-highmem pages for the copies of the remaining highmem ones.
1854  *
1855  *      In this approach it is likely that the copies of highmem pages will
1856  *      also be located in high memory, because of the way in which
1857  *      copy_data_pages() works.
1858  */
1859
1860 static int swsusp_alloc(struct memory_bitmap *orig_bm,
1861                         struct memory_bitmap *copy_bm,
1862                         unsigned int nr_pages, unsigned int nr_highmem)
1863 {
1864         if (nr_highmem > 0) {
1865                 if (get_highmem_buffer(PG_ANY))
1866                         goto err_out;
1867                 if (nr_highmem > alloc_highmem) {
1868                         nr_highmem -= alloc_highmem;
1869                         nr_pages += alloc_highmem_pages(copy_bm, nr_highmem);
1870                 }
1871         }
1872         if (nr_pages > alloc_normal) {
1873                 nr_pages -= alloc_normal;
1874                 while (nr_pages-- > 0) {
1875                         struct page *page;
1876
1877                         page = alloc_image_page(GFP_ATOMIC | __GFP_COLD);
1878                         if (!page)
1879                                 goto err_out;
1880                         memory_bm_set_bit(copy_bm, page_to_pfn(page));
1881                 }
1882         }
1883
1884         return 0;
1885
1886  err_out:
1887         swsusp_free();
1888         return -ENOMEM;
1889 }
1890
1891 asmlinkage __visible int swsusp_save(void)
1892 {
1893         unsigned int nr_pages, nr_highmem;
1894
1895         printk(KERN_INFO "PM: Creating hibernation image:\n");
1896
1897         drain_local_pages(NULL);
1898         nr_pages = count_data_pages();
1899         nr_highmem = count_highmem_pages();
1900         printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
1901
1902         if (!enough_free_mem(nr_pages, nr_highmem)) {
1903                 printk(KERN_ERR "PM: Not enough free memory\n");
1904                 return -ENOMEM;
1905         }
1906
1907         if (swsusp_alloc(&orig_bm, &copy_bm, nr_pages, nr_highmem)) {
1908                 printk(KERN_ERR "PM: Memory allocation failed\n");
1909                 return -ENOMEM;
1910         }
1911
1912         /* During allocation of the suspend pagedir, new cold pages may appear.
1913          * Kill them.
1914          */
1915         drain_local_pages(NULL);
1916         copy_data_pages(&copy_bm, &orig_bm);
1917
1918         /*
1919          * End of critical section. From now on, we can write to memory,
1920          * but we should not touch disk. This especially means we must _not_
1921          * touch swap space! Except we must write out our image of course.
1922          */
1923
1924         nr_pages += nr_highmem;
1925         nr_copy_pages = nr_pages;
1926         nr_meta_pages = DIV_ROUND_UP(nr_pages * sizeof(long), PAGE_SIZE);
1927
1928         printk(KERN_INFO "PM: Hibernation image created (%u pages copied)\n",
1929                 nr_pages);
1930
1931         return 0;
1932 }
1933
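#if 0
/*
 * Illustrative sketch only (not real architecture code): swsusp_save() runs
 * with interrupts disabled and is entered from the architecture's
 * swsusp_arch_suspend(), which is typically written in assembly.  A rough C
 * rendering of that entry point, with a hypothetical register-saving
 * helper, might look like this:
 */
int swsusp_arch_suspend(void)
{
	/* Save the CPU registers where the resume path can restore them. */
	save_cpu_registers();	/* hypothetical helper */
	/* Create the in-memory copy of all saveable pages. */
	return swsusp_save();
}
#endif
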
1934 #ifndef CONFIG_ARCH_HIBERNATION_HEADER
1935 static int init_header_complete(struct swsusp_info *info)
1936 {
1937         memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1938         info->version_code = LINUX_VERSION_CODE;
1939         return 0;
1940 }
1941
1942 static char *check_image_kernel(struct swsusp_info *info)
1943 {
1944         if (info->version_code != LINUX_VERSION_CODE)
1945                 return "kernel version";
1946         if (strcmp(info->uts.sysname, init_utsname()->sysname))
1947                 return "system type";
1948         if (strcmp(info->uts.release, init_utsname()->release))
1949                 return "kernel release";
1950         if (strcmp(info->uts.version, init_utsname()->version))
1951                 return "version";
1952         if (strcmp(info->uts.machine, init_utsname()->machine))
1953                 return "machine";
1954         return NULL;
1955 }
1956 #endif /* CONFIG_ARCH_HIBERNATION_HEADER */
1957
1958 unsigned long snapshot_get_image_size(void)
1959 {
1960         return nr_copy_pages + nr_meta_pages + 1;
1961 }
1962
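/*
 * Worked example (assuming a 64-bit kernel with 4 KiB pages): for
 * nr_copy_pages = 100000, the PFN list takes
 * DIV_ROUND_UP(100000 * sizeof(long), PAGE_SIZE) = 196 metadata pages,
 * so the image occupies 100000 + 196 + 1 = 100197 page frames, the extra
 * one being the header filled in by init_header() below.
 */
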
1963 static int init_header(struct swsusp_info *info)
1964 {
1965         memset(info, 0, sizeof(struct swsusp_info));
1966         info->num_physpages = get_num_physpages();
1967         info->image_pages = nr_copy_pages;
1968         info->pages = snapshot_get_image_size();
1969         info->size = info->pages;
1970         info->size <<= PAGE_SHIFT;
1971         return init_header_complete(info);
1972 }
1973
1974 /**
1975  *      pack_pfns - pfns corresponding to the set bits found in the bitmap @bm
1976  *      are stored in the array @buf[] (1 page at a time)
1977  */
1978
1979 static inline void pack_pfns(unsigned long *buf, struct memory_bitmap *bm)
1980 {
1981         int j;
1982
1983         for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
1984                 buf[j] = memory_bm_next_pfn(bm);
1985                 if (unlikely(buf[j] == BM_END_OF_MAP))
1986                         break;
1987                 /* Save page key for data page (s390 only). */
1988                 page_key_read(buf + j);
1989         }
1990 }
1991
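/*
 * Capacity note: each meta page packed here holds PAGE_SIZE / sizeof(long)
 * PFNs, i.e. 512 entries on a 64-bit kernel with 4 KiB pages (1024 on a
 * comparable 32-bit one); a BM_END_OF_MAP entry terminates the last,
 * partially filled page.
 */
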
1992 /**
1993  *      snapshot_read_next - used for reading the system memory snapshot.
1994  *
1995  *      On the first call to it @handle should point to a zeroed
1996  *      snapshot_handle structure.  The structure gets updated and a pointer
1997  *      to it should be passed to this function on every subsequent call.
1998  *
1999  *      On success the function returns a positive number.  Then, the caller
2000  *      is allowed to read up to the returned number of bytes from the memory
2001  *      location computed by the data_of() macro.
2002  *
2003  *      The function returns 0 to indicate the end of the data stream, and a
2004  *      negative number is returned on error.  In such cases the
2005  *      structure pointed to by @handle is not updated and should not be used
2006  *      any more.
2007  */
2008
2009 int snapshot_read_next(struct snapshot_handle *handle)
2010 {
2011         if (handle->cur > nr_meta_pages + nr_copy_pages)
2012                 return 0;
2013
2014         if (!buffer) {
2015                 /* This way the buffer will be freed by swsusp_free() */
2016                 buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2017                 if (!buffer)
2018                         return -ENOMEM;
2019         }
2020         if (!handle->cur) {
2021                 int error;
2022
2023                 error = init_header((struct swsusp_info *)buffer);
2024                 if (error)
2025                         return error;
2026                 handle->buffer = buffer;
2027                 memory_bm_position_reset(&orig_bm);
2028                 memory_bm_position_reset(&copy_bm);
2029         } else if (handle->cur <= nr_meta_pages) {
2030                 clear_page(buffer);
2031                 pack_pfns(buffer, &orig_bm);
2032         } else {
2033                 struct page *page;
2034
2035                 page = pfn_to_page(memory_bm_next_pfn(&copy_bm));
2036                 if (PageHighMem(page)) {
2037                         /* Highmem pages are copied to the buffer,
2038                          * because we can't return with a kmapped
2039                          * highmem page (we may not be called again).
2040                          */
2041                         void *kaddr;
2042
2043                         kaddr = kmap_atomic(page);
2044                         copy_page(buffer, kaddr);
2045                         kunmap_atomic(kaddr);
2046                         handle->buffer = buffer;
2047                 } else {
2048                         handle->buffer = page_address(page);
2049                 }
2050         }
2051         handle->cur++;
2052         return PAGE_SIZE;
2053 }
2054
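#if 0
/*
 * Illustrative sketch of a snapshot_read_next() caller (hypothetical code
 * with simplified error handling): the image-saving side, such as the swap
 * writer in kernel/power/swap.c, drives the interface roughly like this.
 * write_page_somewhere() is a made-up placeholder for the actual output
 * routine.
 */
static int example_save_image(struct snapshot_handle *handle)
{
	int nbytes;

	memset(handle, 0, sizeof(*handle));	/* first call: zeroed handle */
	while ((nbytes = snapshot_read_next(handle)) > 0) {
		/* Up to nbytes bytes may be read from data_of(*handle). */
		if (write_page_somewhere(data_of(*handle)))
			return -EIO;
	}
	return nbytes;	/* 0 on end of image, negative on error */
}
#endif
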
2055 static void duplicate_memory_bitmap(struct memory_bitmap *dst,
2056                                     struct memory_bitmap *src)
2057 {
2058         unsigned long pfn;
2059
2060         memory_bm_position_reset(src);
2061         pfn = memory_bm_next_pfn(src);
2062         while (pfn != BM_END_OF_MAP) {
2063                 memory_bm_set_bit(dst, pfn);
2064                 pfn = memory_bm_next_pfn(src);
2065         }
2066 }
2067
2068 /**
2069  *      mark_unsafe_pages - mark the pages that cannot be used for storing
2070  *      the image during resume, because they conflict with the pages that
2071  *      had been used before suspend
2072  */
2073
2074 static void mark_unsafe_pages(struct memory_bitmap *bm)
2075 {
2076         unsigned long pfn;
2077
2078         /* Clear the "free"/"unsafe" bit for all PFNs */
2079         memory_bm_position_reset(free_pages_map);
2080         pfn = memory_bm_next_pfn(free_pages_map);
2081         while (pfn != BM_END_OF_MAP) {
2082                 memory_bm_clear_current(free_pages_map);
2083                 pfn = memory_bm_next_pfn(free_pages_map);
2084         }
2085
2086         /* Mark pages that correspond to the "original" PFNs as "unsafe" */
2087         duplicate_memory_bitmap(free_pages_map, bm);
2088
2089         allocated_unsafe_pages = 0;
2090 }
2091
2092 static int check_header(struct swsusp_info *info)
2093 {
2094         char *reason;
2095
2096         reason = check_image_kernel(info);
2097         if (!reason && info->num_physpages != get_num_physpages())
2098                 reason = "memory size";
2099         if (reason) {
2100                 printk(KERN_ERR "PM: Image mismatch: %s\n", reason);
2101                 return -EPERM;
2102         }
2103         return 0;
2104 }
2105
2106 /**
2107  *      load_header - check the image header and copy the data from it
2108  */
2109
2110 static int load_header(struct swsusp_info *info)
2111 {
2112         int error;
2113
2114         restore_pblist = NULL;
2115         error = check_header(info);
2116         if (!error) {
2117                 nr_copy_pages = info->image_pages;
2118                 nr_meta_pages = info->pages - info->image_pages - 1;
2119         }
2120         return error;
2121 }
2122
2123 /**
2124  *      unpack_orig_pfns - for each element of @buf[] (1 page at a time) set
2125  *      the corresponding bit in the memory bitmap @bm
2126  */
2127 static int unpack_orig_pfns(unsigned long *buf, struct memory_bitmap *bm)
2128 {
2129         int j;
2130
2131         for (j = 0; j < PAGE_SIZE / sizeof(long); j++) {
2132                 if (unlikely(buf[j] == BM_END_OF_MAP))
2133                         break;
2134
2135                 /* Extract and buffer page key for data page (s390 only). */
2136                 page_key_memorize(buf + j);
2137
2138                 if (pfn_valid(buf[j]) && memory_bm_pfn_present(bm, buf[j]))
2139                         memory_bm_set_bit(bm, buf[j]);
2140                 else
2141                         return -EFAULT;
2142         }
2143
2144         return 0;
2145 }
2146
2147 #ifdef CONFIG_HIGHMEM
2148 /* struct highmem_pbe is used for creating the list of highmem pages that
2149  * should be restored atomically during the resume from disk, because the page
2150  * frames they have occupied before the suspend are in use.
2151  */
2152 struct highmem_pbe {
2153         struct page *copy_page; /* data is here now */
2154         struct page *orig_page; /* data was here before the suspend */
2155         struct highmem_pbe *next;
2156 };
2157
2158 /* List of highmem PBEs needed for restoring the highmem pages that were
2159  * allocated before the suspend and included in the suspend image, but have
2160  * also been allocated by the "resume" kernel, so their contents cannot be
2161  * written directly to their "original" page frames.
2162  */
2163 static struct highmem_pbe *highmem_pblist;
2164
2165 /**
2166  *      count_highmem_image_pages - compute the number of highmem pages in the
2167  *      suspend image.  The bits in the memory bitmap @bm that correspond to the
2168  *      image pages are assumed to be set.
2169  */
2170
2171 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm)
2172 {
2173         unsigned long pfn;
2174         unsigned int cnt = 0;
2175
2176         memory_bm_position_reset(bm);
2177         pfn = memory_bm_next_pfn(bm);
2178         while (pfn != BM_END_OF_MAP) {
2179                 if (PageHighMem(pfn_to_page(pfn)))
2180                         cnt++;
2181
2182                 pfn = memory_bm_next_pfn(bm);
2183         }
2184         return cnt;
2185 }
2186
2187 /**
2188  *      prepare_highmem_image - try to allocate as many highmem pages as
2189  *      there are highmem image pages (@nr_highmem_p points to the variable
2190  *      containing the number of highmem image pages).  The pages that are
2191  *      "safe" (ie. will not be overwritten when the suspend image is
2192  *      restored) have the corresponding bits set in @bm (it must be
2193  *      uninitialized).
2194  *
2195  *      NOTE: This function should not be called if there are no highmem
2196  *      image pages.
2197  */
2198
2199 static unsigned int safe_highmem_pages;
2200
2201 static struct memory_bitmap *safe_highmem_bm;
2202
2203 static int prepare_highmem_image(struct memory_bitmap *bm,
2204                                  unsigned int *nr_highmem_p)
2205 {
2206         unsigned int to_alloc;
2207
2208         if (memory_bm_create(bm, GFP_ATOMIC, PG_SAFE))
2209                 return -ENOMEM;
2210
2211         if (get_highmem_buffer(PG_SAFE))
2212                 return -ENOMEM;
2213
2214         to_alloc = count_free_highmem_pages();
2215         if (to_alloc > *nr_highmem_p)
2216                 to_alloc = *nr_highmem_p;
2217         else
2218                 *nr_highmem_p = to_alloc;
2219
2220         safe_highmem_pages = 0;
2221         while (to_alloc-- > 0) {
2222                 struct page *page;
2223
2224                 page = alloc_page(__GFP_HIGHMEM);
2225                 if (!swsusp_page_is_free(page)) {
2226                         /* The page is "safe", set its bit in the bitmap */
2227                         memory_bm_set_bit(bm, page_to_pfn(page));
2228                         safe_highmem_pages++;
2229                 }
2230                 /* Mark the page as allocated */
2231                 swsusp_set_page_forbidden(page);
2232                 swsusp_set_page_free(page);
2233         }
2234         memory_bm_position_reset(bm);
2235         safe_highmem_bm = bm;
2236         return 0;
2237 }
2238
2239 /**
2240  *      get_highmem_page_buffer - for given highmem image page find the buffer
2241  *      that snapshot_write_next() should set for its caller to write to.
2242  *
2243  *      If the page is to be saved to its "original" page frame or a copy of
2244  *      the page is to be made in the highmem, @buffer is returned.  Otherwise,
2245  *      the copy of the page is to be made in normal memory, so the address of
2246  *      the copy is returned.
2247  *
2248  *      If @buffer is returned, the caller of snapshot_write_next() will write
2249  *      the page's contents to @buffer, so they will have to be copied to the
2250  *      right location on the next call to snapshot_write_next() and that is
2251  *      done with the help of copy_last_highmem_page().  For this purpose, if
2252  *      @buffer is returned, @last_highmem_page is set to the page to which
2253  *      the data will have to be copied from @buffer.
2254  */
2255
2256 static struct page *last_highmem_page;
2257
2258 static void *get_highmem_page_buffer(struct page *page,
2259                                      struct chain_allocator *ca)
2260 {
2261         struct highmem_pbe *pbe;
2262         void *kaddr;
2263
2264         if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page)) {
2265                 /* We have allocated the "original" page frame and we can
2266                  * use it directly to store the loaded page.
2267                  */
2268                 last_highmem_page = page;
2269                 return buffer;
2270         }
2271         /* The "original" page frame has not been allocated and we have to
2272          * use a "safe" page frame to store the loaded page.
2273          */
2274         pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
2275         if (!pbe) {
2276                 swsusp_free();
2277                 return ERR_PTR(-ENOMEM);
2278         }
2279         pbe->orig_page = page;
2280         if (safe_highmem_pages > 0) {
2281                 struct page *tmp;
2282
2283                 /* Copy of the page will be stored in high memory */
2284                 kaddr = buffer;
2285                 tmp = pfn_to_page(memory_bm_next_pfn(safe_highmem_bm));
2286                 safe_highmem_pages--;
2287                 last_highmem_page = tmp;
2288                 pbe->copy_page = tmp;
2289         } else {
2290                 /* Copy of the page will be stored in normal memory */
2291                 kaddr = safe_pages_list;
2292                 safe_pages_list = safe_pages_list->next;
2293                 pbe->copy_page = virt_to_page(kaddr);
2294         }
2295         pbe->next = highmem_pblist;
2296         highmem_pblist = pbe;
2297         return kaddr;
2298 }
2299
2300 /**
2301  *      copy_last_highmem_page - copy the contents of a highmem image page
2302  *      from @buffer, where the caller of snapshot_write_next() has placed
2303  *      them, to the right location represented by @last_highmem_page.
2304  */
2305
2306 static void copy_last_highmem_page(void)
2307 {
2308         if (last_highmem_page) {
2309                 void *dst;
2310
2311                 dst = kmap_atomic(last_highmem_page);
2312                 copy_page(dst, buffer);
2313                 kunmap_atomic(dst);
2314                 last_highmem_page = NULL;
2315         }
2316 }
2317
2318 static inline int last_highmem_page_copied(void)
2319 {
2320         return !last_highmem_page;
2321 }
2322
2323 static inline void free_highmem_data(void)
2324 {
2325         if (safe_highmem_bm)
2326                 memory_bm_free(safe_highmem_bm, PG_UNSAFE_CLEAR);
2327
2328         if (buffer)
2329                 free_image_page(buffer, PG_UNSAFE_CLEAR);
2330 }
2331 #else
2332 static unsigned int count_highmem_image_pages(struct memory_bitmap *bm) { return 0; }
2333
2334 static inline int prepare_highmem_image(struct memory_bitmap *bm,
2335                                         unsigned int *nr_highmem_p) { return 0; }
2336
2337 static inline void *get_highmem_page_buffer(struct page *page,
2338                                             struct chain_allocator *ca)
2339 {
2340         return ERR_PTR(-EINVAL);
2341 }
2342
2343 static inline void copy_last_highmem_page(void) {}
2344 static inline int last_highmem_page_copied(void) { return 1; }
2345 static inline void free_highmem_data(void) {}
2346 #endif /* CONFIG_HIGHMEM */
2347
2348 /**
2349  *      prepare_image - use the memory bitmap @bm to mark the pages that will
2350  *      be overwritten in the process of restoring the system memory state
2351  *      from the suspend image ("unsafe" pages) and allocate memory for the
2352  *      image.
2353  *
2354  *      The idea is to allocate a new memory bitmap first and then allocate
2355  *      as many pages as needed for the image data, but not to assign these
2356  *      pages to specific tasks initially.  Instead, we just mark them as
2357  *      allocated and create a list of "safe" pages that will be used
2358  *      later.  On systems with high memory a list of "safe" highmem pages is
2359  *      also created.
2360  */
2361
2362 #define PBES_PER_LINKED_PAGE    (LINKED_PAGE_DATA_SIZE / sizeof(struct pbe))
2363
2364 static int prepare_image(struct memory_bitmap *new_bm, struct memory_bitmap *bm)
2365 {
2366         unsigned int nr_pages, nr_highmem;
2367         struct linked_page *lp;
2368         int error;
2369
2370         /* If there is no highmem, the buffer will not be necessary */
2371         free_image_page(buffer, PG_UNSAFE_CLEAR);
2372         buffer = NULL;
2373
2374         nr_highmem = count_highmem_image_pages(bm);
2375         mark_unsafe_pages(bm);
2376
2377         error = memory_bm_create(new_bm, GFP_ATOMIC, PG_SAFE);
2378         if (error)
2379                 goto Free;
2380
2381         duplicate_memory_bitmap(new_bm, bm);
2382         memory_bm_free(bm, PG_UNSAFE_KEEP);
2383         if (nr_highmem > 0) {
2384                 error = prepare_highmem_image(bm, &nr_highmem);
2385                 if (error)
2386                         goto Free;
2387         }
2388         /* Reserve some safe pages for potential later use.
2389          *
2390          * NOTE: This way we make sure there will be enough safe pages for the
2391          * chain_alloc() in get_buffer().  It is a bit wasteful, but
2392          * nr_copy_pages cannot be greater than 50% of the memory anyway.
2393          *
2394          * Also, nr_copy_pages cannot be less than allocated_unsafe_pages.
2395          */
2396         nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2397         nr_pages = DIV_ROUND_UP(nr_pages, PBES_PER_LINKED_PAGE);
2398         while (nr_pages > 0) {
2399                 lp = get_image_page(GFP_ATOMIC, PG_SAFE);
2400                 if (!lp) {
2401                         error = -ENOMEM;
2402                         goto Free;
2403                 }
2404                 lp->next = safe_pages_list;
2405                 safe_pages_list = lp;
2406                 nr_pages--;
2407         }
2408         /* Preallocate memory for the image */
2409         nr_pages = nr_copy_pages - nr_highmem - allocated_unsafe_pages;
2410         while (nr_pages > 0) {
2411                 lp = (struct linked_page *)get_zeroed_page(GFP_ATOMIC);
2412                 if (!lp) {
2413                         error = -ENOMEM;
2414                         goto Free;
2415                 }
2416                 if (!swsusp_page_is_free(virt_to_page(lp))) {
2417                         /* The page is "safe", add it to the list */
2418                         lp->next = safe_pages_list;
2419                         safe_pages_list = lp;
2420                 }
2421                 /* Mark the page as allocated */
2422                 swsusp_set_page_forbidden(virt_to_page(lp));
2423                 swsusp_set_page_free(virt_to_page(lp));
2424                 nr_pages--;
2425         }
2426         return 0;
2427
2428  Free:
2429         swsusp_free();
2430         return error;
2431 }
2432
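/*
 * Sizing note (assuming a 64-bit kernel with 4 KiB pages): struct pbe is
 * three pointers, i.e. 24 bytes, so PBES_PER_LINKED_PAGE works out to
 * (4096 - 8) / 24 = 170, and one reserved safe page covers up to 170 of
 * the PBEs later handed out by chain_alloc() in get_buffer().
 */
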
2433 /**
2434  *      get_buffer - compute the address that snapshot_write_next() should
2435  *      set for its caller to write to.
2436  */
2437
2438 static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
2439 {
2440         struct pbe *pbe;
2441         struct page *page;
2442         unsigned long pfn = memory_bm_next_pfn(bm);
2443
2444         if (pfn == BM_END_OF_MAP)
2445                 return ERR_PTR(-EFAULT);
2446
2447         page = pfn_to_page(pfn);
2448         if (PageHighMem(page))
2449                 return get_highmem_page_buffer(page, ca);
2450
2451         if (swsusp_page_is_forbidden(page) && swsusp_page_is_free(page))
2452                 /* We have allocated the "original" page frame and we can
2453                  * use it directly to store the loaded page.
2454                  */
2455                 return page_address(page);
2456
2457         /* The "original" page frame has not been allocated and we have to
2458          * use a "safe" page frame to store the loaded page.
2459          */
2460         pbe = chain_alloc(ca, sizeof(struct pbe));
2461         if (!pbe) {
2462                 swsusp_free();
2463                 return ERR_PTR(-ENOMEM);
2464         }
2465         pbe->orig_address = page_address(page);
2466         pbe->address = safe_pages_list;
2467         safe_pages_list = safe_pages_list->next;
2468         pbe->next = restore_pblist;
2469         restore_pblist = pbe;
2470         return pbe->address;
2471 }
2472
2473 /**
2474  *      snapshot_write_next - used for writing the system memory snapshot.
2475  *
2476  *      On the first call to it @handle should point to a zeroed
2477  *      snapshot_handle structure.  The structure gets updated and a pointer
2478  *      to it should be passed to this function on every subsequent call.
2479  *
2480  *      On success the function returns a positive number.  Then, the caller
2481  *      is allowed to write up to the returned number of bytes to the memory
2482  *      location computed by the data_of() macro.
2483  *
2484  *      The function returns 0 to indicate the "end of file" condition,
2485  *      and a negative number is returned on error.  In such cases the
2486  *      structure pointed to by @handle is not updated and should not be used
2487  *      any more.
2488  */
2489
2490 int snapshot_write_next(struct snapshot_handle *handle)
2491 {
2492         static struct chain_allocator ca;
2493         int error = 0;
2494
2495         /* Check if we have already loaded the entire image */
2496         if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages)
2497                 return 0;
2498
2499         handle->sync_read = 1;
2500
2501         if (!handle->cur) {
2502                 if (!buffer)
2503                         /* This way the buffer will be freed by swsusp_free() */
2504                         buffer = get_image_page(GFP_ATOMIC, PG_ANY);
2505
2506                 if (!buffer)
2507                         return -ENOMEM;
2508
2509                 handle->buffer = buffer;
2510         } else if (handle->cur == 1) {
2511                 error = load_header(buffer);
2512                 if (error)
2513                         return error;
2514
2515                 safe_pages_list = NULL;
2516
2517                 error = memory_bm_create(&copy_bm, GFP_ATOMIC, PG_ANY);
2518                 if (error)
2519                         return error;
2520
2521                 /* Allocate buffer for page keys. */
2522                 error = page_key_alloc(nr_copy_pages);
2523                 if (error)
2524                         return error;
2525
2526         } else if (handle->cur <= nr_meta_pages + 1) {
2527                 error = unpack_orig_pfns(buffer, &copy_bm);
2528                 if (error)
2529                         return error;
2530
2531                 if (handle->cur == nr_meta_pages + 1) {
2532                         error = prepare_image(&orig_bm, &copy_bm);
2533                         if (error)
2534                                 return error;
2535
2536                         chain_init(&ca, GFP_ATOMIC, PG_SAFE);
2537                         memory_bm_position_reset(&orig_bm);
2538                         restore_pblist = NULL;
2539                         handle->buffer = get_buffer(&orig_bm, &ca);
2540                         handle->sync_read = 0;
2541                         if (IS_ERR(handle->buffer))
2542                                 return PTR_ERR(handle->buffer);
2543                 }
2544         } else {
2545                 copy_last_highmem_page();
2546                 /* Restore page key for data page (s390 only). */
2547                 page_key_write(handle->buffer);
2548                 handle->buffer = get_buffer(&orig_bm, &ca);
2549                 if (IS_ERR(handle->buffer))
2550                         return PTR_ERR(handle->buffer);
2551                 if (handle->buffer != buffer)
2552                         handle->sync_read = 0;
2553         }
2554         handle->cur++;
2555         return PAGE_SIZE;
2556 }
2557
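#if 0
/*
 * Illustrative sketch of a snapshot_write_next() caller (hypothetical code
 * with simplified error handling): the image-loading side, such as the swap
 * reader in kernel/power/swap.c, feeds pages back roughly like this.
 * read_page_from_somewhere() is a made-up placeholder for the actual input
 * routine.
 */
static int example_load_image(struct snapshot_handle *handle)
{
	int nbytes;

	memset(handle, 0, sizeof(*handle));	/* first call: zeroed handle */
	while ((nbytes = snapshot_write_next(handle)) > 0) {
		/* Up to nbytes bytes must be stored at data_of(*handle). */
		if (read_page_from_somewhere(data_of(*handle)))
			return -EIO;
	}
	if (nbytes < 0)
		return nbytes;
	snapshot_write_finalize(handle);
	return snapshot_image_loaded(handle) ? 0 : -ENODATA;
}
#endif
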
2558 /**
2559  *      snapshot_write_finalize - must be called after the last call to
2560  *      snapshot_write_next() in case the last page in the image happens
2561  *      to be a highmem page and its contents should be stored in the
2562  *      to be a highmem page whose contents should be stored in
2563  *      highmem.  Additionally, it releases the memory that will not be
2564  */
2565
2566 void snapshot_write_finalize(struct snapshot_handle *handle)
2567 {
2568         copy_last_highmem_page();
2569         /* Restore page key for data page (s390 only). */
2570         page_key_write(handle->buffer);
2571         page_key_free();
2572         /* Do that only if we have loaded the image entirely */
2573         if (handle->cur > 1 && handle->cur > nr_meta_pages + nr_copy_pages) {
2574                 memory_bm_recycle(&orig_bm);
2575                 free_highmem_data();
2576         }
2577 }
2578
2579 int snapshot_image_loaded(struct snapshot_handle *handle)
2580 {
2581         return !(!nr_copy_pages || !last_highmem_page_copied() ||
2582                         handle->cur <= nr_meta_pages + nr_copy_pages);
2583 }
2584
2585 #ifdef CONFIG_HIGHMEM
2586 /* Assumes that @buf is ready and points to a "safe" page */
2587 static inline void swap_two_pages_data(struct page *p1, struct page *p2,
2588                                        void *buf)
2589 {
2590         void *kaddr1, *kaddr2;
2591
2592         kaddr1 = kmap_atomic(p1);
2593         kaddr2 = kmap_atomic(p2);
2594         copy_page(buf, kaddr1);
2595         copy_page(kaddr1, kaddr2);
2596         copy_page(kaddr2, buf);
2597         kunmap_atomic(kaddr2);
2598         kunmap_atomic(kaddr1);
2599 }
2600
2601 /**
2602  *      restore_highmem - for each highmem page that was allocated before
2603  *      the suspend and included in the suspend image, and also has been
2604  *      allocated by the "resume" kernel, swap its current (ie. "before
2605  *      resume") contents with the previous (ie. "before suspend") one.
2606  *
2607  *      If the resume eventually fails, we can call this function once
2608  *      again and restore the "before resume" highmem state.
2609  */
2610
2611 int restore_highmem(void)
2612 {
2613         struct highmem_pbe *pbe = highmem_pblist;
2614         void *buf;
2615
2616         if (!pbe)
2617                 return 0;
2618
2619         buf = get_image_page(GFP_ATOMIC, PG_SAFE);
2620         if (!buf)
2621                 return -ENOMEM;
2622
2623         while (pbe) {
2624                 swap_two_pages_data(pbe->copy_page, pbe->orig_page, buf);
2625                 pbe = pbe->next;
2626         }
2627         free_image_page(buf, PG_UNSAFE_CLEAR);
2628         return 0;
2629 }
2630 #endif /* CONFIG_HIGHMEM */