kernel/trace/ring_buffer.c
1 /*
2  * Generic ring buffer
3  *
4  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5  */
6 #include <linux/ftrace_event.h>
7 #include <linux/ring_buffer.h>
8 #include <linux/trace_clock.h>
9 #include <linux/trace_seq.h>
10 #include <linux/spinlock.h>
11 #include <linux/irq_work.h>
12 #include <linux/debugfs.h>
13 #include <linux/uaccess.h>
14 #include <linux/hardirq.h>
15 #include <linux/kthread.h>      /* for self test */
16 #include <linux/kmemcheck.h>
17 #include <linux/module.h>
18 #include <linux/percpu.h>
19 #include <linux/mutex.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/init.h>
23 #include <linux/hash.h>
24 #include <linux/list.h>
25 #include <linux/cpu.h>
26 #include <linux/fs.h>
27
28 #include <asm/local.h>
29
30 static void update_pages_handler(struct work_struct *work);
31
32 /*
33  * The ring buffer header is special. We must keep it up to date manually.
34  */
35 int ring_buffer_print_entry_header(struct trace_seq *s)
36 {
37         int ret;
38
39         ret = trace_seq_puts(s, "# compressed entry header\n");
40         ret = trace_seq_puts(s, "\ttype_len    :    5 bits\n");
41         ret = trace_seq_puts(s, "\ttime_delta  :   27 bits\n");
42         ret = trace_seq_puts(s, "\tarray       :   32 bits\n");
43         ret = trace_seq_putc(s, '\n');
44         ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
45                                RINGBUF_TYPE_PADDING);
46         ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
47                                RINGBUF_TYPE_TIME_EXTEND);
48         ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
49                                RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
50
51         return ret;
52 }
53
54 /*
55  * The ring buffer is made up of a list of pages. A separate list of pages is
56  * allocated for each CPU. A writer may only write to a buffer that is
57  * associated with the CPU it is currently executing on.  A reader may read
58  * from any per cpu buffer.
59  *
60  * The reader is special. For each per cpu buffer, the reader has its own
61  * reader page. When a reader has read the entire reader page, this reader
62  * page is swapped with another page in the ring buffer.
63  *
64  * Now, as long as the writer is off the reader page, the reader can do
65  * whatever it wants with that page. The writer will never write to that page
66  * again (as long as it is out of the ring buffer).
67  *
68  * Here's some silly ASCII art.
69  *
70  *   +------+
71  *   |reader|          RING BUFFER
72  *   |page  |
73  *   +------+        +---+   +---+   +---+
74  *                   |   |-->|   |-->|   |
75  *                   +---+   +---+   +---+
76  *                     ^               |
77  *                     |               |
78  *                     +---------------+
79  *
80  *
81  *   +------+
82  *   |reader|          RING BUFFER
83  *   |page  |------------------v
84  *   +------+        +---+   +---+   +---+
85  *                   |   |-->|   |-->|   |
86  *                   +---+   +---+   +---+
87  *                     ^               |
88  *                     |               |
89  *                     +---------------+
90  *
91  *
92  *   +------+
93  *   |reader|          RING BUFFER
94  *   |page  |------------------v
95  *   +------+        +---+   +---+   +---+
96  *      ^            |   |-->|   |-->|   |
97  *      |            +---+   +---+   +---+
98  *      |                              |
99  *      |                              |
100  *      +------------------------------+
101  *
102  *
103  *   +------+
104  *   |buffer|          RING BUFFER
105  *   |page  |------------------v
106  *   +------+        +---+   +---+   +---+
107  *      ^            |   |   |   |-->|   |
108  *      |   New      +---+   +---+   +---+
109  *      |  Reader------^               |
110  *      |   page                       |
111  *      +------------------------------+
112  *
113  *
114  * After we make this swap, the reader can hand this page off to the splice
115  * code and be done with it. It can even allocate a new page if it needs to
116  * and swap that into the ring buffer.
117  *
118  * This swap is done locklessly with cmpxchg (see the notes further below).
119  *
120  */
121
122 /*
123  * A fast way to enable or disable all ring buffers is to
124  * call tracing_on or tracing_off. Turning off the ring buffers
125  * prevents all ring buffers from being recorded to.
126  * Turning this switch on makes it OK to write to the
127  * ring buffer, if the ring buffer is enabled itself.
128  *
129  * There are three layers that must be on in order to write
130  * to the ring buffer.
131  *
132  * 1) This global flag must be set.
133  * 2) The ring buffer must be enabled for recording.
134  * 3) The per cpu buffer must be enabled for recording.
135  *
136  * In case of an anomaly, this global flag has a bit set that
137  * will permanently disable all ring buffers.
138  */
139
140 /*
141  * Global flag to disable all recording to ring buffers
142  *  This has two bits: ON, DISABLED
143  *
144  *  ON   DISABLED
145  * ---- ----------
146  *   0      0        : ring buffers are off
147  *   1      0        : ring buffers are on
148  *   X      1        : ring buffers are permanently disabled
149  */
150
151 enum {
152         RB_BUFFERS_ON_BIT       = 0,
153         RB_BUFFERS_DISABLED_BIT = 1,
154 };
155
156 enum {
157         RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
158         RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
159 };
160
161 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
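/*
 * Example (sketch): because RB_BUFFERS_ON is bit 0 and
 * RB_BUFFERS_DISABLED is bit 1, a write path can cover all three rows
 * of the table above with a single equality test:
 *
 *      if (ring_buffer_flags != RB_BUFFERS_ON)
 *              return NULL;
 *
 * Once the DISABLED bit is set, the value can never equal
 * RB_BUFFERS_ON again, which is what makes the disable permanent.
 */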
162
163 /* Used for individual buffers (after the counter) */
164 #define RB_BUFFER_OFF           (1 << 20)
165
166 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
167
168 /**
169  * tracing_off_permanent - permanently disable ring buffers
170  *
171  * This function, once called, will disable all ring buffers
172  * permanently.
173  */
174 void tracing_off_permanent(void)
175 {
176         set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
177 }
178
179 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
180 #define RB_ALIGNMENT            4U
181 #define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
182 #define RB_EVNT_MIN_SIZE        8U      /* two 32bit words */
183
184 #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
185 # define RB_FORCE_8BYTE_ALIGNMENT       0
186 # define RB_ARCH_ALIGNMENT              RB_ALIGNMENT
187 #else
188 # define RB_FORCE_8BYTE_ALIGNMENT       1
189 # define RB_ARCH_ALIGNMENT              8U
190 #endif
191
192 #define RB_ALIGN_DATA           __aligned(RB_ARCH_ALIGNMENT)
193
194 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
195 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
196
197 enum {
198         RB_LEN_TIME_EXTEND = 8,
199         RB_LEN_TIME_STAMP = 16,
200 };
201
202 #define skip_time_extend(event) \
203         ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
204
205 static inline int rb_null_event(struct ring_buffer_event *event)
206 {
207         return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
208 }
209
210 static void rb_event_set_padding(struct ring_buffer_event *event)
211 {
212         /* padding has a NULL time_delta */
213         event->type_len = RINGBUF_TYPE_PADDING;
214         event->time_delta = 0;
215 }
216
217 static unsigned
218 rb_event_data_length(struct ring_buffer_event *event)
219 {
220         unsigned length;
221
222         if (event->type_len)
223                 length = event->type_len * RB_ALIGNMENT;
224         else
225                 length = event->array[0];
226         return length + RB_EVNT_HDR_SIZE;
227 }
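/*
 * Worked example (sketch): RB_EVNT_HDR_SIZE is the 32-bit word that
 * holds type_len and time_delta, so a data event with type_len == 3
 * reports 3 * RB_ALIGNMENT == 12 bytes of payload and 16 bytes total.
 * A larger event stores type_len == 0; its total size is then
 * array[0] + RB_EVNT_HDR_SIZE and its payload starts at array[1]
 * (see rb_event_data() below).
 */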
228
229 /*
230  * Return the length of the given event. Will return
231  * the length of the time extend if the event is a
232  * time extend.
233  */
234 static inline unsigned
235 rb_event_length(struct ring_buffer_event *event)
236 {
237         switch (event->type_len) {
238         case RINGBUF_TYPE_PADDING:
239                 if (rb_null_event(event))
240                         /* undefined */
241                         return -1;
242                 return  event->array[0] + RB_EVNT_HDR_SIZE;
243
244         case RINGBUF_TYPE_TIME_EXTEND:
245                 return RB_LEN_TIME_EXTEND;
246
247         case RINGBUF_TYPE_TIME_STAMP:
248                 return RB_LEN_TIME_STAMP;
249
250         case RINGBUF_TYPE_DATA:
251                 return rb_event_data_length(event);
252         default:
253                 BUG();
254         }
255         /* not hit */
256         return 0;
257 }
258
259 /*
260  * Return total length of time extend and data,
261  *   or just the event length for all other events.
262  */
263 static inline unsigned
264 rb_event_ts_length(struct ring_buffer_event *event)
265 {
266         unsigned len = 0;
267
268         if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
269                 /* time extends include the data event after it */
270                 len = RB_LEN_TIME_EXTEND;
271                 event = skip_time_extend(event);
272         }
273         return len + rb_event_length(event);
274 }
275
276 /**
277  * ring_buffer_event_length - return the length of the event
278  * @event: the event to get the length of
279  *
280  * Returns the size of the data load of a data event.
281  * If the event is something other than a data event, it
282  * returns the size of the event itself. With the exception
283  * of a TIME EXTEND, where it still returns the size of the
284  * data load of the data event after it.
285  */
286 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
287 {
288         unsigned length;
289
290         if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
291                 event = skip_time_extend(event);
292
293         length = rb_event_length(event);
294         if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
295                 return length;
296         length -= RB_EVNT_HDR_SIZE;
297         if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
298                 length -= sizeof(event->array[0]);
299         return length;
300 }
301 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
302
303 /* inline for ring buffer fast paths */
304 static void *
305 rb_event_data(struct ring_buffer_event *event)
306 {
307         if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
308                 event = skip_time_extend(event);
309         BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
310         /* If length is in len field, then array[0] has the data */
311         if (event->type_len)
312                 return (void *)&event->array[0];
313         /* Otherwise length is in array[0] and array[1] has the data */
314         return (void *)&event->array[1];
315 }
316
317 /**
318  * ring_buffer_event_data - return the data of the event
319  * @event: the event to get the data from
320  */
321 void *ring_buffer_event_data(struct ring_buffer_event *event)
322 {
323         return rb_event_data(event);
324 }
325 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
326
327 #define for_each_buffer_cpu(buffer, cpu)                \
328         for_each_cpu(cpu, buffer->cpumask)
329
330 #define TS_SHIFT        27
331 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
332 #define TS_DELTA_TEST   (~TS_MASK)
333
334 /* Flag when events were overwritten */
335 #define RB_MISSED_EVENTS        (1 << 31)
336 /* Missed count stored at end */
337 #define RB_MISSED_STORED        (1 << 30)
338
339 struct buffer_data_page {
340         u64              time_stamp;    /* page time stamp */
341         local_t          commit;        /* write committed index */
342         unsigned char    data[] RB_ALIGN_DATA;  /* data of buffer page */
343 };
344
345 /*
346  * Note, the buffer_page list must be first. The buffer pages
347  * are allocated in cache lines, which means that each buffer
348  * page will be at the beginning of a cache line, and thus
349  * the least significant bits will be zero. We use this to
350  * add flags in the list struct pointers, to make the ring buffer
351  * lockless.
352  */
353 struct buffer_page {
354         struct list_head list;          /* list of buffer pages */
355         local_t          write;         /* index for next write */
356         unsigned         read;          /* index for next read */
357         local_t          entries;       /* entries on this page */
358         unsigned long    real_end;      /* real end of data */
359         struct buffer_data_page *page;  /* Actual data page */
360 };
361
362 /*
363  * The buffer page counters, write and entries, must be reset
364  * atomically when crossing page boundaries. To synchronize this
365  * update, two counters are packed into a single value. One is
366  * the actual counter for the write position or count on the page.
367  *
368  * The other is a counter of updaters. Before an update happens
369  * the updater portion of the counter is incremented. This allows
370  * the updater to update the counter atomically.
371  *
372  * The counter is 20 bits, and the state data is 12.
373  */
374 #define RB_WRITE_MASK           0xfffff
375 #define RB_WRITE_INTCNT         (1 << 20)
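/*
 * Example (sketch): a nested updater first does
 *
 *      old = local_add_return(RB_WRITE_INTCNT, &bpage->write);
 *
 * which bumps the updater count kept above RB_WRITE_MASK, and then
 * only touches the low 20 bits. Anyone reading the index masks it:
 *
 *      index = local_read(&bpage->write) & RB_WRITE_MASK;
 *
 * This is exactly what rb_tail_page_update() and rb_page_write()
 * below do.
 */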
376
377 static void rb_init_page(struct buffer_data_page *bpage)
378 {
379         local_set(&bpage->commit, 0);
380 }
381
382 /**
383  * ring_buffer_page_len - the size of data on the page.
384  * @page: The page to read
385  *
386  * Returns the amount of data on the page, including buffer page header.
387  */
388 size_t ring_buffer_page_len(void *page)
389 {
390         return local_read(&((struct buffer_data_page *)page)->commit)
391                 + BUF_PAGE_HDR_SIZE;
392 }
393
394 /*
395  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
396  * this issue out.
397  */
398 static void free_buffer_page(struct buffer_page *bpage)
399 {
400         free_page((unsigned long)bpage->page);
401         kfree(bpage);
402 }
403
404 /*
405  * We need to fit the time_stamp delta into 27 bits.
406  */
407 static inline int test_time_stamp(u64 delta)
408 {
409         if (delta & TS_DELTA_TEST)
410                 return 1;
411         return 0;
412 }
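/*
 * Example (sketch): TS_MASK is 2^27 - 1, so a delta of, say, 1 << 27
 * has bits set in TS_DELTA_TEST and test_time_stamp() returns 1. Such
 * a delta no longer fits in the event header, and the writer has to
 * emit a separate RINGBUF_TYPE_TIME_EXTEND event in front of the data
 * event (see rb_event_ts_length() above).
 */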
413
414 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
415
416 /* Max payload is BUF_PAGE_SIZE - header (8 bytes) */
417 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
418
419 int ring_buffer_print_page_header(struct trace_seq *s)
420 {
421         struct buffer_data_page field;
422         int ret;
423
424         ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
425                                "offset:0;\tsize:%u;\tsigned:%u;\n",
426                                (unsigned int)sizeof(field.time_stamp),
427                                (unsigned int)is_signed_type(u64));
428
429         ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
430                                "offset:%u;\tsize:%u;\tsigned:%u;\n",
431                                (unsigned int)offsetof(typeof(field), commit),
432                                (unsigned int)sizeof(field.commit),
433                                (unsigned int)is_signed_type(long));
434
435         ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
436                                "offset:%u;\tsize:%u;\tsigned:%u;\n",
437                                (unsigned int)offsetof(typeof(field), commit),
438                                1,
439                                (unsigned int)is_signed_type(long));
440
441         ret = trace_seq_printf(s, "\tfield: char data;\t"
442                                "offset:%u;\tsize:%u;\tsigned:%u;\n",
443                                (unsigned int)offsetof(typeof(field), data),
444                                (unsigned int)BUF_PAGE_SIZE,
445                                (unsigned int)is_signed_type(char));
446
447         return ret;
448 }
449
450 struct rb_irq_work {
451         struct irq_work                 work;
452         wait_queue_head_t               waiters;
453         bool                            waiters_pending;
454 };
455
456 /*
457  * If head_page == tail_page && head == tail, then the buffer is empty.
458  */
459 struct ring_buffer_per_cpu {
460         int                             cpu;
461         atomic_t                        record_disabled;
462         struct ring_buffer              *buffer;
463         raw_spinlock_t                  reader_lock;    /* serialize readers */
464         arch_spinlock_t                 lock;
465         struct lock_class_key           lock_key;
466         unsigned int                    nr_pages;
467         struct list_head                *pages;
468         struct buffer_page              *head_page;     /* read from head */
469         struct buffer_page              *tail_page;     /* write to tail */
470         struct buffer_page              *commit_page;   /* committed pages */
471         struct buffer_page              *reader_page;
472         unsigned long                   lost_events;
473         unsigned long                   last_overrun;
474         local_t                         entries_bytes;
475         local_t                         entries;
476         local_t                         overrun;
477         local_t                         commit_overrun;
478         local_t                         dropped_events;
479         local_t                         committing;
480         local_t                         commits;
481         unsigned long                   read;
482         unsigned long                   read_bytes;
483         u64                             write_stamp;
484         u64                             read_stamp;
485         /* ring buffer pages to update, > 0 to add, < 0 to remove */
486         int                             nr_pages_to_update;
487         struct list_head                new_pages; /* new pages to add */
488         struct work_struct              update_pages_work;
489         struct completion               update_done;
490
491         struct rb_irq_work              irq_work;
492 };
493
494 struct ring_buffer {
495         unsigned                        flags;
496         int                             cpus;
497         atomic_t                        record_disabled;
498         atomic_t                        resize_disabled;
499         cpumask_var_t                   cpumask;
500
501         struct lock_class_key           *reader_lock_key;
502
503         struct mutex                    mutex;
504
505         struct ring_buffer_per_cpu      **buffers;
506
507 #ifdef CONFIG_HOTPLUG_CPU
508         struct notifier_block           cpu_notify;
509 #endif
510         u64                             (*clock)(void);
511
512         struct rb_irq_work              irq_work;
513 };
514
515 struct ring_buffer_iter {
516         struct ring_buffer_per_cpu      *cpu_buffer;
517         unsigned long                   head;
518         struct buffer_page              *head_page;
519         struct buffer_page              *cache_reader_page;
520         unsigned long                   cache_read;
521         u64                             read_stamp;
522 };
523
524 /*
525  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
526  *
527  * Called from irq_work context to wake up any task that is blocked on the
528  * ring buffer waiters queue.
529  */
530 static void rb_wake_up_waiters(struct irq_work *work)
531 {
532         struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
533
534         wake_up_all(&rbwork->waiters);
535 }
536
537 /**
538  * ring_buffer_wait - wait for input to the ring buffer
539  * @buffer: buffer to wait on
540  * @cpu: the cpu buffer to wait on
541  *
542  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
543  * as data is added to any of the @buffer's cpu buffers. Otherwise
544  * it will wait for data to be added to a specific cpu buffer.
545  */
546 int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
547 {
548         struct ring_buffer_per_cpu *cpu_buffer;
549         DEFINE_WAIT(wait);
550         struct rb_irq_work *work;
551
552         /*
553          * Depending on what the caller is waiting for, either any
554          * data in any cpu buffer, or a specific buffer, put the
555          * caller on the appropriate wait queue.
556          */
557         if (cpu == RING_BUFFER_ALL_CPUS)
558                 work = &buffer->irq_work;
559         else {
560                 if (!cpumask_test_cpu(cpu, buffer->cpumask))
561                         return -ENODEV;
562                 cpu_buffer = buffer->buffers[cpu];
563                 work = &cpu_buffer->irq_work;
564         }
565
566
567         prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
568
569         /*
570          * The events can happen in critical sections where
571          * checking a wait queue can cause deadlocks.
572          * After adding a task to the queue, this flag is set
573          * only to tell events to try to wake up the queue
574          * using irq_work.
575          *
576          * We don't clear it even if the buffer is no longer
577          * empty. The flag only causes the next event to run
578          * irq_work to do the wait queue wake up. The worst
579          * that can happen if we race with !trace_empty() is that
580          * an event will cause an irq_work to try to wake up
581          * an empty queue.
582          *
583          * There's no reason to protect this flag either, as
584          * the wait queue and irq_work logic will do the necessary
585          * synchronization for the wake ups. The only thing
586          * that is necessary is that the wake up happens after
587          * a task has been queued. Spurious wake ups are OK.
588          */
589         work->waiters_pending = true;
590
591         if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
592             (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
593                 schedule();
594
595         finish_wait(&work->waiters, &wait);
596         return 0;
597 }
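/*
 * Example (sketch): a consuming reader typically loops until data is
 * available before it starts reading:
 *
 *      while (ring_buffer_empty_cpu(buffer, cpu)) {
 *              ret = ring_buffer_wait(buffer, cpu);
 *              if (ret)
 *                      break;
 *      }
 *
 * ring_buffer_wait() may also return without new data (a spurious
 * wake up), which is why the caller re-checks for emptiness.
 */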
598
599 /**
600  * ring_buffer_poll_wait - poll on buffer input
601  * @buffer: buffer to wait on
602  * @cpu: the cpu buffer to wait on
603  * @filp: the file descriptor
604  * @poll_table: The poll descriptor
605  *
606  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
607  * as data is added to any of the @buffer's cpu buffers. Otherwise
608  * it will wait for data to be added to a specific cpu buffer.
609  *
610  * Returns POLLIN | POLLRDNORM if data exists in the buffers,
611  * zero otherwise.
612  */
613 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
614                           struct file *filp, poll_table *poll_table)
615 {
616         struct ring_buffer_per_cpu *cpu_buffer;
617         struct rb_irq_work *work;
618
619         if (cpu == RING_BUFFER_ALL_CPUS)
620                 work = &buffer->irq_work;
621         else {
622                 if (!cpumask_test_cpu(cpu, buffer->cpumask))
623                         return -EINVAL;
624
625                 cpu_buffer = buffer->buffers[cpu];
626                 work = &cpu_buffer->irq_work;
627         }
628
629         work->waiters_pending = true;
630         poll_wait(filp, &work->waiters, poll_table);
631
632         if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
633             (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
634                 return POLLIN | POLLRDNORM;
635         return 0;
636 }
637
638 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
639 #define RB_WARN_ON(b, cond)                                             \
640         ({                                                              \
641                 int _____ret = unlikely(cond);                          \
642                 if (_____ret) {                                         \
643                         if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
644                                 struct ring_buffer_per_cpu *__b =       \
645                                         (void *)b;                      \
646                                 atomic_inc(&__b->buffer->record_disabled); \
647                         } else                                          \
648                                 atomic_inc(&b->record_disabled);        \
649                         WARN_ON(1);                                     \
650                 }                                                       \
651                 _____ret;                                               \
652         })
653
654 /* Up this if you want to test the TIME_EXTENTS and normalization */
655 #define DEBUG_SHIFT 0
656
657 static inline u64 rb_time_stamp(struct ring_buffer *buffer)
658 {
659         /* shift to debug/test normalization and TIME_EXTENTS */
660         return buffer->clock() << DEBUG_SHIFT;
661 }
662
663 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
664 {
665         u64 time;
666
667         preempt_disable_notrace();
668         time = rb_time_stamp(buffer);
669         preempt_enable_no_resched_notrace();
670
671         return time;
672 }
673 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
674
675 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
676                                       int cpu, u64 *ts)
677 {
678         /* Just stupid testing the normalize function and deltas */
679         *ts >>= DEBUG_SHIFT;
680 }
681 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
682
683 /*
684  * Making the ring buffer lockless makes things tricky.
685  * Writes only happen on the CPU that they are on, so they
686  * only need to worry about interrupts. Reads, however, can
687  * happen on any CPU.
688  *
689  * The reader page is always off the ring buffer, but when the
690  * reader finishes with a page, it needs to swap its page with
691  * a new one from the buffer. The reader needs to take from
692  * the head (writes go to the tail). But if a writer is in overwrite
693  * mode and wraps, it must push the head page forward.
694  *
695  * Here lies the problem.
696  *
697  * The reader must be careful to replace only the head page, and
698  * not another one. As described at the top of the file in the
699  * ASCII art, the reader sets its old page to point to the next
700  * page after head. It then sets the page after head to point to
701  * the old reader page. But if the writer moves the head page
702  * during this operation, the reader could end up with the tail.
703  *
704  * We use cmpxchg to help prevent this race. We also do something
705  * special with the page before head. We set the LSB to 1.
706  *
707  * When the writer must push the page forward, it will clear the
708  * bit that points to the head page, move the head, and then set
709  * the bit that points to the new head page.
710  *
711  * We also don't want an interrupt coming in and moving the head
712  * page on another writer. Thus we use the second LSB to catch
713  * that too. Thus:
714  *
715  * head->list->prev->next        bit 1          bit 0
716  *                              -------        -------
717  * Normal page                     0              0
718  * Points to head page             0              1
719  * New head page                   1              0
720  *
721  * Note we cannot trust the prev pointer of the head page, because:
722  *
723  * +----+       +-----+        +-----+
724  * |    |------>|  T  |---X--->|  N  |
725  * |    |<------|     |        |     |
726  * +----+       +-----+        +-----+
727  *   ^                           ^ |
728  *   |          +-----+          | |
729  *   +----------|  R  |----------+ |
730  *              |     |<-----------+
731  *              +-----+
732  *
733  * Key:  ---X-->  HEAD flag set in pointer
734  *         T      Tail page
735  *         R      Reader page
736  *         N      Next page
737  *
738  * (see __rb_reserve_next() to see where this happens)
739  *
740  *  What the above shows is that the reader just swapped out
741  *  the reader page with a page in the buffer, but before it
742  *  could make the new head page point back to the new page it added
743  *  it was preempted by a writer. The writer moved forward onto
744  *  the new page added by the reader and is about to move forward
745  *  again.
746  *
747  *  You can see that it is legitimate for the previous pointer of
748  *  the head (or any page) not to point back to it. But only
749  *  temporarily.
750  */
751
752 #define RB_PAGE_NORMAL          0UL
753 #define RB_PAGE_HEAD            1UL
754 #define RB_PAGE_UPDATE          2UL
755
756
757 #define RB_FLAG_MASK            3UL
758
759 /* PAGE_MOVED is not part of the mask */
760 #define RB_PAGE_MOVED           4UL
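/*
 * Example (sketch): a raw ->next value is decoded by splitting off the
 * low bits. If "next" is the value loaded from a page's list pointer:
 *
 *      state = (unsigned long)next & RB_FLAG_MASK;
 *              (RB_PAGE_NORMAL, RB_PAGE_HEAD or RB_PAGE_UPDATE)
 *      page  = rb_list_head(next);
 *              (the real struct list_head pointer)
 *
 * RB_PAGE_MOVED is never stored in a pointer; it is only a return
 * value, used when the pointer no longer matches what was expected
 * because the reader already took the page (see rb_is_head_page()
 * and rb_head_page_set() below).
 */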
761
762 /*
763  * rb_list_head - remove any bit
764  */
765 static struct list_head *rb_list_head(struct list_head *list)
766 {
767         unsigned long val = (unsigned long)list;
768
769         return (struct list_head *)(val & ~RB_FLAG_MASK);
770 }
771
772 /*
773  * rb_is_head_page - test if the given page is the head page
774  *
775  * Because the reader may move the head_page pointer, we cannot
776  * trust what the head page is (it may be pointing to
777  * the reader page). But if the next page is the head page,
778  * its flags will be non-zero.
779  */
780 static inline int
781 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
782                 struct buffer_page *page, struct list_head *list)
783 {
784         unsigned long val;
785
786         val = (unsigned long)list->next;
787
788         if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
789                 return RB_PAGE_MOVED;
790
791         return val & RB_FLAG_MASK;
792 }
793
794 /*
795  * rb_is_reader_page
796  *
797  * The unique thing about the reader page is that, if the
798  * writer is ever on it, the previous pointer never points
799  * back to the reader page.
800  */
801 static int rb_is_reader_page(struct buffer_page *page)
802 {
803         struct list_head *list = page->list.prev;
804
805         return rb_list_head(list->next) != &page->list;
806 }
807
808 /*
809  * rb_set_list_to_head - set a list_head to be pointing to head.
810  */
811 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
812                                 struct list_head *list)
813 {
814         unsigned long *ptr;
815
816         ptr = (unsigned long *)&list->next;
817         *ptr |= RB_PAGE_HEAD;
818         *ptr &= ~RB_PAGE_UPDATE;
819 }
820
821 /*
822  * rb_head_page_activate - sets up head page
823  */
824 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
825 {
826         struct buffer_page *head;
827
828         head = cpu_buffer->head_page;
829         if (!head)
830                 return;
831
832         /*
833          * Set the previous list pointer to have the HEAD flag.
834          */
835         rb_set_list_to_head(cpu_buffer, head->list.prev);
836 }
837
838 static void rb_list_head_clear(struct list_head *list)
839 {
840         unsigned long *ptr = (unsigned long *)&list->next;
841
842         *ptr &= ~RB_FLAG_MASK;
843 }
844
845 /*
846  * rb_head_page_deactivate - clears head page ptr (for free list)
847  */
848 static void
849 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
850 {
851         struct list_head *hd;
852
853         /* Go through the whole list and clear any pointers found. */
854         rb_list_head_clear(cpu_buffer->pages);
855
856         list_for_each(hd, cpu_buffer->pages)
857                 rb_list_head_clear(hd);
858 }
859
860 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
861                             struct buffer_page *head,
862                             struct buffer_page *prev,
863                             int old_flag, int new_flag)
864 {
865         struct list_head *list;
866         unsigned long val = (unsigned long)&head->list;
867         unsigned long ret;
868
869         list = &prev->list;
870
871         val &= ~RB_FLAG_MASK;
872
873         ret = cmpxchg((unsigned long *)&list->next,
874                       val | old_flag, val | new_flag);
875
876         /* check if the reader took the page */
877         if ((ret & ~RB_FLAG_MASK) != val)
878                 return RB_PAGE_MOVED;
879
880         return ret & RB_FLAG_MASK;
881 }
882
883 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
884                                    struct buffer_page *head,
885                                    struct buffer_page *prev,
886                                    int old_flag)
887 {
888         return rb_head_page_set(cpu_buffer, head, prev,
889                                 old_flag, RB_PAGE_UPDATE);
890 }
891
892 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
893                                  struct buffer_page *head,
894                                  struct buffer_page *prev,
895                                  int old_flag)
896 {
897         return rb_head_page_set(cpu_buffer, head, prev,
898                                 old_flag, RB_PAGE_HEAD);
899 }
900
901 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
902                                    struct buffer_page *head,
903                                    struct buffer_page *prev,
904                                    int old_flag)
905 {
906         return rb_head_page_set(cpu_buffer, head, prev,
907                                 old_flag, RB_PAGE_NORMAL);
908 }
909
910 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
911                                struct buffer_page **bpage)
912 {
913         struct list_head *p = rb_list_head((*bpage)->list.next);
914
915         *bpage = list_entry(p, struct buffer_page, list);
916 }
917
918 static struct buffer_page *
919 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
920 {
921         struct buffer_page *head;
922         struct buffer_page *page;
923         struct list_head *list;
924         int i;
925
926         if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
927                 return NULL;
928
929         /* sanity check */
930         list = cpu_buffer->pages;
931         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
932                 return NULL;
933
934         page = head = cpu_buffer->head_page;
935         /*
936          * It is possible that the writer moves the head page behind
937          * where we started, and we miss it in one loop.
938          * A second loop should grab the head page, but we'll do
939          * three loops just because I'm paranoid.
940          */
941         for (i = 0; i < 3; i++) {
942                 do {
943                         if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
944                                 cpu_buffer->head_page = page;
945                                 return page;
946                         }
947                         rb_inc_page(cpu_buffer, &page);
948                 } while (page != head);
949         }
950
951         RB_WARN_ON(cpu_buffer, 1);
952
953         return NULL;
954 }
955
956 static int rb_head_page_replace(struct buffer_page *old,
957                                 struct buffer_page *new)
958 {
959         unsigned long *ptr = (unsigned long *)&old->list.prev->next;
960         unsigned long val;
961         unsigned long ret;
962
963         val = *ptr & ~RB_FLAG_MASK;
964         val |= RB_PAGE_HEAD;
965
966         ret = cmpxchg(ptr, val, (unsigned long)&new->list);
967
968         return ret == val;
969 }
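/*
 * Sketch of how the reader swap described at the top of this file can
 * use the helpers above (roughly what the consuming-read code later in
 * this file does):
 *
 *      reader = rb_set_head_page(cpu_buffer);
 *      cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
 *      cpu_buffer->reader_page->list.prev = reader->list.prev;
 *      rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
 *      if (!rb_head_page_replace(reader, cpu_buffer->reader_page))
 *              goto retry;
 *
 * The cmpxchg in rb_head_page_replace() only succeeds if the old page
 * is still the head page, which protects the reader against a writer
 * pushing the head forward underneath it.
 */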
970
971 /*
972  * rb_tail_page_update - move the tail page forward
973  *
974  * Returns 1 if we moved the tail page, 0 if someone else did.
975  */
976 static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
977                                struct buffer_page *tail_page,
978                                struct buffer_page *next_page)
979 {
980         struct buffer_page *old_tail;
981         unsigned long old_entries;
982         unsigned long old_write;
983         int ret = 0;
984
985         /*
986          * The tail page now needs to be moved forward.
987          *
988          * We need to reset the tail page, but without messing
989          * with possible erasing of data brought in by interrupts
990          * that have moved the tail page and are currently on it.
991          *
992          * We add a counter to the write field to denote this.
993          */
994         old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
995         old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
996
997         /*
998          * Just make sure we have seen our old_write and synchronize
999          * with any interrupts that come in.
1000          */
1001         barrier();
1002
1003         /*
1004          * If the tail page is still the same as what we think
1005          * it is, then it is up to us to update the tail
1006          * pointer.
1007          */
1008         if (tail_page == cpu_buffer->tail_page) {
1009                 /* Zero the write counter */
1010                 unsigned long val = old_write & ~RB_WRITE_MASK;
1011                 unsigned long eval = old_entries & ~RB_WRITE_MASK;
1012
1013                 /*
1014                  * This will only succeed if an interrupt did
1015                  * not come in and change it. If one did, we
1016                  * do not want to modify it.
1017                  *
1018                  * We add (void) to let the compiler know that we do not care
1019                  * about the return value of these functions. We use the
1020                  * cmpxchg to only update if an interrupt did not already
1021                  * do it for us. If the cmpxchg fails, we don't care.
1022                  */
1023                 (void)local_cmpxchg(&next_page->write, old_write, val);
1024                 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
1025
1026                 /*
1027                  * No need to worry about races with clearing out the commit:
1028                  * it can only increment when a commit takes place. But that
1029                  * only happens in the outermost nested commit.
1030                  */
1031                 local_set(&next_page->page->commit, 0);
1032
1033                 old_tail = cmpxchg(&cpu_buffer->tail_page,
1034                                    tail_page, next_page);
1035
1036                 if (old_tail == tail_page)
1037                         ret = 1;
1038         }
1039
1040         return ret;
1041 }
1042
1043 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1044                           struct buffer_page *bpage)
1045 {
1046         unsigned long val = (unsigned long)bpage;
1047
1048         if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1049                 return 1;
1050
1051         return 0;
1052 }
1053
1054 /**
1055  * rb_check_list - make sure the pointers of a list have no flag bits set
1056  */
1057 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1058                          struct list_head *list)
1059 {
1060         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1061                 return 1;
1062         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1063                 return 1;
1064         return 0;
1065 }
1066
1067 /**
1068  * rb_check_pages - integrity check of buffer pages
1069  * @cpu_buffer: CPU buffer with pages to test
1070  *
1071  * As a safety measure we check to make sure the data pages have not
1072  * been corrupted.
1073  */
1074 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1075 {
1076         struct list_head *head = cpu_buffer->pages;
1077         struct buffer_page *bpage, *tmp;
1078
1079         /* Reset the head page if it exists */
1080         if (cpu_buffer->head_page)
1081                 rb_set_head_page(cpu_buffer);
1082
1083         rb_head_page_deactivate(cpu_buffer);
1084
1085         if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1086                 return -1;
1087         if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1088                 return -1;
1089
1090         if (rb_check_list(cpu_buffer, head))
1091                 return -1;
1092
1093         list_for_each_entry_safe(bpage, tmp, head, list) {
1094                 if (RB_WARN_ON(cpu_buffer,
1095                                bpage->list.next->prev != &bpage->list))
1096                         return -1;
1097                 if (RB_WARN_ON(cpu_buffer,
1098                                bpage->list.prev->next != &bpage->list))
1099                         return -1;
1100                 if (rb_check_list(cpu_buffer, &bpage->list))
1101                         return -1;
1102         }
1103
1104         rb_head_page_activate(cpu_buffer);
1105
1106         return 0;
1107 }
1108
1109 static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
1110 {
1111         int i;
1112         struct buffer_page *bpage, *tmp;
1113
1114         for (i = 0; i < nr_pages; i++) {
1115                 struct page *page;
1116                 /*
1117                  * The __GFP_NORETRY flag makes sure that the allocation fails
1118                  * gracefully without invoking the OOM killer, so the system is
1119                  * not destabilized.
1120                  */
1121                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1122                                     GFP_KERNEL | __GFP_NORETRY,
1123                                     cpu_to_node(cpu));
1124                 if (!bpage)
1125                         goto free_pages;
1126
1127                 list_add(&bpage->list, pages);
1128
1129                 page = alloc_pages_node(cpu_to_node(cpu),
1130                                         GFP_KERNEL | __GFP_NORETRY, 0);
1131                 if (!page)
1132                         goto free_pages;
1133                 bpage->page = page_address(page);
1134                 rb_init_page(bpage->page);
1135         }
1136
1137         return 0;
1138
1139 free_pages:
1140         list_for_each_entry_safe(bpage, tmp, pages, list) {
1141                 list_del_init(&bpage->list);
1142                 free_buffer_page(bpage);
1143         }
1144
1145         return -ENOMEM;
1146 }
1147
1148 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1149                              unsigned nr_pages)
1150 {
1151         LIST_HEAD(pages);
1152
1153         WARN_ON(!nr_pages);
1154
1155         if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1156                 return -ENOMEM;
1157
1158         /*
1159          * The ring buffer page list is a circular list that does not
1160          * start and end with a list head. All page list items point to
1161          * other pages.
1162          */
1163         cpu_buffer->pages = pages.next;
1164         list_del(&pages);
1165
1166         cpu_buffer->nr_pages = nr_pages;
1167
1168         rb_check_pages(cpu_buffer);
1169
1170         return 0;
1171 }
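/*
 * Example (sketch): because the list has no sentinel node after the
 * list_del(&pages) above, walking the ring simply wraps around:
 *
 *      struct buffer_page *bpage = cpu_buffer->head_page;
 *
 *      do {
 *              ... look at bpage ...
 *              rb_inc_page(cpu_buffer, &bpage);
 *      } while (bpage != cpu_buffer->head_page);
 */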
1172
1173 static struct ring_buffer_per_cpu *
1174 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
1175 {
1176         struct ring_buffer_per_cpu *cpu_buffer;
1177         struct buffer_page *bpage;
1178         struct page *page;
1179         int ret;
1180
1181         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1182                                   GFP_KERNEL, cpu_to_node(cpu));
1183         if (!cpu_buffer)
1184                 return NULL;
1185
1186         cpu_buffer->cpu = cpu;
1187         cpu_buffer->buffer = buffer;
1188         raw_spin_lock_init(&cpu_buffer->reader_lock);
1189         lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1190         cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1191         INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1192         init_completion(&cpu_buffer->update_done);
1193         init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1194         init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1195
1196         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1197                             GFP_KERNEL, cpu_to_node(cpu));
1198         if (!bpage)
1199                 goto fail_free_buffer;
1200
1201         rb_check_bpage(cpu_buffer, bpage);
1202
1203         cpu_buffer->reader_page = bpage;
1204         page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1205         if (!page)
1206                 goto fail_free_reader;
1207         bpage->page = page_address(page);
1208         rb_init_page(bpage->page);
1209
1210         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1211         INIT_LIST_HEAD(&cpu_buffer->new_pages);
1212
1213         ret = rb_allocate_pages(cpu_buffer, nr_pages);
1214         if (ret < 0)
1215                 goto fail_free_reader;
1216
1217         cpu_buffer->head_page
1218                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1219         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1220
1221         rb_head_page_activate(cpu_buffer);
1222
1223         return cpu_buffer;
1224
1225  fail_free_reader:
1226         free_buffer_page(cpu_buffer->reader_page);
1227
1228  fail_free_buffer:
1229         kfree(cpu_buffer);
1230         return NULL;
1231 }
1232
1233 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1234 {
1235         struct list_head *head = cpu_buffer->pages;
1236         struct buffer_page *bpage, *tmp;
1237
1238         free_buffer_page(cpu_buffer->reader_page);
1239
1240         rb_head_page_deactivate(cpu_buffer);
1241
1242         if (head) {
1243                 list_for_each_entry_safe(bpage, tmp, head, list) {
1244                         list_del_init(&bpage->list);
1245                         free_buffer_page(bpage);
1246                 }
1247                 bpage = list_entry(head, struct buffer_page, list);
1248                 free_buffer_page(bpage);
1249         }
1250
1251         kfree(cpu_buffer);
1252 }
1253
1254 #ifdef CONFIG_HOTPLUG_CPU
1255 static int rb_cpu_notify(struct notifier_block *self,
1256                          unsigned long action, void *hcpu);
1257 #endif
1258
1259 /**
1260  * __ring_buffer_alloc - allocate a new ring_buffer
1261  * @size: the size in bytes per cpu that is needed.
1262  * @flags: attributes to set for the ring buffer.
1263  *
1264  * Currently the only flag that is available is the RB_FL_OVERWRITE
1265  * flag. This flag means that the buffer will overwrite old data
1266  * when the buffer wraps. If this flag is not set, the buffer will
1267  * drop data when the tail hits the head.
1268  */
1269 struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1270                                         struct lock_class_key *key)
1271 {
1272         struct ring_buffer *buffer;
1273         int bsize;
1274         int cpu, nr_pages;
1275
1276         /* keep it in its own cache line */
1277         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1278                          GFP_KERNEL);
1279         if (!buffer)
1280                 return NULL;
1281
1282         if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1283                 goto fail_free_buffer;
1284
1285         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1286         buffer->flags = flags;
1287         buffer->clock = trace_clock_local;
1288         buffer->reader_lock_key = key;
1289
1290         init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1291         init_waitqueue_head(&buffer->irq_work.waiters);
1292
1293         /* need at least two pages */
1294         if (nr_pages < 2)
1295                 nr_pages = 2;
1296
1297         /*
1298          * Without CPU hotplug, if the ring buffer is allocated from an
1299          * early initcall, it will not be notified of secondary cpus.
1300          * In that case, we need to allocate for all possible cpus.
1301          */
1302 #ifdef CONFIG_HOTPLUG_CPU
1303         cpu_notifier_register_begin();
1304         cpumask_copy(buffer->cpumask, cpu_online_mask);
1305 #else
1306         cpumask_copy(buffer->cpumask, cpu_possible_mask);
1307 #endif
1308         buffer->cpus = nr_cpu_ids;
1309
1310         bsize = sizeof(void *) * nr_cpu_ids;
1311         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1312                                   GFP_KERNEL);
1313         if (!buffer->buffers)
1314                 goto fail_free_cpumask;
1315
1316         for_each_buffer_cpu(buffer, cpu) {
1317                 buffer->buffers[cpu] =
1318                         rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1319                 if (!buffer->buffers[cpu])
1320                         goto fail_free_buffers;
1321         }
1322
1323 #ifdef CONFIG_HOTPLUG_CPU
1324         buffer->cpu_notify.notifier_call = rb_cpu_notify;
1325         buffer->cpu_notify.priority = 0;
1326         __register_cpu_notifier(&buffer->cpu_notify);
1327         cpu_notifier_register_done();
1328 #endif
1329
1330         mutex_init(&buffer->mutex);
1331
1332         return buffer;
1333
1334  fail_free_buffers:
1335         for_each_buffer_cpu(buffer, cpu) {
1336                 if (buffer->buffers[cpu])
1337                         rb_free_cpu_buffer(buffer->buffers[cpu]);
1338         }
1339         kfree(buffer->buffers);
1340
1341  fail_free_cpumask:
1342         free_cpumask_var(buffer->cpumask);
1343 #ifdef CONFIG_HOTPLUG_CPU
1344         cpu_notifier_register_done();
1345 #endif
1346
1347  fail_free_buffer:
1348         kfree(buffer);
1349         return NULL;
1350 }
1351 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1352
1353 /**
1354  * ring_buffer_free - free a ring buffer.
1355  * @buffer: the buffer to free.
1356  */
1357 void
1358 ring_buffer_free(struct ring_buffer *buffer)
1359 {
1360         int cpu;
1361
1362 #ifdef CONFIG_HOTPLUG_CPU
1363         cpu_notifier_register_begin();
1364         __unregister_cpu_notifier(&buffer->cpu_notify);
1365 #endif
1366
1367         for_each_buffer_cpu(buffer, cpu)
1368                 rb_free_cpu_buffer(buffer->buffers[cpu]);
1369
1370 #ifdef CONFIG_HOTPLUG_CPU
1371         cpu_notifier_register_done();
1372 #endif
1373
1374         kfree(buffer->buffers);
1375         free_cpumask_var(buffer->cpumask);
1376
1377         kfree(buffer);
1378 }
1379 EXPORT_SYMBOL_GPL(ring_buffer_free);
1380
1381 void ring_buffer_set_clock(struct ring_buffer *buffer,
1382                            u64 (*clock)(void))
1383 {
1384         buffer->clock = clock;
1385 }
1386
1387 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1388
1389 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1390 {
1391         return local_read(&bpage->entries) & RB_WRITE_MASK;
1392 }
1393
1394 static inline unsigned long rb_page_write(struct buffer_page *bpage)
1395 {
1396         return local_read(&bpage->write) & RB_WRITE_MASK;
1397 }
1398
1399 static int
1400 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
1401 {
1402         struct list_head *tail_page, *to_remove, *next_page;
1403         struct buffer_page *to_remove_page, *tmp_iter_page;
1404         struct buffer_page *last_page, *first_page;
1405         unsigned int nr_removed;
1406         unsigned long head_bit;
1407         int page_entries;
1408
1409         head_bit = 0;
1410
1411         raw_spin_lock_irq(&cpu_buffer->reader_lock);
1412         atomic_inc(&cpu_buffer->record_disabled);
1413         /*
1414          * We don't race with the readers since we have acquired the reader
1415          * lock. We also don't race with writers after disabling recording.
1416          * This makes it easy to figure out the first and the last page to be
1417          * removed from the list. We unlink all the pages in between including
1418          * the first and last pages. This is done in a busy loop so that we
1419          * lose as few trace entries as possible.
1420          * The pages are freed after we restart recording and unlock readers.
1421          */
1422         tail_page = &cpu_buffer->tail_page->list;
1423
1424         /*
1425          * The tail page might be on the reader page; in that case we
1426          * remove the next page from the ring buffer.
1427          */
1428         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1429                 tail_page = rb_list_head(tail_page->next);
1430         to_remove = tail_page;
1431
1432         /* start of pages to remove */
1433         first_page = list_entry(rb_list_head(to_remove->next),
1434                                 struct buffer_page, list);
1435
1436         for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1437                 to_remove = rb_list_head(to_remove)->next;
1438                 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1439         }
1440
1441         next_page = rb_list_head(to_remove)->next;
1442
1443         /*
1444          * Now we remove all pages between tail_page and next_page.
1445          * Make sure that the head_bit value is preserved for the
1446          * next page.
1447          */
1448         tail_page->next = (struct list_head *)((unsigned long)next_page |
1449                                                 head_bit);
1450         next_page = rb_list_head(next_page);
1451         next_page->prev = tail_page;
1452
1453         /* make sure pages points to a valid page in the ring buffer */
1454         cpu_buffer->pages = next_page;
1455
1456         /* update head page */
1457         if (head_bit)
1458                 cpu_buffer->head_page = list_entry(next_page,
1459                                                 struct buffer_page, list);
1460
1461         /*
1462          * change read pointer to make sure any read iterators reset
1463          * themselves
1464          */
1465         cpu_buffer->read = 0;
1466
1467         /* pages are removed, resume tracing and then free the pages */
1468         atomic_dec(&cpu_buffer->record_disabled);
1469         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1470
1471         RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1472
1473         /* last buffer page to remove */
1474         last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1475                                 list);
1476         tmp_iter_page = first_page;
1477
1478         do {
1479                 to_remove_page = tmp_iter_page;
1480                 rb_inc_page(cpu_buffer, &tmp_iter_page);
1481
1482                 /* update the counters */
1483                 page_entries = rb_page_entries(to_remove_page);
1484                 if (page_entries) {
1485                         /*
1486                          * If something was added to this page, it was full
1487                          * since it is not the tail page. So we deduct the
1488                          * bytes it consumed from the ring buffer's byte count.
1489                          * Increment overrun to account for the lost events.
1490                          */
1491                         local_add(page_entries, &cpu_buffer->overrun);
1492                         local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1493                 }
1494
1495                 /*
1496                  * We have already removed references to this list item, just
1497                  * free up the buffer_page and its page
1498                  */
1499                 free_buffer_page(to_remove_page);
1500                 nr_removed--;
1501
1502         } while (to_remove_page != last_page);
1503
1504         RB_WARN_ON(cpu_buffer, nr_removed);
1505
1506         return nr_removed == 0;
1507 }
1508
1509 static int
1510 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1511 {
1512         struct list_head *pages = &cpu_buffer->new_pages;
1513         int retries, success;
1514
1515         raw_spin_lock_irq(&cpu_buffer->reader_lock);
1516         /*
1517          * We are holding the reader lock, so the reader page won't be swapped
1518          * in the ring buffer. Now we are racing with the writer trying to
1519          * move head page and the tail page.
1520          * We are going to adapt the reader page update process where:
1521          * 1. We first splice the start and end of list of new pages between
1522          *    the head page and its previous page.
1523          * 2. We cmpxchg the prev_page->next to point from head page to the
1524          *    start of new pages list.
1525          * 3. Finally, we update the head->prev to the end of new list.
1526          *
1527          * We will try this process 10 times, to make sure that we don't keep
1528          * spinning.
1529          */
1530         retries = 10;
1531         success = 0;
1532         while (retries--) {
1533                 struct list_head *head_page, *prev_page, *r;
1534                 struct list_head *last_page, *first_page;
1535                 struct list_head *head_page_with_bit;
1536
1537                 head_page = &rb_set_head_page(cpu_buffer)->list;
1538                 if (!head_page)
1539                         break;
1540                 prev_page = head_page->prev;
1541
1542                 first_page = pages->next;
1543                 last_page  = pages->prev;
1544
1545                 head_page_with_bit = (struct list_head *)
1546                                      ((unsigned long)head_page | RB_PAGE_HEAD);
1547
1548                 last_page->next = head_page_with_bit;
1549                 first_page->prev = prev_page;
1550
1551                 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1552
1553                 if (r == head_page_with_bit) {
1554                         /*
1555                          * yay, we replaced the page pointer with our new list,
1556                          * now we just have to update the head page's prev
1557                          * pointer to point to the end of the list
1558                          */
1559                         head_page->prev = last_page;
1560                         success = 1;
1561                         break;
1562                 }
1563         }
1564
1565         if (success)
1566                 INIT_LIST_HEAD(pages);
1567         /*
1568          * If we weren't successful in adding in new pages, warn and stop
1569          * tracing
1570          */
1571         RB_WARN_ON(cpu_buffer, !success);
1572         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1573
1574         /* free pages if they weren't inserted */
1575         if (!success) {
1576                 struct buffer_page *bpage, *tmp;
1577                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1578                                          list) {
1579                         list_del_init(&bpage->list);
1580                         free_buffer_page(bpage);
1581                 }
1582         }
1583         return success;
1584 }
1585
1586 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1587 {
1588         int success;
1589
1590         if (cpu_buffer->nr_pages_to_update > 0)
1591                 success = rb_insert_pages(cpu_buffer);
1592         else
1593                 success = rb_remove_pages(cpu_buffer,
1594                                         -cpu_buffer->nr_pages_to_update);
1595
1596         if (success)
1597                 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1598 }
1599
1600 static void update_pages_handler(struct work_struct *work)
1601 {
1602         struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1603                         struct ring_buffer_per_cpu, update_pages_work);
1604         rb_update_pages(cpu_buffer);
1605         complete(&cpu_buffer->update_done);
1606 }
1607
1608 /**
1609  * ring_buffer_resize - resize the ring buffer
1610  * @buffer: the buffer to resize.
1611  * @size: the new size.
1612  * @cpu_id: the cpu buffer to resize
1613  *
1614  * Minimum size is 2 * BUF_PAGE_SIZE.
1615  *
1616  * Returns the new size on success and < 0 on failure.
1617  */
1618 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1619                         int cpu_id)
1620 {
1621         struct ring_buffer_per_cpu *cpu_buffer;
1622         unsigned nr_pages;
1623         int cpu, err = 0;
1624
1625         /*
1626          * Always succeed at resizing a non-existent buffer:
1627          */
1628         if (!buffer)
1629                 return size;
1630
1631         /* Make sure the requested buffer exists */
1632         if (cpu_id != RING_BUFFER_ALL_CPUS &&
1633             !cpumask_test_cpu(cpu_id, buffer->cpumask))
1634                 return size;
1635
1636         size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1637         size *= BUF_PAGE_SIZE;
1638
1639         /* we need a minimum of two pages */
1640         if (size < BUF_PAGE_SIZE * 2)
1641                 size = BUF_PAGE_SIZE * 2;
1642
1643         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
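        /*
         * Worked example (a sketch, assuming 4 KiB pages, where
         * BUF_PAGE_SIZE is a little under 4096 bytes): a request of
         * 10000 bytes rounds up to 3 buffer pages, and a request of
         * 100 bytes is bumped to the two-page minimum above.
         */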
1644
1645         /*
1646          * Don't succeed if resizing is disabled, as a reader might be
1647          * manipulating the ring buffer and is expecting a sane state while
1648          * this is true.
1649          */
1650         if (atomic_read(&buffer->resize_disabled))
1651                 return -EBUSY;
1652
1653         /* prevent another thread from changing buffer sizes */
1654         mutex_lock(&buffer->mutex);
1655
1656         if (cpu_id == RING_BUFFER_ALL_CPUS) {
1657                 /* calculate the pages to update */
1658                 for_each_buffer_cpu(buffer, cpu) {
1659                         cpu_buffer = buffer->buffers[cpu];
1660
1661                         cpu_buffer->nr_pages_to_update = nr_pages -
1662                                                         cpu_buffer->nr_pages;
1663                         /*
1664                          * nothing more to do when removing pages or if there is no update
1665                          */
1666                         if (cpu_buffer->nr_pages_to_update <= 0)
1667                                 continue;
1668                         /*
1669                          * to add pages, make sure all new pages can be
1670                          * allocated without receiving ENOMEM
1671                          */
1672                         INIT_LIST_HEAD(&cpu_buffer->new_pages);
1673                         if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1674                                                 &cpu_buffer->new_pages, cpu)) {
1675                                 /* not enough memory for new pages */
1676                                 err = -ENOMEM;
1677                                 goto out_err;
1678                         }
1679                 }
1680
1681                 get_online_cpus();
1682                 /*
1683                  * Fire off all the required work handlers.
1684                  * We can't schedule on offline CPUs, but that's not necessary
1685                  * since we can change their buffer sizes without any race.
1686                  */
1687                 for_each_buffer_cpu(buffer, cpu) {
1688                         cpu_buffer = buffer->buffers[cpu];
1689                         if (!cpu_buffer->nr_pages_to_update)
1690                                 continue;
1691
1692                         /* Can't run something on an offline CPU. */
1693                         if (!cpu_online(cpu)) {
1694                                 rb_update_pages(cpu_buffer);
1695                                 cpu_buffer->nr_pages_to_update = 0;
1696                         } else {
1697                                 schedule_work_on(cpu,
1698                                                 &cpu_buffer->update_pages_work);
1699                         }
1700                 }
1701
1702                 /* wait for all the updates to complete */
1703                 for_each_buffer_cpu(buffer, cpu) {
1704                         cpu_buffer = buffer->buffers[cpu];
1705                         if (!cpu_buffer->nr_pages_to_update)
1706                                 continue;
1707
1708                         if (cpu_online(cpu))
1709                                 wait_for_completion(&cpu_buffer->update_done);
1710                         cpu_buffer->nr_pages_to_update = 0;
1711                 }
1712
1713                 put_online_cpus();
1714         } else {
1715                 /* Make sure this CPU has been initialized */
1716                 if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1717                         goto out;
1718
1719                 cpu_buffer = buffer->buffers[cpu_id];
1720
1721                 if (nr_pages == cpu_buffer->nr_pages)
1722                         goto out;
1723
1724                 cpu_buffer->nr_pages_to_update = nr_pages -
1725                                                 cpu_buffer->nr_pages;
1726
1727                 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1728                 if (cpu_buffer->nr_pages_to_update > 0 &&
1729                         __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1730                                             &cpu_buffer->new_pages, cpu_id)) {
1731                         err = -ENOMEM;
1732                         goto out_err;
1733                 }
1734
1735                 get_online_cpus();
1736
1737                 /* Can't run something on an offline CPU. */
1738                 if (!cpu_online(cpu_id))
1739                         rb_update_pages(cpu_buffer);
1740                 else {
1741                         schedule_work_on(cpu_id,
1742                                          &cpu_buffer->update_pages_work);
1743                         wait_for_completion(&cpu_buffer->update_done);
1744                 }
1745
1746                 cpu_buffer->nr_pages_to_update = 0;
1747                 put_online_cpus();
1748         }
1749
1750  out:
1751         /*
1752          * The ring buffer resize can happen with the ring buffer
1753          * enabled, so that the update disturbs the tracing as little
1754          * as possible. But if the buffer is disabled, we do not need
1755          * to worry about that, and we can take the time to verify
1756          * that the buffer is not corrupt.
1757          */
1758         if (atomic_read(&buffer->record_disabled)) {
1759                 atomic_inc(&buffer->record_disabled);
1760                 /*
1761                  * Even though the buffer was disabled, we must make sure
1762                  * that it is truly disabled before calling rb_check_pages.
1763                  * There could have been a race between checking
1764                  * record_disabled and incrementing it.
1765                  */
1766                 synchronize_sched();
1767                 for_each_buffer_cpu(buffer, cpu) {
1768                         cpu_buffer = buffer->buffers[cpu];
1769                         rb_check_pages(cpu_buffer);
1770                 }
1771                 atomic_dec(&buffer->record_disabled);
1772         }
1773
1774         mutex_unlock(&buffer->mutex);
1775         return size;
1776
1777  out_err:
1778         for_each_buffer_cpu(buffer, cpu) {
1779                 struct buffer_page *bpage, *tmp;
1780
1781                 cpu_buffer = buffer->buffers[cpu];
1782                 cpu_buffer->nr_pages_to_update = 0;
1783
1784                 if (list_empty(&cpu_buffer->new_pages))
1785                         continue;
1786
1787                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1788                                         list) {
1789                         list_del_init(&bpage->list);
1790                         free_buffer_page(bpage);
1791                 }
1792         }
1793         mutex_unlock(&buffer->mutex);
1794         return err;
1795 }
1796 EXPORT_SYMBOL_GPL(ring_buffer_resize);
1797
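/*
 * ring_buffer_change_overwrite - set or clear overwrite mode
 * @buffer: the buffer to change
 * @val: non-zero to set RB_FL_OVERWRITE, zero to clear it
 *
 * With RB_FL_OVERWRITE set, a full buffer overwrites its oldest pages;
 * with it clear, new events are dropped once the buffer fills up
 * (see rb_move_tail()).
 */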
1798 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1799 {
1800         mutex_lock(&buffer->mutex);
1801         if (val)
1802                 buffer->flags |= RB_FL_OVERWRITE;
1803         else
1804                 buffer->flags &= ~RB_FL_OVERWRITE;
1805         mutex_unlock(&buffer->mutex);
1806 }
1807 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1808
1809 static inline void *
1810 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1811 {
1812         return bpage->data + index;
1813 }
1814
1815 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1816 {
1817         return bpage->page->data + index;
1818 }
1819
1820 static inline struct ring_buffer_event *
1821 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1822 {
1823         return __rb_page_index(cpu_buffer->reader_page,
1824                                cpu_buffer->reader_page->read);
1825 }
1826
1827 static inline struct ring_buffer_event *
1828 rb_iter_head_event(struct ring_buffer_iter *iter)
1829 {
1830         return __rb_page_index(iter->head_page, iter->head);
1831 }
1832
1833 static inline unsigned rb_page_commit(struct buffer_page *bpage)
1834 {
1835         return local_read(&bpage->page->commit);
1836 }
1837
1838 /* Size is determined by what has been committed */
1839 static inline unsigned rb_page_size(struct buffer_page *bpage)
1840 {
1841         return rb_page_commit(bpage);
1842 }
1843
1844 static inline unsigned
1845 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1846 {
1847         return rb_page_commit(cpu_buffer->commit_page);
1848 }
1849
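/* Byte offset of an event within its buffer page, excluding the page header */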
1850 static inline unsigned
1851 rb_event_index(struct ring_buffer_event *event)
1852 {
1853         unsigned long addr = (unsigned long)event;
1854
1855         return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1856 }
1857
1858 static inline int
1859 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1860                    struct ring_buffer_event *event)
1861 {
1862         unsigned long addr = (unsigned long)event;
1863         unsigned long index;
1864
1865         index = rb_event_index(event);
1866         addr &= PAGE_MASK;
1867
1868         return cpu_buffer->commit_page->page == (void *)addr &&
1869                 rb_commit_index(cpu_buffer) == index;
1870 }
1871
1872 static void
1873 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1874 {
1875         unsigned long max_count;
1876
1877         /*
1878          * We only race with interrupts and NMIs on this CPU.
1879          * If we own the commit event, then we can commit
1880          * all others that interrupted us, since the interruptions
1881          * are in stack format (they finish before they come
1882          * back to us). This allows us to do a simple loop to
1883          * assign the commit to the tail.
1884          */
1885  again:
1886         max_count = cpu_buffer->nr_pages * 100;
1887
1888         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1889                 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1890                         return;
1891                 if (RB_WARN_ON(cpu_buffer,
1892                                rb_is_reader_page(cpu_buffer->tail_page)))
1893                         return;
1894                 local_set(&cpu_buffer->commit_page->page->commit,
1895                           rb_page_write(cpu_buffer->commit_page));
1896                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1897                 cpu_buffer->write_stamp =
1898                         cpu_buffer->commit_page->page->time_stamp;
1899                 /* add barrier to keep gcc from optimizing too much */
1900                 barrier();
1901         }
1902         while (rb_commit_index(cpu_buffer) !=
1903                rb_page_write(cpu_buffer->commit_page)) {
1904
1905                 local_set(&cpu_buffer->commit_page->page->commit,
1906                           rb_page_write(cpu_buffer->commit_page));
1907                 RB_WARN_ON(cpu_buffer,
1908                            local_read(&cpu_buffer->commit_page->page->commit) &
1909                            ~RB_WRITE_MASK);
1910                 barrier();
1911         }
1912
1913         /* again, keep gcc from optimizing */
1914         barrier();
1915
1916         /*
1917          * If an interrupt came in just after the first while loop
1918          * and pushed the tail page forward, we will be left with
1919          * a dangling commit that will never go forward.
1920          */
1921         if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1922                 goto again;
1923 }
1924
1925 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1926 {
1927         cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1928         cpu_buffer->reader_page->read = 0;
1929 }
1930
1931 static void rb_inc_iter(struct ring_buffer_iter *iter)
1932 {
1933         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1934
1935         /*
1936          * The iterator could be on the reader page (it starts there).
1937          * But the head could have moved, since the reader was
1938          * found. Check for this case and assign the iterator
1939          * to the head page instead of next.
1940          */
1941         if (iter->head_page == cpu_buffer->reader_page)
1942                 iter->head_page = rb_set_head_page(cpu_buffer);
1943         else
1944                 rb_inc_page(cpu_buffer, &iter->head_page);
1945
1946         iter->read_stamp = iter->head_page->page->time_stamp;
1947         iter->head = 0;
1948 }
1949
1950 /* Slow path, do not inline */
1951 static noinline struct ring_buffer_event *
1952 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
1953 {
1954         event->type_len = RINGBUF_TYPE_TIME_EXTEND;
1955
1956         /* Not the first event on the page? */
1957         if (rb_event_index(event)) {
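                /*
                 * Split the delta: the low TS_SHIFT bits go into
                 * time_delta and the high bits into array[0].  The read
                 * side rebuilds it as (array[0] << TS_SHIFT) + time_delta
                 * (see rb_update_write_stamp()).
                 */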
1958                 event->time_delta = delta & TS_MASK;
1959                 event->array[0] = delta >> TS_SHIFT;
1960         } else {
1961                 /* nope, just zero it */
1962                 event->time_delta = 0;
1963                 event->array[0] = 0;
1964         }
1965
1966         return skip_time_extend(event);
1967 }
1968
1969 /**
1970  * rb_update_event - update event type and data
1971  * @event: the event to update
1972  * @type: the type of event
1973  * @length: the size of the event field in the ring buffer
1974  *
1975  * Update the type and data fields of the event. The length
1976  * is the actual size that is written to the ring buffer,
1977  * and with this, we can determine what to place into the
1978  * data field.
1979  */
1980 static void
1981 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
1982                 struct ring_buffer_event *event, unsigned length,
1983                 int add_timestamp, u64 delta)
1984 {
1985         /* Only a commit updates the timestamp */
1986         if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
1987                 delta = 0;
1988
1989         /*
1990          * If we need to add a timestamp, then we
1991          * add it to the start of the reserved space.
1992          */
1993         if (unlikely(add_timestamp)) {
1994                 event = rb_add_time_stamp(event, delta);
1995                 length -= RB_LEN_TIME_EXTEND;
1996                 delta = 0;
1997         }
1998
1999         event->time_delta = delta;
2000         length -= RB_EVNT_HDR_SIZE;
2001         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2002                 event->type_len = 0;
2003                 event->array[0] = length;
2004         } else
2005                 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2006 }
2007
2008 /*
2009  * rb_handle_head_page - writer hit the head page
2010  *
2011  * Returns: +1 to retry page
2012  *           0 to continue
2013  *          -1 on error
2014  */
2015 static int
2016 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2017                     struct buffer_page *tail_page,
2018                     struct buffer_page *next_page)
2019 {
2020         struct buffer_page *new_head;
2021         int entries;
2022         int type;
2023         int ret;
2024
2025         entries = rb_page_entries(next_page);
2026
2027         /*
2028          * The hard part is here. We need to move the head
2029          * forward, and protect against both readers on
2030          * other CPUs and writers coming in via interrupts.
2031          */
2032         type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2033                                        RB_PAGE_HEAD);
2034
2035         /*
2036          * type can be one of four:
2037          *  NORMAL - an interrupt already moved it for us
2038          *  HEAD   - we are the first to get here.
2039          *  UPDATE - we are the interrupt interrupting
2040          *           a current move.
2041          *  MOVED  - a reader on another CPU moved the next
2042          *           pointer to its reader page. Give up
2043          *           and try again.
2044          */
2045
2046         switch (type) {
2047         case RB_PAGE_HEAD:
2048                 /*
2049                  * We changed the head to UPDATE, thus
2050                  * it is our responsibility to update
2051                  * the counters.
2052                  */
2053                 local_add(entries, &cpu_buffer->overrun);
2054                 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2055
2056                 /*
2057                  * The entries will be zeroed out when we move the
2058                  * tail page.
2059                  */
2060
2061                 /* still more to do */
2062                 break;
2063
2064         case RB_PAGE_UPDATE:
2065                 /*
2066                  * This is an interrupt that interrupted the
2067                  * previous update. Still more to do.
2068                  */
2069                 break;
2070         case RB_PAGE_NORMAL:
2071                 /*
2072                  * An interrupt came in before the update
2073                  * and processed this for us.
2074                  * Nothing left to do.
2075                  */
2076                 return 1;
2077         case RB_PAGE_MOVED:
2078                 /*
2079                  * The reader is on another CPU and just did
2080                  * a swap with our next_page.
2081                  * Try again.
2082                  */
2083                 return 1;
2084         default:
2085                 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2086                 return -1;
2087         }
2088
2089         /*
2090          * Now that we are here, the old head pointer is
2091          * set to UPDATE. This will keep the reader from
2092          * swapping the head page with the reader page.
2093          * The reader (on another CPU) will spin till
2094          * we are finished.
2095          *
2096          * We just need to protect against interrupts
2097          * doing the job. We will set the next pointer
2098          * to HEAD. After that, we set the old pointer
2099          * to NORMAL, but only if it was HEAD before;
2100          * otherwise we are an interrupt, and only
2101          * want the outermost commit to reset it.
2102          */
2103         new_head = next_page;
2104         rb_inc_page(cpu_buffer, &new_head);
2105
2106         ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2107                                     RB_PAGE_NORMAL);
2108
2109         /*
2110          * Valid returns are:
2111          *  HEAD   - an interrupt came in and already set it.
2112          *  NORMAL - One of two things:
2113          *            1) We really set it.
2114          *            2) A bunch of interrupts came in and moved
2115          *               the page forward again.
2116          */
2117         switch (ret) {
2118         case RB_PAGE_HEAD:
2119         case RB_PAGE_NORMAL:
2120                 /* OK */
2121                 break;
2122         default:
2123                 RB_WARN_ON(cpu_buffer, 1);
2124                 return -1;
2125         }
2126
2127         /*
2128          * It is possible that an interrupt came in,
2129          * set the head up, then more interrupts came in
2130          * and moved it again. When we get back here,
2131          * the page would have been set to NORMAL but we
2132          * just set it back to HEAD.
2133          *
2134          * How do you detect this? Well, if that happened
2135          * the tail page would have moved.
2136          */
2137         if (ret == RB_PAGE_NORMAL) {
2138                 /*
2139                  * If the tail had moved past next, then we need
2140                  * to reset the pointer.
2141                  */
2142                 if (cpu_buffer->tail_page != tail_page &&
2143                     cpu_buffer->tail_page != next_page)
2144                         rb_head_page_set_normal(cpu_buffer, new_head,
2145                                                 next_page,
2146                                                 RB_PAGE_HEAD);
2147         }
2148
2149         /*
2150          * If this was the outer most commit (the one that
2151          * changed the original pointer from HEAD to UPDATE),
2152          * then it is up to us to reset it to NORMAL.
2153          */
2154         if (type == RB_PAGE_HEAD) {
2155                 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2156                                               tail_page,
2157                                               RB_PAGE_UPDATE);
2158                 if (RB_WARN_ON(cpu_buffer,
2159                                ret != RB_PAGE_UPDATE))
2160                         return -1;
2161         }
2162
2163         return 0;
2164 }
2165
2166 static unsigned rb_calculate_event_length(unsigned length)
2167 {
2168         struct ring_buffer_event event; /* Used only for sizeof array */
2169
2170         /* zero length can cause confusion */
2171         if (!length)
2172                 length = 1;
2173
2174         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2175                 length += sizeof(event.array[0]);
2176
2177         length += RB_EVNT_HDR_SIZE;
2178         length = ALIGN(length, RB_ARCH_ALIGNMENT);
2179
2180         return length;
2181 }
2182
2183 static inline void
2184 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2185               struct buffer_page *tail_page,
2186               unsigned long tail, unsigned long length)
2187 {
2188         struct ring_buffer_event *event;
2189
2190         /*
2191          * Only the event that crossed the page boundary
2192          * must fill the old tail_page with padding.
2193          */
2194         if (tail >= BUF_PAGE_SIZE) {
2195                 /*
2196                  * If the page was filled, then we still need
2197                  * to update the real_end. Reset it to zero
2198                  * and the reader will ignore it.
2199                  */
2200                 if (tail == BUF_PAGE_SIZE)
2201                         tail_page->real_end = 0;
2202
2203                 local_sub(length, &tail_page->write);
2204                 return;
2205         }
2206
2207         event = __rb_page_index(tail_page, tail);
2208         kmemcheck_annotate_bitfield(event, bitfield);
2209
2210         /* account for padding bytes */
2211         local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2212
2213         /*
2214          * Save the original length in the meta data.
2215          * This will be used by the reader to update the
2216          * lost event counter.
2217          */
2218         tail_page->real_end = tail;
2219
2220         /*
2221          * If this event is bigger than the minimum size, then
2222          * we need to be careful that we don't subtract the
2223          * write counter enough to allow another writer to slip
2224          * in on this page.
2225          * We put in a discarded commit instead, to make sure
2226          * that this space is not used again.
2227          *
2228          * If we are less than the minimum size, we don't need to
2229          * worry about it.
2230          */
2231         if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2232                 /* No room for any events */
2233
2234                 /* Mark the rest of the page with padding */
2235                 rb_event_set_padding(event);
2236
2237                 /* Set the write back to the previous setting */
2238                 local_sub(length, &tail_page->write);
2239                 return;
2240         }
2241
2242         /* Put in a discarded event */
2243         event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2244         event->type_len = RINGBUF_TYPE_PADDING;
2245         /* time delta must be non zero */
2246         event->time_delta = 1;
2247
2248         /* Set write to end of buffer */
2249         length = (tail + length) - BUF_PAGE_SIZE;
2250         local_sub(length, &tail_page->write);
2251 }
2252
2253 /*
2254  * This is the slow path, force gcc not to inline it.
2255  */
2256 static noinline struct ring_buffer_event *
2257 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2258              unsigned long length, unsigned long tail,
2259              struct buffer_page *tail_page, u64 ts)
2260 {
2261         struct buffer_page *commit_page = cpu_buffer->commit_page;
2262         struct ring_buffer *buffer = cpu_buffer->buffer;
2263         struct buffer_page *next_page;
2264         int ret;
2265
2266         next_page = tail_page;
2267
2268         rb_inc_page(cpu_buffer, &next_page);
2269
2270         /*
2271          * If for some reason, we had an interrupt storm that made
2272          * it all the way around the buffer, bail, and warn
2273          * about it.
2274          */
2275         if (unlikely(next_page == commit_page)) {
2276                 local_inc(&cpu_buffer->commit_overrun);
2277                 goto out_reset;
2278         }
2279
2280         /*
2281          * This is where the fun begins!
2282          *
2283          * We are fighting against races between a reader that
2284          * could be on another CPU trying to swap its reader
2285          * page with the buffer head.
2286          *
2287          * We are also fighting against interrupts coming in and
2288          * moving the head or tail on us as well.
2289          *
2290          * If the next page is the head page then we have filled
2291          * the buffer, unless the commit page is still on the
2292          * reader page.
2293          */
2294         if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2295
2296                 /*
2297                  * If the commit is not on the reader page, then
2298                  * move the header page.
2299                  * move the head page.
2300                 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2301                         /*
2302                          * If we are not in overwrite mode,
2303                          * this is easy, just stop here.
2304                          */
2305                         if (!(buffer->flags & RB_FL_OVERWRITE)) {
2306                                 local_inc(&cpu_buffer->dropped_events);
2307                                 goto out_reset;
2308                         }
2309
2310                         ret = rb_handle_head_page(cpu_buffer,
2311                                                   tail_page,
2312                                                   next_page);
2313                         if (ret < 0)
2314                                 goto out_reset;
2315                         if (ret)
2316                                 goto out_again;
2317                 } else {
2318                         /*
2319                          * We need to be careful here too. The
2320                          * commit page could still be on the reader
2321                          * page. We could have a small buffer, and
2322                          * have filled up the buffer with events
2323                          * from interrupts and such, and wrapped.
2324                          *
2325                          * Note, if the tail page is also on the
2326                          * reader_page, we let it move out.
2327                          */
2328                         if (unlikely((cpu_buffer->commit_page !=
2329                                       cpu_buffer->tail_page) &&
2330                                      (cpu_buffer->commit_page ==
2331                                       cpu_buffer->reader_page))) {
2332                                 local_inc(&cpu_buffer->commit_overrun);
2333                                 goto out_reset;
2334                         }
2335                 }
2336         }
2337
2338         ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
2339         if (ret) {
2340                 /*
2341                  * Nested commits always have zero deltas, so
2342                  * just reread the time stamp
2343                  */
2344                 ts = rb_time_stamp(buffer);
2345                 next_page->page->time_stamp = ts;
2346         }
2347
2348  out_again:
2349
2350         rb_reset_tail(cpu_buffer, tail_page, tail, length);
2351
2352         /* fail and let the caller try again */
2353         return ERR_PTR(-EAGAIN);
2354
2355  out_reset:
2356         /* reset write */
2357         rb_reset_tail(cpu_buffer, tail_page, tail, length);
2358
2359         return NULL;
2360 }
2361
2362 static struct ring_buffer_event *
2363 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2364                   unsigned long length, u64 ts,
2365                   u64 delta, int add_timestamp)
2366 {
2367         struct buffer_page *tail_page;
2368         struct ring_buffer_event *event;
2369         unsigned long tail, write;
2370
2371         /*
2372          * If the time delta since the last event is too big to
2373          * hold in the time field of the event, then we append a
2374          * TIME EXTEND event ahead of the data event.
2375          */
2376         if (unlikely(add_timestamp))
2377                 length += RB_LEN_TIME_EXTEND;
2378
2379         tail_page = cpu_buffer->tail_page;
2380         write = local_add_return(length, &tail_page->write);
2381
2382         /* set write to only the index of the write */
2383         write &= RB_WRITE_MASK;
2384         tail = write - length;
2385
2386         /*
2387          * If this is the first commit on the page, then it has the same
2388          * timestamp as the page itself.
2389          */
2390         if (!tail)
2391                 delta = 0;
2392
2393         /* See if we shot past the end of this buffer page */
2394         if (unlikely(write > BUF_PAGE_SIZE))
2395                 return rb_move_tail(cpu_buffer, length, tail,
2396                                     tail_page, ts);
2397
2398         /* We reserved something on the buffer */
2399
2400         event = __rb_page_index(tail_page, tail);
2401         kmemcheck_annotate_bitfield(event, bitfield);
2402         rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
2403
2404         local_inc(&tail_page->entries);
2405
2406         /*
2407          * If this is the first commit on the page, then update
2408          * its timestamp.
2409          */
2410         if (!tail)
2411                 tail_page->page->time_stamp = ts;
2412
2413         /* account for these added bytes */
2414         local_add(length, &cpu_buffer->entries_bytes);
2415
2416         return event;
2417 }
2418
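/*
 * Try to undo a just-reserved event by moving the tail page's write index
 * back to where it was before the reservation.  This only works while the
 * event is still the last thing on the tail page; returns 1 if the event
 * was discarded, 0 otherwise.
 */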
2419 static inline int
2420 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2421                   struct ring_buffer_event *event)
2422 {
2423         unsigned long new_index, old_index;
2424         struct buffer_page *bpage;
2425         unsigned long index;
2426         unsigned long addr;
2427
2428         new_index = rb_event_index(event);
2429         old_index = new_index + rb_event_ts_length(event);
2430         addr = (unsigned long)event;
2431         addr &= PAGE_MASK;
2432
2433         bpage = cpu_buffer->tail_page;
2434
2435         if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2436                 unsigned long write_mask =
2437                         local_read(&bpage->write) & ~RB_WRITE_MASK;
2438                 unsigned long event_length = rb_event_length(event);
2439                 /*
2440                  * This is on the tail page. It is possible that
2441                  * a write could come in and move the tail page
2442                  * and write to the next page. That is fine
2443                  * because we just shorten what is on this page.
2444                  */
2445                 old_index += write_mask;
2446                 new_index += write_mask;
2447                 index = local_cmpxchg(&bpage->write, old_index, new_index);
2448                 if (index == old_index) {
2449                         /* update counters */
2450                         local_sub(event_length, &cpu_buffer->entries_bytes);
2451                         return 1;
2452                 }
2453         }
2454
2455         /* could not discard */
2456         return 0;
2457 }
2458
2459 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2460 {
2461         local_inc(&cpu_buffer->committing);
2462         local_inc(&cpu_buffer->commits);
2463 }
2464
2465 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2466 {
2467         unsigned long commits;
2468
2469         if (RB_WARN_ON(cpu_buffer,
2470                        !local_read(&cpu_buffer->committing)))
2471                 return;
2472
2473  again:
2474         commits = local_read(&cpu_buffer->commits);
2475         /* synchronize with interrupts */
2476         barrier();
2477         if (local_read(&cpu_buffer->committing) == 1)
2478                 rb_set_commit_to_write(cpu_buffer);
2479
2480         local_dec(&cpu_buffer->committing);
2481
2482         /* synchronize with interrupts */
2483         barrier();
2484
2485         /*
2486          * Need to account for interrupts coming in between the
2487          * updating of the commit page and the clearing of the
2488          * committing counter.
2489          */
2490         if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2491             !local_read(&cpu_buffer->committing)) {
2492                 local_inc(&cpu_buffer->committing);
2493                 goto again;
2494         }
2495 }
2496
2497 static struct ring_buffer_event *
2498 rb_reserve_next_event(struct ring_buffer *buffer,
2499                       struct ring_buffer_per_cpu *cpu_buffer,
2500                       unsigned long length)
2501 {
2502         struct ring_buffer_event *event;
2503         u64 ts, delta;
2504         int nr_loops = 0;
2505         int add_timestamp;
2506         u64 diff;
2507
2508         rb_start_commit(cpu_buffer);
2509
2510 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2511         /*
2512          * Due to the ability to swap a cpu buffer from a buffer
2513          * it is possible it was swapped before we committed.
2514          * (committing stops a swap). We check for it here and
2515          * if it happened, we have to fail the write.
2516          */
2517         barrier();
2518         if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2519                 local_dec(&cpu_buffer->committing);
2520                 local_dec(&cpu_buffer->commits);
2521                 return NULL;
2522         }
2523 #endif
2524
2525         length = rb_calculate_event_length(length);
2526  again:
2527         add_timestamp = 0;
2528         delta = 0;
2529
2530         /*
2531          * We allow for interrupts to reenter here and do a trace.
2532          * If one does, it will cause this original code to loop
2533          * back here. Even with heavy interrupts happening, this
2534          * should only happen a few times in a row. If this happens
2535          * 1000 times in a row, there must be either an interrupt
2536          * storm or we have something buggy.
2537          * Bail!
2538          */
2539         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2540                 goto out_fail;
2541
2542         ts = rb_time_stamp(cpu_buffer->buffer);
2543         diff = ts - cpu_buffer->write_stamp;
2544
2545         /* make sure this diff is calculated here */
2546         barrier();
2547
2548         /* Did the write stamp get updated already? */
2549         if (likely(ts >= cpu_buffer->write_stamp)) {
2550                 delta = diff;
2551                 if (unlikely(test_time_stamp(delta))) {
2552                         int local_clock_stable = 1;
2553 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2554                         local_clock_stable = sched_clock_stable();
2555 #endif
2556                         WARN_ONCE(delta > (1ULL << 59),
2557                                   KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2558                                   (unsigned long long)delta,
2559                                   (unsigned long long)ts,
2560                                   (unsigned long long)cpu_buffer->write_stamp,
2561                                   local_clock_stable ? "" :
2562                                   "If you just came from a suspend/resume,\n"
2563                                   "please switch to the trace global clock:\n"
2564                                   "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
2565                         add_timestamp = 1;
2566                 }
2567         }
2568
2569         event = __rb_reserve_next(cpu_buffer, length, ts,
2570                                   delta, add_timestamp);
2571         if (unlikely(PTR_ERR(event) == -EAGAIN))
2572                 goto again;
2573
2574         if (!event)
2575                 goto out_fail;
2576
2577         return event;
2578
2579  out_fail:
2580         rb_end_commit(cpu_buffer);
2581         return NULL;
2582 }
2583
2584 #ifdef CONFIG_TRACING
2585
2586 /*
2587  * The lock and unlock are done within a preempt disable section.
2588  * The current_context per_cpu variable can only be modified
2589  * by the current task between lock and unlock. But it can
2590  * be modified more than once via an interrupt. To pass this
2591  * information from the lock to the unlock without having to
2592  * access the 'in_interrupt()' functions again (which do show
2593  * a bit of overhead in something as critical as function tracing),
2594  * we use a bitmask trick.
2595  *
2596  *  bit 0 =  NMI context
2597  *  bit 1 =  IRQ context
2598  *  bit 2 =  SoftIRQ context
2599  *  bit 3 =  normal context.
2600  *
2601  * This works because this is the order of contexts that can
2602  * preempt other contexts. A SoftIRQ never preempts an IRQ
2603  * context.
2604  *
2605  * When the context is determined, the corresponding bit is
2606  * checked and set (if it was set, then a recursion of that context
2607  * happened).
2608  *
2609  * On unlock, we need to clear this bit. To do so, just subtract
2610  * 1 from the current_context and AND it with itself.
2611  *
2612  * (binary)
2613  *  101 - 1 = 100
2614  *  101 & 100 = 100 (clearing bit zero)
2615  *
2616  *  1010 - 1 = 1001
2617  *  1010 & 1001 = 1000 (clearing bit 1)
2618  *
2619  * The least significant bit can be cleared this way, and it
2620  * just so happens that it is the same bit corresponding to
2621  * the current context.
2622  */
2623 static DEFINE_PER_CPU(unsigned int, current_context);
2624
2625 static __always_inline int trace_recursive_lock(void)
2626 {
2627         unsigned int val = this_cpu_read(current_context);
2628         int bit;
2629
2630         if (in_interrupt()) {
2631                 if (in_nmi())
2632                         bit = 0;
2633                 else if (in_irq())
2634                         bit = 1;
2635                 else
2636                         bit = 2;
2637         } else
2638                 bit = 3;
2639
2640         if (unlikely(val & (1 << bit)))
2641                 return 1;
2642
2643         val |= (1 << bit);
2644         this_cpu_write(current_context, val);
2645
2646         return 0;
2647 }
2648
2649 static __always_inline void trace_recursive_unlock(void)
2650 {
2651         unsigned int val = this_cpu_read(current_context);
2652
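        /* clear the lowest set bit, i.e. the classic val & (val - 1) trick */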
2653         val--;
2654         val &= this_cpu_read(current_context);
2655         this_cpu_write(current_context, val);
2656 }
2657
2658 #else
2659
2660 #define trace_recursive_lock()          (0)
2661 #define trace_recursive_unlock()        do { } while (0)
2662
2663 #endif
2664
2665 /**
2666  * ring_buffer_lock_reserve - reserve a part of the buffer
2667  * @buffer: the ring buffer to reserve from
2668  * @length: the length of the data to reserve (excluding event header)
2669  *
2670  * Returns a reserved event on the ring buffer to copy directly to.
2671  * The user of this interface will need to get the body to write into
2672  * and can use the ring_buffer_event_data() interface.
2673  *
2674  * The length is the length of the data needed, not the event length
2675  * which also includes the event header.
2676  *
2677  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2678  * If NULL is returned, then nothing has been allocated or locked.
2679  */
2680 struct ring_buffer_event *
2681 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2682 {
2683         struct ring_buffer_per_cpu *cpu_buffer;
2684         struct ring_buffer_event *event;
2685         int cpu;
2686
2687         if (ring_buffer_flags != RB_BUFFERS_ON)
2688                 return NULL;
2689
2690         /* If we are tracing schedule, we don't want to recurse */
2691         preempt_disable_notrace();
2692
2693         if (atomic_read(&buffer->record_disabled))
2694                 goto out_nocheck;
2695
2696         if (trace_recursive_lock())
2697                 goto out_nocheck;
2698
2699         cpu = raw_smp_processor_id();
2700
2701         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2702                 goto out;
2703
2704         cpu_buffer = buffer->buffers[cpu];
2705
2706         if (atomic_read(&cpu_buffer->record_disabled))
2707                 goto out;
2708
2709         if (length > BUF_MAX_DATA_SIZE)
2710                 goto out;
2711
2712         event = rb_reserve_next_event(buffer, cpu_buffer, length);
2713         if (!event)
2714                 goto out;
2715
2716         return event;
2717
2718  out:
2719         trace_recursive_unlock();
2720
2721  out_nocheck:
2722         preempt_enable_notrace();
2723         return NULL;
2724 }
2725 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
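/*
 * A minimal usage sketch of the reserve/commit pair (struct my_entry and
 * its fields are illustrative only, not part of this API):
 *
 *	struct ring_buffer_event *event;
 *	struct my_entry *entry;
 *
 *	event = ring_buffer_lock_reserve(buffer, sizeof(*entry));
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *	entry->value = 42;
 *	ring_buffer_unlock_commit(buffer, event);
 */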
2726
2727 static void
2728 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2729                       struct ring_buffer_event *event)
2730 {
2731         u64 delta;
2732
2733         /*
2734          * The event first in the commit queue updates the
2735          * time stamp.
2736          */
2737         if (rb_event_is_commit(cpu_buffer, event)) {
2738                 /*
2739                  * A commit event that is first on a page
2740                  * updates the write timestamp with the page stamp
2741                  */
2742                 if (!rb_event_index(event))
2743                         cpu_buffer->write_stamp =
2744                                 cpu_buffer->commit_page->page->time_stamp;
2745                 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2746                         delta = event->array[0];
2747                         delta <<= TS_SHIFT;
2748                         delta += event->time_delta;
2749                         cpu_buffer->write_stamp += delta;
2750                 } else
2751                         cpu_buffer->write_stamp += event->time_delta;
2752         }
2753 }
2754
2755 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2756                       struct ring_buffer_event *event)
2757 {
2758         local_inc(&cpu_buffer->entries);
2759         rb_update_write_stamp(cpu_buffer, event);
2760         rb_end_commit(cpu_buffer);
2761 }
2762
2763 static __always_inline void
2764 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2765 {
2766         if (buffer->irq_work.waiters_pending) {
2767                 buffer->irq_work.waiters_pending = false;
2768                 /* irq_work_queue() supplies its own memory barriers */
2769                 irq_work_queue(&buffer->irq_work.work);
2770         }
2771
2772         if (cpu_buffer->irq_work.waiters_pending) {
2773                 cpu_buffer->irq_work.waiters_pending = false;
2774                 /* irq_work_queue() supplies its own memory barriers */
2775                 irq_work_queue(&cpu_buffer->irq_work.work);
2776         }
2777 }
2778
2779 /**
2780  * ring_buffer_unlock_commit - commit a reserved event
2781  * @buffer: The buffer to commit to
2782  * @event: The event pointer to commit.
2783  *
2784  * This commits the data to the ring buffer, and releases any locks held.
2785  *
2786  * Must be paired with ring_buffer_lock_reserve.
2787  */
2788 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2789                               struct ring_buffer_event *event)
2790 {
2791         struct ring_buffer_per_cpu *cpu_buffer;
2792         int cpu = raw_smp_processor_id();
2793
2794         cpu_buffer = buffer->buffers[cpu];
2795
2796         rb_commit(cpu_buffer, event);
2797
2798         rb_wakeups(buffer, cpu_buffer);
2799
2800         trace_recursive_unlock();
2801
2802         preempt_enable_notrace();
2803
2804         return 0;
2805 }
2806 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2807
2808 static inline void rb_event_discard(struct ring_buffer_event *event)
2809 {
2810         if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2811                 event = skip_time_extend(event);
2812
2813         /* array[0] holds the actual length for the discarded event */
2814         event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2815         event->type_len = RINGBUF_TYPE_PADDING;
2816         /* time delta must be non zero */
2817         if (!event->time_delta)
2818                 event->time_delta = 1;
2819 }
2820
2821 /*
2822  * Decrement the entries counter of the page that an event is on.
2823  * The event does not even need to exist, only the pointer
2824  * to the page it is on. This may only be called before the commit
2825  * takes place.
2826  */
2827 static inline void
2828 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2829                    struct ring_buffer_event *event)
2830 {
2831         unsigned long addr = (unsigned long)event;
2832         struct buffer_page *bpage = cpu_buffer->commit_page;
2833         struct buffer_page *start;
2834
2835         addr &= PAGE_MASK;
2836
2837         /* Do the likely case first */
2838         if (likely(bpage->page == (void *)addr)) {
2839                 local_dec(&bpage->entries);
2840                 return;
2841         }
2842
2843         /*
2844          * Because the commit page may be on the reader page we
2845          * start with the next page and check the end loop there.
2846          */
2847         rb_inc_page(cpu_buffer, &bpage);
2848         start = bpage;
2849         do {
2850                 if (bpage->page == (void *)addr) {
2851                         local_dec(&bpage->entries);
2852                         return;
2853                 }
2854                 rb_inc_page(cpu_buffer, &bpage);
2855         } while (bpage != start);
2856
2857         /* commit not part of this buffer?? */
2858         RB_WARN_ON(cpu_buffer, 1);
2859 }
2860
2861 /**
2862  * ring_buffer_commit_discard - discard an event that has not been committed
2863  * @buffer: the ring buffer
2864  * @event: non committed event to discard
2865  *
2866  * Sometimes an event that is in the ring buffer needs to be ignored.
2867  * This function lets the user discard an event in the ring buffer
2868  * and then that event will not be read later.
2869  *
2870  * This function only works if it is called before the item has been
2871  * committed. It will try to free the event from the ring buffer
2872  * if another event has not been added behind it.
2873  *
2874  * If another event has been added behind it, it will set the event
2875  * up as discarded, and perform the commit.
2876  *
2877  * If this function is called, do not call ring_buffer_unlock_commit on
2878  * the event.
2879  */
2880 void ring_buffer_discard_commit(struct ring_buffer *buffer,
2881                                 struct ring_buffer_event *event)
2882 {
2883         struct ring_buffer_per_cpu *cpu_buffer;
2884         int cpu;
2885
2886         /* The event is discarded regardless */
2887         rb_event_discard(event);
2888
2889         cpu = smp_processor_id();
2890         cpu_buffer = buffer->buffers[cpu];
2891
2892         /*
2893          * This must only be called if the event has not been
2894          * committed yet. Thus we can assume that preemption
2895          * is still disabled.
2896          */
2897         RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2898
2899         rb_decrement_entry(cpu_buffer, event);
2900         if (rb_try_to_discard(cpu_buffer, event))
2901                 goto out;
2902
2903         /*
2904          * The commit is still visible by the reader, so we
2905          * must still update the timestamp.
2906          */
2907         rb_update_write_stamp(cpu_buffer, event);
2908  out:
2909         rb_end_commit(cpu_buffer);
2910
2911         trace_recursive_unlock();
2912
2913         preempt_enable_notrace();
2914
2915 }
2916 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
2917
2918 /**
2919  * ring_buffer_write - write data to the buffer without reserving
2920  * @buffer: The ring buffer to write to.
2921  * @length: The length of the data being written (excluding the event header)
2922  * @data: The data to write to the buffer.
2923  *
2924  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2925  * one function. If you already have the data to write to the buffer, it
2926  * may be easier to simply call this function.
2927  *
2928  * Note, like ring_buffer_lock_reserve, the length is the length of the data
2929  * and not the length of the event which would hold the header.
2930  */
2931 int ring_buffer_write(struct ring_buffer *buffer,
2932                       unsigned long length,
2933                       void *data)
2934 {
2935         struct ring_buffer_per_cpu *cpu_buffer;
2936         struct ring_buffer_event *event;
2937         void *body;
2938         int ret = -EBUSY;
2939         int cpu;
2940
2941         if (ring_buffer_flags != RB_BUFFERS_ON)
2942                 return -EBUSY;
2943
2944         preempt_disable_notrace();
2945
2946         if (atomic_read(&buffer->record_disabled))
2947                 goto out;
2948
2949         cpu = raw_smp_processor_id();
2950
2951         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2952                 goto out;
2953
2954         cpu_buffer = buffer->buffers[cpu];
2955
2956         if (atomic_read(&cpu_buffer->record_disabled))
2957                 goto out;
2958
2959         if (length > BUF_MAX_DATA_SIZE)
2960                 goto out;
2961
2962         event = rb_reserve_next_event(buffer, cpu_buffer, length);
2963         if (!event)
2964                 goto out;
2965
2966         body = rb_event_data(event);
2967
2968         memcpy(body, data, length);
2969
2970         rb_commit(cpu_buffer, event);
2971
2972         rb_wakeups(buffer, cpu_buffer);
2973
2974         ret = 0;
2975  out:
2976         preempt_enable_notrace();
2977
2978         return ret;
2979 }
2980 EXPORT_SYMBOL_GPL(ring_buffer_write);
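/*
 * A minimal usage sketch (struct my_entry is illustrative only):
 *
 *	struct my_entry entry = { .value = 42 };
 *
 *	if (ring_buffer_write(buffer, sizeof(entry), &entry))
 *		pr_debug("ring buffer write dropped\n");
 */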
2981
2982 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2983 {
2984         struct buffer_page *reader = cpu_buffer->reader_page;
2985         struct buffer_page *head = rb_set_head_page(cpu_buffer);
2986         struct buffer_page *commit = cpu_buffer->commit_page;
2987
2988         /* In case of error, head will be NULL */
2989         if (unlikely(!head))
2990                 return 1;
2991
2992         return reader->read == rb_page_commit(reader) &&
2993                 (commit == reader ||
2994                  (commit == head &&
2995                   head->read == rb_page_commit(commit)));
2996 }
2997
2998 /**
2999  * ring_buffer_record_disable - stop all writes into the buffer
3000  * @buffer: The ring buffer to stop writes to.
3001  *
3002  * This prevents all writes to the buffer. Any attempt to write
3003  * to the buffer after this will fail and return NULL.
3004  *
3005  * The caller should call synchronize_sched() after this.
3006  */
3007 void ring_buffer_record_disable(struct ring_buffer *buffer)
3008 {
3009         atomic_inc(&buffer->record_disabled);
3010 }
3011 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3012
3013 /**
3014  * ring_buffer_record_enable - enable writes to the buffer
3015  * @buffer: The ring buffer to enable writes
3016  *
3017  * Note, multiple disables will need the same number of enables
3018  * to truly enable the writing (much like preempt_disable).
3019  */
3020 void ring_buffer_record_enable(struct ring_buffer *buffer)
3021 {
3022         atomic_dec(&buffer->record_disabled);
3023 }
3024 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3025
3026 /**
3027  * ring_buffer_record_off - stop all writes into the buffer
3028  * @buffer: The ring buffer to stop writes to.
3029  *
3030  * This prevents all writes to the buffer. Any attempt to write
3031  * to the buffer after this will fail and return NULL.
3032  *
3033  * This is different from ring_buffer_record_disable() in that
3034  * it works like an on/off switch, whereas the disable() version
3035  * must be paired with an enable().
3036  */
3037 void ring_buffer_record_off(struct ring_buffer *buffer)
3038 {
3039         unsigned int rd;
3040         unsigned int new_rd;
3041
3042         do {
3043                 rd = atomic_read(&buffer->record_disabled);
3044                 new_rd = rd | RB_BUFFER_OFF;
3045         } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3046 }
3047 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3048
3049 /**
3050  * ring_buffer_record_on - restart writes into the buffer
3051  * @buffer: The ring buffer to start writes to.
3052  *
3053  * This enables all writes to the buffer that was disabled by
3054  * ring_buffer_record_off().
3055  *
3056  * This is different from ring_buffer_record_enable() in that
3057  * it works like an on/off switch, whereas the enable() version
3058  * must be paired with a disable().
3059  */
3060 void ring_buffer_record_on(struct ring_buffer *buffer)
3061 {
3062         unsigned int rd;
3063         unsigned int new_rd;
3064
3065         do {
3066                 rd = atomic_read(&buffer->record_disabled);
3067                 new_rd = rd & ~RB_BUFFER_OFF;
3068         } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3069 }
3070 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
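
/*
 * Sketch contrasting the two recording controls (hypothetical caller):
 *
 *	ring_buffer_record_disable(buffer);
 *	ring_buffer_record_disable(buffer);
 *	ring_buffer_record_enable(buffer);
 *	ring_buffer_record_enable(buffer);
 *
 * After the first enable the buffer is still disabled; only the second,
 * balancing enable resumes recording, because disable/enable nest like
 * preempt_disable(). By contrast:
 *
 *	ring_buffer_record_off(buffer);
 *	ring_buffer_record_on(buffer);
 *
 * flips the RB_BUFFER_OFF bit with atomic_cmpxchg() regardless of how many
 * times off() was called, since off/on is a switch rather than a counter.
 */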
3071
3072 /**
3073  * ring_buffer_record_is_on - return true if the ring buffer can write
3074  * @buffer: The ring buffer to see if write is enabled
3075  *
3076  * Returns true if the ring buffer is in a state that it accepts writes.
3077  */
3078 int ring_buffer_record_is_on(struct ring_buffer *buffer)
3079 {
3080         return !atomic_read(&buffer->record_disabled);
3081 }
3082
3083 /**
3084  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3085  * @buffer: The ring buffer to stop writes to.
3086  * @cpu: The CPU buffer to stop
3087  *
3088  * This prevents all writes to the buffer. Any attempt to write
3089  * to the buffer after this will fail and return NULL.
3090  *
3091  * The caller should call synchronize_sched() after this.
3092  */
3093 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3094 {
3095         struct ring_buffer_per_cpu *cpu_buffer;
3096
3097         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3098                 return;
3099
3100         cpu_buffer = buffer->buffers[cpu];
3101         atomic_inc(&cpu_buffer->record_disabled);
3102 }
3103 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3104
3105 /**
3106  * ring_buffer_record_enable_cpu - enable writes to the buffer
3107  * @buffer: The ring buffer to enable writes
3108  * @cpu: The CPU to enable.
3109  *
3110  * Note, multiple disables will need the same number of enables
3111  * to truly enable the writing (much like preempt_disable).
3112  */
3113 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3114 {
3115         struct ring_buffer_per_cpu *cpu_buffer;
3116
3117         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3118                 return;
3119
3120         cpu_buffer = buffer->buffers[cpu];
3121         atomic_dec(&cpu_buffer->record_disabled);
3122 }
3123 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3124
3125 /*
3126  * The total entries in the ring buffer is the running counter
3127  * of entries entered into the ring buffer, minus the sum of
3128  * the entries read from the ring buffer and the number of
3129  * entries that were overwritten.
3130  */
3131 static inline unsigned long
3132 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3133 {
3134         return local_read(&cpu_buffer->entries) -
3135                 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3136 }
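
/*
 * Worked example with hypothetical numbers: if 100 events have been written,
 * 30 were overwritten by the writer wrapping around and 50 have been consumed
 * by a reader, rb_num_of_entries() reports 100 - (30 + 50) = 20 entries still
 * waiting to be read.
 */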
3137
3138 /**
3139  * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3140  * @buffer: The ring buffer
3141  * @cpu: The per CPU buffer to read from.
3142  */
3143 u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3144 {
3145         unsigned long flags;
3146         struct ring_buffer_per_cpu *cpu_buffer;
3147         struct buffer_page *bpage;
3148         u64 ret = 0;
3149
3150         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3151                 return 0;
3152
3153         cpu_buffer = buffer->buffers[cpu];
3154         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3155         /*
3156          * If the tail is on the reader_page, the oldest time stamp is on the reader
3157          * page
3158          */
3159         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3160                 bpage = cpu_buffer->reader_page;
3161         else
3162                 bpage = rb_set_head_page(cpu_buffer);
3163         if (bpage)
3164                 ret = bpage->page->time_stamp;
3165         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3166
3167         return ret;
3168 }
3169 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3170
3171 /**
3172  * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3173  * @buffer: The ring buffer
3174  * @cpu: The per CPU buffer to read from.
3175  */
3176 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3177 {
3178         struct ring_buffer_per_cpu *cpu_buffer;
3179         unsigned long ret;
3180
3181         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3182                 return 0;
3183
3184         cpu_buffer = buffer->buffers[cpu];
3185         ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3186
3187         return ret;
3188 }
3189 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3190
3191 /**
3192  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3193  * @buffer: The ring buffer
3194  * @cpu: The per CPU buffer to get the entries from.
3195  */
3196 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3197 {
3198         struct ring_buffer_per_cpu *cpu_buffer;
3199
3200         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3201                 return 0;
3202
3203         cpu_buffer = buffer->buffers[cpu];
3204
3205         return rb_num_of_entries(cpu_buffer);
3206 }
3207 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3208
3209 /**
3210  * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3211  * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3212  * @buffer: The ring buffer
3213  * @cpu: The per CPU buffer to get the number of overruns from
3214  */
3215 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3216 {
3217         struct ring_buffer_per_cpu *cpu_buffer;
3218         unsigned long ret;
3219
3220         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3221                 return 0;
3222
3223         cpu_buffer = buffer->buffers[cpu];
3224         ret = local_read(&cpu_buffer->overrun);
3225
3226         return ret;
3227 }
3228 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3229
3230 /**
3231  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3232  * commits failing due to the buffer wrapping around while there are uncommitted
3233  * events, such as during an interrupt storm.
3234  * @buffer: The ring buffer
3235  * @cpu: The per CPU buffer to get the number of overruns from
3236  */
3237 unsigned long
3238 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3239 {
3240         struct ring_buffer_per_cpu *cpu_buffer;
3241         unsigned long ret;
3242
3243         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3244                 return 0;
3245
3246         cpu_buffer = buffer->buffers[cpu];
3247         ret = local_read(&cpu_buffer->commit_overrun);
3248
3249         return ret;
3250 }
3251 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3252
3253 /**
3254  * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3255  * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3256  * @buffer: The ring buffer
3257  * @cpu: The per CPU buffer to get the number of overruns from
3258  */
3259 unsigned long
3260 ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3261 {
3262         struct ring_buffer_per_cpu *cpu_buffer;
3263         unsigned long ret;
3264
3265         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3266                 return 0;
3267
3268         cpu_buffer = buffer->buffers[cpu];
3269         ret = local_read(&cpu_buffer->dropped_events);
3270
3271         return ret;
3272 }
3273 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3274
3275 /**
3276  * ring_buffer_read_events_cpu - get the number of events successfully read
3277  * @buffer: The ring buffer
3278  * @cpu: The per CPU buffer to get the number of events read
3279  */
3280 unsigned long
3281 ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3282 {
3283         struct ring_buffer_per_cpu *cpu_buffer;
3284
3285         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3286                 return 0;
3287
3288         cpu_buffer = buffer->buffers[cpu];
3289         return cpu_buffer->read;
3290 }
3291 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3292
3293 /**
3294  * ring_buffer_entries - get the number of entries in a buffer
3295  * @buffer: The ring buffer
3296  *
3297  * Returns the total number of entries in the ring buffer
3298  * (all CPU entries)
3299  */
3300 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3301 {
3302         struct ring_buffer_per_cpu *cpu_buffer;
3303         unsigned long entries = 0;
3304         int cpu;
3305
3306         /* if you care about this being correct, lock the buffer */
3307         for_each_buffer_cpu(buffer, cpu) {
3308                 cpu_buffer = buffer->buffers[cpu];
3309                 entries += rb_num_of_entries(cpu_buffer);
3310         }
3311
3312         return entries;
3313 }
3314 EXPORT_SYMBOL_GPL(ring_buffer_entries);
3315
3316 /**
3317  * ring_buffer_overruns - get the number of overruns in buffer
3318  * @buffer: The ring buffer
3319  *
3320  * Returns the total number of overruns in the ring buffer
3321  * (all CPU entries)
3322  */
3323 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3324 {
3325         struct ring_buffer_per_cpu *cpu_buffer;
3326         unsigned long overruns = 0;
3327         int cpu;
3328
3329         /* if you care about this being correct, lock the buffer */
3330         for_each_buffer_cpu(buffer, cpu) {
3331                 cpu_buffer = buffer->buffers[cpu];
3332                 overruns += local_read(&cpu_buffer->overrun);
3333         }
3334
3335         return overruns;
3336 }
3337 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
3338
3339 static void rb_iter_reset(struct ring_buffer_iter *iter)
3340 {
3341         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3342
3343         /* Iterator usage is expected to have record disabled */
3344         iter->head_page = cpu_buffer->reader_page;
3345         iter->head = cpu_buffer->reader_page->read;
3346
3347         iter->cache_reader_page = iter->head_page;
3348         iter->cache_read = iter->head;
3349
3350         if (iter->head)
3351                 iter->read_stamp = cpu_buffer->read_stamp;
3352         else
3353                 iter->read_stamp = iter->head_page->page->time_stamp;
3354 }
3355
3356 /**
3357  * ring_buffer_iter_reset - reset an iterator
3358  * @iter: The iterator to reset
3359  *
3360  * Resets the iterator, so that it will start from the beginning
3361  * again.
3362  */
3363 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3364 {
3365         struct ring_buffer_per_cpu *cpu_buffer;
3366         unsigned long flags;
3367
3368         if (!iter)
3369                 return;
3370
3371         cpu_buffer = iter->cpu_buffer;
3372
3373         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3374         rb_iter_reset(iter);
3375         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3376 }
3377 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3378
3379 /**
3380  * ring_buffer_iter_empty - check if an iterator has no more to read
3381  * @iter: The iterator to check
3382  */
3383 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3384 {
3385         struct ring_buffer_per_cpu *cpu_buffer;
3386
3387         cpu_buffer = iter->cpu_buffer;
3388
3389         return iter->head_page == cpu_buffer->commit_page &&
3390                 iter->head == rb_commit_index(cpu_buffer);
3391 }
3392 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3393
3394 static void
3395 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3396                      struct ring_buffer_event *event)
3397 {
3398         u64 delta;
3399
3400         switch (event->type_len) {
3401         case RINGBUF_TYPE_PADDING:
3402                 return;
3403
3404         case RINGBUF_TYPE_TIME_EXTEND:
3405                 delta = event->array[0];
3406                 delta <<= TS_SHIFT;
3407                 delta += event->time_delta;
3408                 cpu_buffer->read_stamp += delta;
3409                 return;
3410
3411         case RINGBUF_TYPE_TIME_STAMP:
3412                 /* FIXME: not implemented */
3413                 return;
3414
3415         case RINGBUF_TYPE_DATA:
3416                 cpu_buffer->read_stamp += event->time_delta;
3417                 return;
3418
3419         default:
3420                 BUG();
3421         }
3422         return;
3423 }
3424
3425 static void
3426 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3427                           struct ring_buffer_event *event)
3428 {
3429         u64 delta;
3430
3431         switch (event->type_len) {
3432         case RINGBUF_TYPE_PADDING:
3433                 return;
3434
3435         case RINGBUF_TYPE_TIME_EXTEND:
3436                 delta = event->array[0];
3437                 delta <<= TS_SHIFT;
3438                 delta += event->time_delta;
3439                 iter->read_stamp += delta;
3440                 return;
3441
3442         case RINGBUF_TYPE_TIME_STAMP:
3443                 /* FIXME: not implemented */
3444                 return;
3445
3446         case RINGBUF_TYPE_DATA:
3447                 iter->read_stamp += event->time_delta;
3448                 return;
3449
3450         default:
3451                 BUG();
3452         }
3453         return;
3454 }
3455
3456 static struct buffer_page *
3457 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3458 {
3459         struct buffer_page *reader = NULL;
3460         unsigned long overwrite;
3461         unsigned long flags;
3462         int nr_loops = 0;
3463         int ret;
3464
3465         local_irq_save(flags);
3466         arch_spin_lock(&cpu_buffer->lock);
3467
3468  again:
3469         /*
3470          * This should normally only loop twice. But because the
3471          * start of the reader inserts an empty page, it causes
3472          * a case where we will loop three times. There should be no
3473          * reason to loop four times (that I know of).
3474          */
3475         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3476                 reader = NULL;
3477                 goto out;
3478         }
3479
3480         reader = cpu_buffer->reader_page;
3481
3482         /* If there's more to read, return this page */
3483         if (cpu_buffer->reader_page->read < rb_page_size(reader))
3484                 goto out;
3485
3486         /* Never should we have an index greater than the size */
3487         if (RB_WARN_ON(cpu_buffer,
3488                        cpu_buffer->reader_page->read > rb_page_size(reader)))
3489                 goto out;
3490
3491         /* check if we caught up to the tail */
3492         reader = NULL;
3493         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3494                 goto out;
3495
3496         /* Don't bother swapping if the ring buffer is empty */
3497         if (rb_num_of_entries(cpu_buffer) == 0)
3498                 goto out;
3499
3500         /*
3501          * Reset the reader page to size zero.
3502          */
3503         local_set(&cpu_buffer->reader_page->write, 0);
3504         local_set(&cpu_buffer->reader_page->entries, 0);
3505         local_set(&cpu_buffer->reader_page->page->commit, 0);
3506         cpu_buffer->reader_page->real_end = 0;
3507
3508  spin:
3509         /*
3510          * Splice the empty reader page into the list around the head.
3511          */
3512         reader = rb_set_head_page(cpu_buffer);
3513         if (!reader)
3514                 goto out;
3515         cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3516         cpu_buffer->reader_page->list.prev = reader->list.prev;
3517
3518         /*
3519          * cpu_buffer->pages just needs to point to the buffer; it
3520          *  has no specific buffer page to point to. Let's move it out
3521          *  of our way so we don't accidentally swap it.
3522          */
3523         cpu_buffer->pages = reader->list.prev;
3524
3525         /* The reader page will be pointing to the new head */
3526         rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3527
3528         /*
3529          * We want to make sure we read the overruns after we set up our
3530          * pointers to the next object. The writer side does a
3531          * cmpxchg to cross pages which acts as the mb on the writer
3532          * side. Note, the reader will constantly fail the swap
3533          * while the writer is updating the pointers, so this
3534          * guarantees that the overwrite recorded here is the one we
3535          * want to compare with the last_overrun.
3536          */
3537         smp_mb();
3538         overwrite = local_read(&(cpu_buffer->overrun));
3539
3540         /*
3541          * Here's the tricky part.
3542          *
3543          * We need to move the pointer past the header page.
3544          * But we can only do that if a writer is not currently
3545          * moving it. The page before the header page has the
3546          * flag bit '1' set if it is pointing to the page we want,
3547          * but if the writer is in the process of moving it
3548          * then it will be '2', or '0' if it has already been moved.
3549          */
3550
3551         ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3552
3553         /*
3554          * If we did not convert it, then we must try again.
3555          */
3556         if (!ret)
3557                 goto spin;
3558
3559         /*
3560          * Yeah! We succeeded in replacing the page.
3561          *
3562          * Now make the new head point back to the reader page.
3563          */
3564         rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3565         rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3566
3567         /* Finally update the reader page to the new head */
3568         cpu_buffer->reader_page = reader;
3569         rb_reset_reader_page(cpu_buffer);
3570
3571         if (overwrite != cpu_buffer->last_overrun) {
3572                 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3573                 cpu_buffer->last_overrun = overwrite;
3574         }
3575
3576         goto again;
3577
3578  out:
3579         arch_spin_unlock(&cpu_buffer->lock);
3580         local_irq_restore(flags);
3581
3582         return reader;
3583 }
3584
3585 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3586 {
3587         struct ring_buffer_event *event;
3588         struct buffer_page *reader;
3589         unsigned length;
3590
3591         reader = rb_get_reader_page(cpu_buffer);
3592
3593         /* This function should not be called when buffer is empty */
3594         if (RB_WARN_ON(cpu_buffer, !reader))
3595                 return;
3596
3597         event = rb_reader_event(cpu_buffer);
3598
3599         if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3600                 cpu_buffer->read++;
3601
3602         rb_update_read_stamp(cpu_buffer, event);
3603
3604         length = rb_event_length(event);
3605         cpu_buffer->reader_page->read += length;
3606 }
3607
3608 static void rb_advance_iter(struct ring_buffer_iter *iter)
3609 {
3610         struct ring_buffer_per_cpu *cpu_buffer;
3611         struct ring_buffer_event *event;
3612         unsigned length;
3613
3614         cpu_buffer = iter->cpu_buffer;
3615
3616         /*
3617          * Check if we are at the end of the buffer.
3618          */
3619         if (iter->head >= rb_page_size(iter->head_page)) {
3620                 /* discarded commits can make the page empty */
3621                 if (iter->head_page == cpu_buffer->commit_page)
3622                         return;
3623                 rb_inc_iter(iter);
3624                 return;
3625         }
3626
3627         event = rb_iter_head_event(iter);
3628
3629         length = rb_event_length(event);
3630
3631         /*
3632          * This should not be called to advance the header if we are
3633          * at the tail of the buffer.
3634          */
3635         if (RB_WARN_ON(cpu_buffer,
3636                        (iter->head_page == cpu_buffer->commit_page) &&
3637                        (iter->head + length > rb_commit_index(cpu_buffer))))
3638                 return;
3639
3640         rb_update_iter_read_stamp(iter, event);
3641
3642         iter->head += length;
3643
3644         /* check for end of page padding */
3645         if ((iter->head >= rb_page_size(iter->head_page)) &&
3646             (iter->head_page != cpu_buffer->commit_page))
3647                 rb_inc_iter(iter);
3648 }
3649
3650 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3651 {
3652         return cpu_buffer->lost_events;
3653 }
3654
3655 static struct ring_buffer_event *
3656 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3657                unsigned long *lost_events)
3658 {
3659         struct ring_buffer_event *event;
3660         struct buffer_page *reader;
3661         int nr_loops = 0;
3662
3663  again:
3664         /*
3665          * We repeat when a time extend is encountered.
3666          * Since the time extend is always attached to a data event,
3667          * we should never loop more than once.
3668          * (We never hit the following condition more than twice).
3669          */
3670         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3671                 return NULL;
3672
3673         reader = rb_get_reader_page(cpu_buffer);
3674         if (!reader)
3675                 return NULL;
3676
3677         event = rb_reader_event(cpu_buffer);
3678
3679         switch (event->type_len) {
3680         case RINGBUF_TYPE_PADDING:
3681                 if (rb_null_event(event))
3682                         RB_WARN_ON(cpu_buffer, 1);
3683                 /*
3684                  * Because the writer could be discarding every
3685                  * event it creates (which would probably be bad)
3686                  * if we were to go back to "again" then we may never
3687                  * catch up, and will trigger the warn on, or lock
3688                  * the box. Return the padding, and we will release
3689                  * the current locks, and try again.
3690                  */
3691                 return event;
3692
3693         case RINGBUF_TYPE_TIME_EXTEND:
3694                 /* Internal data, OK to advance */
3695                 rb_advance_reader(cpu_buffer);
3696                 goto again;
3697
3698         case RINGBUF_TYPE_TIME_STAMP:
3699                 /* FIXME: not implemented */
3700                 rb_advance_reader(cpu_buffer);
3701                 goto again;
3702
3703         case RINGBUF_TYPE_DATA:
3704                 if (ts) {
3705                         *ts = cpu_buffer->read_stamp + event->time_delta;
3706                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3707                                                          cpu_buffer->cpu, ts);
3708                 }
3709                 if (lost_events)
3710                         *lost_events = rb_lost_events(cpu_buffer);
3711                 return event;
3712
3713         default:
3714                 BUG();
3715         }
3716
3717         return NULL;
3718 }
3719 EXPORT_SYMBOL_GPL(ring_buffer_peek);
3720
3721 static struct ring_buffer_event *
3722 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3723 {
3724         struct ring_buffer *buffer;
3725         struct ring_buffer_per_cpu *cpu_buffer;
3726         struct ring_buffer_event *event;
3727         int nr_loops = 0;
3728
3729         cpu_buffer = iter->cpu_buffer;
3730         buffer = cpu_buffer->buffer;
3731
3732         /*
3733          * Check if someone performed a consuming read to
3734          * the buffer. A consuming read invalidates the iterator
3735          * and we need to reset the iterator in this case.
3736          */
3737         if (unlikely(iter->cache_read != cpu_buffer->read ||
3738                      iter->cache_reader_page != cpu_buffer->reader_page))
3739                 rb_iter_reset(iter);
3740
3741  again:
3742         if (ring_buffer_iter_empty(iter))
3743                 return NULL;
3744
3745         /*
3746          * We repeat when a time extend is encountered or we hit
3747          * the end of the page. Since the time extend is always attached
3748          * to a data event, we should never loop more than three times.
3749          * Once for going to next page, once on time extend, and
3750          * finally once to get the event.
3751          * (We never hit the following condition more than thrice).
3752          */
3753         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
3754                 return NULL;
3755
3756         if (rb_per_cpu_empty(cpu_buffer))
3757                 return NULL;
3758
3759         if (iter->head >= rb_page_size(iter->head_page)) {
3760                 rb_inc_iter(iter);
3761                 goto again;
3762         }
3763
3764         event = rb_iter_head_event(iter);
3765
3766         switch (event->type_len) {
3767         case RINGBUF_TYPE_PADDING:
3768                 if (rb_null_event(event)) {
3769                         rb_inc_iter(iter);
3770                         goto again;
3771                 }
3772                 rb_advance_iter(iter);
3773                 return event;
3774
3775         case RINGBUF_TYPE_TIME_EXTEND:
3776                 /* Internal data, OK to advance */
3777                 rb_advance_iter(iter);
3778                 goto again;
3779
3780         case RINGBUF_TYPE_TIME_STAMP:
3781                 /* FIXME: not implemented */
3782                 rb_advance_iter(iter);
3783                 goto again;
3784
3785         case RINGBUF_TYPE_DATA:
3786                 if (ts) {
3787                         *ts = iter->read_stamp + event->time_delta;
3788                         ring_buffer_normalize_time_stamp(buffer,
3789                                                          cpu_buffer->cpu, ts);
3790                 }
3791                 return event;
3792
3793         default:
3794                 BUG();
3795         }
3796
3797         return NULL;
3798 }
3799 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3800
3801 static inline int rb_ok_to_lock(void)
3802 {
3803         /*
3804          * If an NMI die dumps out the content of the ring buffer,
3805          * do not grab locks. We also permanently disable the ring
3806          * buffer. A one time deal is all you get from reading
3807          * the ring buffer from an NMI.
3808          */
3809         if (likely(!in_nmi()))
3810                 return 1;
3811
3812         tracing_off_permanent();
3813         return 0;
3814 }
3815
3816 /**
3817  * ring_buffer_peek - peek at the next event to be read
3818  * @buffer: The ring buffer to read
3819  * @cpu: The cpu to peek at
3820  * @ts: The timestamp counter of this event.
3821  * @lost_events: a variable to store if events were lost (may be NULL)
3822  *
3823  * This will return the event that will be read next, but does
3824  * not consume the data.
3825  */
3826 struct ring_buffer_event *
3827 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3828                  unsigned long *lost_events)
3829 {
3830         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3831         struct ring_buffer_event *event;
3832         unsigned long flags;
3833         int dolock;
3834
3835         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3836                 return NULL;
3837
3838         dolock = rb_ok_to_lock();
3839  again:
3840         local_irq_save(flags);
3841         if (dolock)
3842                 raw_spin_lock(&cpu_buffer->reader_lock);
3843         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3844         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3845                 rb_advance_reader(cpu_buffer);
3846         if (dolock)
3847                 raw_spin_unlock(&cpu_buffer->reader_lock);
3848         local_irq_restore(flags);
3849
3850         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3851                 goto again;
3852
3853         return event;
3854 }
3855
3856 /**
3857  * ring_buffer_iter_peek - peek at the next event to be read
3858  * @iter: The ring buffer iterator
3859  * @ts: The timestamp counter of this event.
3860  *
3861  * This will return the event that will be read next, but does
3862  * not increment the iterator.
3863  */
3864 struct ring_buffer_event *
3865 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3866 {
3867         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3868         struct ring_buffer_event *event;
3869         unsigned long flags;
3870
3871  again:
3872         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3873         event = rb_iter_peek(iter, ts);
3874         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3875
3876         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3877                 goto again;
3878
3879         return event;
3880 }
3881
3882 /**
3883  * ring_buffer_consume - return an event and consume it
3884  * @buffer: The ring buffer to get the next event from
3885  * @cpu: the cpu to read the buffer from
3886  * @ts: a variable to store the timestamp (may be NULL)
3887  * @lost_events: a variable to store if events were lost (may be NULL)
3888  *
3889  * Returns the next event in the ring buffer, and that event is consumed.
3890  * Meaning, that sequential reads will keep returning a different event,
3891  * and eventually empty the ring buffer if the producer is slower.
3892  */
3893 struct ring_buffer_event *
3894 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3895                     unsigned long *lost_events)
3896 {
3897         struct ring_buffer_per_cpu *cpu_buffer;
3898         struct ring_buffer_event *event = NULL;
3899         unsigned long flags;
3900         int dolock;
3901
3902         dolock = rb_ok_to_lock();
3903
3904  again:
3905         /* might be called in atomic */
3906         preempt_disable();
3907
3908         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3909                 goto out;
3910
3911         cpu_buffer = buffer->buffers[cpu];
3912         local_irq_save(flags);
3913         if (dolock)
3914                 raw_spin_lock(&cpu_buffer->reader_lock);
3915
3916         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3917         if (event) {
3918                 cpu_buffer->lost_events = 0;
3919                 rb_advance_reader(cpu_buffer);
3920         }
3921
3922         if (dolock)
3923                 raw_spin_unlock(&cpu_buffer->reader_lock);
3924         local_irq_restore(flags);
3925
3926  out:
3927         preempt_enable();
3928
3929         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3930                 goto again;
3931
3932         return event;
3933 }
3934 EXPORT_SYMBOL_GPL(ring_buffer_consume);
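
/*
 * Consumer-loop sketch (illustrative; "buffer", "cpu" and process() are
 * hypothetical placeholders):
 *
 *	struct ring_buffer_event *event;
 *	unsigned long lost;
 *	u64 ts;
 *
 *	while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
 *		if (lost)
 *			pr_warn("%lu events overwritten\n", lost);
 *		process(ring_buffer_event_data(event),
 *			ring_buffer_event_length(event), ts);
 *	}
 *
 * Each successful call consumes the returned event, so the loop drains the
 * per-cpu buffer until it is empty.
 */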
3935
3936 /**
3937  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3938  * @buffer: The ring buffer to read from
3939  * @cpu: The cpu buffer to iterate over
3940  *
3941  * This performs the initial preparations necessary to iterate
3942  * through the buffer.  Memory is allocated, buffer recording
3943  * is disabled, and the iterator pointer is returned to the caller.
3944  *
3945  * Disabling buffer recording prevents the reading from being
3946  * corrupted. This is not a consuming read, so a producer is not
3947  * expected.
3948  *
3949  * After a sequence of ring_buffer_read_prepare calls, the user is
3950  * expected to make at least one call to ring_buffer_read_prepare_sync.
3951  * Afterwards, ring_buffer_read_start is invoked to get things going
3952  * for real.
3953  *
3954  * This overall must be paired with ring_buffer_read_finish.
3955  */
3956 struct ring_buffer_iter *
3957 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3958 {
3959         struct ring_buffer_per_cpu *cpu_buffer;
3960         struct ring_buffer_iter *iter;
3961
3962         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3963                 return NULL;
3964
3965         iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3966         if (!iter)
3967                 return NULL;
3968
3969         cpu_buffer = buffer->buffers[cpu];
3970
3971         iter->cpu_buffer = cpu_buffer;
3972
3973         atomic_inc(&buffer->resize_disabled);
3974         atomic_inc(&cpu_buffer->record_disabled);
3975
3976         return iter;
3977 }
3978 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
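
/*
 * Non-consuming read sequence (illustrative sketch; per-cpu looping and
 * error handling are trimmed, and the iter array is hypothetical):
 *
 *	struct ring_buffer_iter *iter[NR_CPUS];
 *	struct ring_buffer_event *event;
 *	u64 ts;
 *	int cpu;
 *
 *	for_each_online_cpu(cpu)
 *		iter[cpu] = ring_buffer_read_prepare(buffer, cpu);
 *	ring_buffer_read_prepare_sync();
 *	for_each_online_cpu(cpu) {
 *		ring_buffer_read_start(iter[cpu]);
 *		while ((event = ring_buffer_read(iter[cpu], &ts)))
 *			;
 *		ring_buffer_read_finish(iter[cpu]);
 *	}
 *
 * The body of the while loop would examine each event in place; nothing is
 * consumed, so a later consuming read still sees the same data.
 */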
3979
3980 /**
3981  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3982  *
3983  * All previously invoked ring_buffer_read_prepare calls to prepare
3984  * iterators will be synchronized.  Afterwards, ring_buffer_read_start
3985  * calls on those iterators are allowed.
3986  */
3987 void
3988 ring_buffer_read_prepare_sync(void)
3989 {
3990         synchronize_sched();
3991 }
3992 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
3993
3994 /**
3995  * ring_buffer_read_start - start a non consuming read of the buffer
3996  * @iter: The iterator returned by ring_buffer_read_prepare
3997  *
3998  * This finalizes the startup of an iteration through the buffer.
3999  * The iterator comes from a call to ring_buffer_read_prepare and
4000  * an intervening ring_buffer_read_prepare_sync must have been
4001  * performed.
4002  *
4003  * Must be paired with ring_buffer_read_finish.
4004  */
4005 void
4006 ring_buffer_read_start(struct ring_buffer_iter *iter)
4007 {
4008         struct ring_buffer_per_cpu *cpu_buffer;
4009         unsigned long flags;
4010
4011         if (!iter)
4012                 return;
4013
4014         cpu_buffer = iter->cpu_buffer;
4015
4016         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4017         arch_spin_lock(&cpu_buffer->lock);
4018         rb_iter_reset(iter);
4019         arch_spin_unlock(&cpu_buffer->lock);
4020         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4021 }
4022 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4023
4024 /**
4025  * ring_buffer_read_finish - finish reading the iterator of the buffer
4026  * @iter: The iterator retrieved by ring_buffer_read_prepare
4027  *
4028  * This re-enables the recording to the buffer, and frees the
4029  * iterator.
4030  */
4031 void
4032 ring_buffer_read_finish(struct ring_buffer_iter *iter)
4033 {
4034         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4035         unsigned long flags;
4036
4037         /*
4038          * Ring buffer is disabled from recording; here's a good place
4039          * to check the integrity of the ring buffer.
4040          * Must prevent readers from trying to read, as the check
4041          * clears the HEAD page and readers require it.
4042          */
4043         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4044         rb_check_pages(cpu_buffer);
4045         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4046
4047         atomic_dec(&cpu_buffer->record_disabled);
4048         atomic_dec(&cpu_buffer->buffer->resize_disabled);
4049         kfree(iter);
4050 }
4051 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4052
4053 /**
4054  * ring_buffer_read - read the next item in the ring buffer by the iterator
4055  * @iter: The ring buffer iterator
4056  * @ts: The time stamp of the event read.
4057  *
4058  * This reads the next event in the ring buffer and increments the iterator.
4059  */
4060 struct ring_buffer_event *
4061 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4062 {
4063         struct ring_buffer_event *event;
4064         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4065         unsigned long flags;
4066
4067         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4068  again:
4069         event = rb_iter_peek(iter, ts);
4070         if (!event)
4071                 goto out;
4072
4073         if (event->type_len == RINGBUF_TYPE_PADDING)
4074                 goto again;
4075
4076         rb_advance_iter(iter);
4077  out:
4078         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4079
4080         return event;
4081 }
4082 EXPORT_SYMBOL_GPL(ring_buffer_read);
4083
4084 /**
4085  * ring_buffer_size - return the size of the ring buffer (in bytes)
4086  * @buffer: The ring buffer.
 * @cpu: The CPU to get ring buffer size from.
4087  */
4088 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4089 {
4090         /*
4091          * Earlier, this method returned
4092          *      BUF_PAGE_SIZE * buffer->nr_pages
4093          * Since the nr_pages field is now removed, we have converted this to
4094          * return the per cpu buffer value.
4095          */
4096         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4097                 return 0;
4098
4099         return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4100 }
4101 EXPORT_SYMBOL_GPL(ring_buffer_size);
4102
4103 static void
4104 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4105 {
4106         rb_head_page_deactivate(cpu_buffer);
4107
4108         cpu_buffer->head_page
4109                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
4110         local_set(&cpu_buffer->head_page->write, 0);
4111         local_set(&cpu_buffer->head_page->entries, 0);
4112         local_set(&cpu_buffer->head_page->page->commit, 0);
4113
4114         cpu_buffer->head_page->read = 0;
4115
4116         cpu_buffer->tail_page = cpu_buffer->head_page;
4117         cpu_buffer->commit_page = cpu_buffer->head_page;
4118
4119         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4120         INIT_LIST_HEAD(&cpu_buffer->new_pages);
4121         local_set(&cpu_buffer->reader_page->write, 0);
4122         local_set(&cpu_buffer->reader_page->entries, 0);
4123         local_set(&cpu_buffer->reader_page->page->commit, 0);
4124         cpu_buffer->reader_page->read = 0;
4125
4126         local_set(&cpu_buffer->entries_bytes, 0);
4127         local_set(&cpu_buffer->overrun, 0);
4128         local_set(&cpu_buffer->commit_overrun, 0);
4129         local_set(&cpu_buffer->dropped_events, 0);
4130         local_set(&cpu_buffer->entries, 0);
4131         local_set(&cpu_buffer->committing, 0);
4132         local_set(&cpu_buffer->commits, 0);
4133         cpu_buffer->read = 0;
4134         cpu_buffer->read_bytes = 0;
4135
4136         cpu_buffer->write_stamp = 0;
4137         cpu_buffer->read_stamp = 0;
4138
4139         cpu_buffer->lost_events = 0;
4140         cpu_buffer->last_overrun = 0;
4141
4142         rb_head_page_activate(cpu_buffer);
4143 }
4144
4145 /**
4146  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4147  * @buffer: The ring buffer to reset a per cpu buffer of
4148  * @cpu: The CPU buffer to be reset
4149  */
4150 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4151 {
4152         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4153         unsigned long flags;
4154
4155         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4156                 return;
4157
4158         atomic_inc(&buffer->resize_disabled);
4159         atomic_inc(&cpu_buffer->record_disabled);
4160
4161         /* Make sure all commits have finished */
4162         synchronize_sched();
4163
4164         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4165
4166         if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4167                 goto out;
4168
4169         arch_spin_lock(&cpu_buffer->lock);
4170
4171         rb_reset_cpu(cpu_buffer);
4172
4173         arch_spin_unlock(&cpu_buffer->lock);
4174
4175  out:
4176         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4177
4178         atomic_dec(&cpu_buffer->record_disabled);
4179         atomic_dec(&buffer->resize_disabled);
4180 }
4181 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4182
4183 /**
4184  * ring_buffer_reset - reset a ring buffer
4185  * @buffer: The ring buffer to reset all cpu buffers
4186  */
4187 void ring_buffer_reset(struct ring_buffer *buffer)
4188 {
4189         int cpu;
4190
4191         for_each_buffer_cpu(buffer, cpu)
4192                 ring_buffer_reset_cpu(buffer, cpu);
4193 }
4194 EXPORT_SYMBOL_GPL(ring_buffer_reset);
4195
4196 /**
4197  * ring_buffer_empty - is the ring buffer empty?
4198  * @buffer: The ring buffer to test
4199  */
4200 int ring_buffer_empty(struct ring_buffer *buffer)
4201 {
4202         struct ring_buffer_per_cpu *cpu_buffer;
4203         unsigned long flags;
4204         int dolock;
4205         int cpu;
4206         int ret;
4207
4208         dolock = rb_ok_to_lock();
4209
4210         /* yes this is racy, but if you don't like the race, lock the buffer */
4211         for_each_buffer_cpu(buffer, cpu) {
4212                 cpu_buffer = buffer->buffers[cpu];
4213                 local_irq_save(flags);
4214                 if (dolock)
4215                         raw_spin_lock(&cpu_buffer->reader_lock);
4216                 ret = rb_per_cpu_empty(cpu_buffer);
4217                 if (dolock)
4218                         raw_spin_unlock(&cpu_buffer->reader_lock);
4219                 local_irq_restore(flags);
4220
4221                 if (!ret)
4222                         return 0;
4223         }
4224
4225         return 1;
4226 }
4227 EXPORT_SYMBOL_GPL(ring_buffer_empty);
4228
4229 /**
4230  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4231  * @buffer: The ring buffer
4232  * @cpu: The CPU buffer to test
4233  */
4234 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4235 {
4236         struct ring_buffer_per_cpu *cpu_buffer;
4237         unsigned long flags;
4238         int dolock;
4239         int ret;
4240
4241         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4242                 return 1;
4243
4244         dolock = rb_ok_to_lock();
4245
4246         cpu_buffer = buffer->buffers[cpu];
4247         local_irq_save(flags);
4248         if (dolock)
4249                 raw_spin_lock(&cpu_buffer->reader_lock);
4250         ret = rb_per_cpu_empty(cpu_buffer);
4251         if (dolock)
4252                 raw_spin_unlock(&cpu_buffer->reader_lock);
4253         local_irq_restore(flags);
4254
4255         return ret;
4256 }
4257 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4258
4259 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4260 /**
4261  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4262  * @buffer_a: One buffer to swap with
4263  * @buffer_b: The other buffer to swap with
4264  *
4265  * This function is useful for tracers that want to take a "snapshot"
4266  * of a CPU buffer and have another backup buffer lying around.
4267  * It is expected that the tracer handles the cpu buffer not being
4268  * used at the moment.
4269  */
4270 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4271                          struct ring_buffer *buffer_b, int cpu)
4272 {
4273         struct ring_buffer_per_cpu *cpu_buffer_a;
4274         struct ring_buffer_per_cpu *cpu_buffer_b;
4275         int ret = -EINVAL;
4276
4277         if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4278             !cpumask_test_cpu(cpu, buffer_b->cpumask))
4279                 goto out;
4280
4281         cpu_buffer_a = buffer_a->buffers[cpu];
4282         cpu_buffer_b = buffer_b->buffers[cpu];
4283
4284         /* At least make sure the two buffers are somewhat the same */
4285         if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4286                 goto out;
4287
4288         ret = -EAGAIN;
4289
4290         if (ring_buffer_flags != RB_BUFFERS_ON)
4291                 goto out;
4292
4293         if (atomic_read(&buffer_a->record_disabled))
4294                 goto out;
4295
4296         if (atomic_read(&buffer_b->record_disabled))
4297                 goto out;
4298
4299         if (atomic_read(&cpu_buffer_a->record_disabled))
4300                 goto out;
4301
4302         if (atomic_read(&cpu_buffer_b->record_disabled))
4303                 goto out;
4304
4305         /*
4306          * We can't do a synchronize_sched here because this
4307          * function can be called in atomic context.
4308          * Normally this will be called from the same CPU as cpu.
4309          * If not it's up to the caller to protect this.
4310          */
4311         atomic_inc(&cpu_buffer_a->record_disabled);
4312         atomic_inc(&cpu_buffer_b->record_disabled);
4313
4314         ret = -EBUSY;
4315         if (local_read(&cpu_buffer_a->committing))
4316                 goto out_dec;
4317         if (local_read(&cpu_buffer_b->committing))
4318                 goto out_dec;
4319
4320         buffer_a->buffers[cpu] = cpu_buffer_b;
4321         buffer_b->buffers[cpu] = cpu_buffer_a;
4322
4323         cpu_buffer_b->buffer = buffer_a;
4324         cpu_buffer_a->buffer = buffer_b;
4325
4326         ret = 0;
4327
4328 out_dec:
4329         atomic_dec(&cpu_buffer_a->record_disabled);
4330         atomic_dec(&cpu_buffer_b->record_disabled);
4331 out:
4332         return ret;
4333 }
4334 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
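
/*
 * Snapshot sketch ("main" and "snap" are hypothetical ring buffers kept by a
 * tracer):
 *
 *	if (ring_buffer_swap_cpu(snap, main, cpu) == 0)
 *		consume_snapshot(snap, cpu);
 *
 * On success the live data for @cpu now sits in snap and can be read (for
 * example with ring_buffer_consume()) while main keeps recording into the
 * pages snap used to own. A negative return (-EINVAL, -EAGAIN or -EBUSY)
 * means the swap could not be performed safely and nothing was exchanged.
 * consume_snapshot() here is a hypothetical helper of the caller.
 */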
4335 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
4336
4337 /**
4338  * ring_buffer_alloc_read_page - allocate a page to read from buffer
4339  * @buffer: the buffer to allocate for.
4340  * @cpu: the cpu buffer to allocate.
4341  *
4342  * This function is used in conjunction with ring_buffer_read_page.
4343  * When reading a full page from the ring buffer, these functions
4344  * can be used to speed up the process. The calling function should
4345  * allocate a few pages first with this function. Then when it
4346  * needs to get pages from the ring buffer, it passes the result
4347  * of this function into ring_buffer_read_page, which will swap
4348  * the page that was allocated, with the read page of the buffer.
4349  *
4350  * Returns:
4351  *  The page allocated, or NULL on error.
4352  */
4353 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4354 {
4355         struct buffer_data_page *bpage;
4356         struct page *page;
4357
4358         page = alloc_pages_node(cpu_to_node(cpu),
4359                                 GFP_KERNEL | __GFP_NORETRY, 0);
4360         if (!page)
4361                 return NULL;
4362
4363         bpage = page_address(page);
4364
4365         rb_init_page(bpage);
4366
4367         return bpage;
4368 }
4369 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4370
4371 /**
4372  * ring_buffer_free_read_page - free an allocated read page
4373  * @buffer: the buffer the page was allocated for
4374  * @data: the page to free
4375  *
4376  * Free a page allocated from ring_buffer_alloc_read_page.
4377  */
4378 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
4379 {
4380         free_page((unsigned long)data);
4381 }
4382 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4383
4384 /**
4385  * ring_buffer_read_page - extract a page from the ring buffer
4386  * @buffer: buffer to extract from
4387  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4388  * @len: amount to extract
4389  * @cpu: the cpu of the buffer to extract
4390  * @full: should the extraction only happen when the page is full.
4391  *
4392  * This function will pull out a page from the ring buffer and consume it.
4393  * @data_page must be the address of the variable that was returned
4394  * from ring_buffer_alloc_read_page. This is because the page might be used
4395  * to swap with a page in the ring buffer.
4396  *
4397  * for example:
4398  *      rpage = ring_buffer_alloc_read_page(buffer, cpu);
4399  *      if (!rpage)
4400  *              return error;
4401  *      ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4402  *      if (ret >= 0)
4403  *              process_page(rpage, ret);
4404  *
4405  * When @full is set, the function will not return true unless
4406  * When @full is set, the function will not succeed unless
4407  *
4408  * Note: it is up to the calling functions to handle sleeps and wakeups.
4409  *  The ring buffer can be used anywhere in the kernel and can not
4410  *  blindly call wake_up. The layer that uses the ring buffer must be
4411  *  responsible for that.
4412  *
4413  * Returns:
4414  *  >=0 if data has been transferred, returns the offset of consumed data.
4415  *  <0 if no data has been transferred.
4416  */
4417 int ring_buffer_read_page(struct ring_buffer *buffer,
4418                           void **data_page, size_t len, int cpu, int full)
4419 {
4420         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4421         struct ring_buffer_event *event;
4422         struct buffer_data_page *bpage;
4423         struct buffer_page *reader;
4424         unsigned long missed_events;
4425         unsigned long flags;
4426         unsigned int commit;
4427         unsigned int read;
4428         u64 save_timestamp;
4429         int ret = -1;
4430
4431         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4432                 goto out;
4433
4434         /*
4435          * If len is not big enough to hold the page header, then
4436          * we can not copy anything.
4437          */
4438         if (len <= BUF_PAGE_HDR_SIZE)
4439                 goto out;
4440
4441         len -= BUF_PAGE_HDR_SIZE;
4442
4443         if (!data_page)
4444                 goto out;
4445
4446         bpage = *data_page;
4447         if (!bpage)
4448                 goto out;
4449
4450         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4451
4452         reader = rb_get_reader_page(cpu_buffer);
4453         if (!reader)
4454                 goto out_unlock;
4455
4456         event = rb_reader_event(cpu_buffer);
4457
4458         read = reader->read;
4459         commit = rb_page_commit(reader);
4460
4461         /* Check if any events were dropped */
4462         missed_events = cpu_buffer->lost_events;
4463
4464         /*
4465          * If this page has been partially read or
4466          * if len is not big enough to read the rest of the page or
4467          * a writer is still on the page, then
4468          * we must copy the data from the page to the buffer.
4469          * Otherwise, we can simply swap the page with the one passed in.
4470          */
4471         if (read || (len < (commit - read)) ||
4472             cpu_buffer->reader_page == cpu_buffer->commit_page) {
4473                 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4474                 unsigned int rpos = read;
4475                 unsigned int pos = 0;
4476                 unsigned int size;
4477
4478                 if (full)
4479                         goto out_unlock;
4480
4481                 if (len > (commit - read))
4482                         len = (commit - read);
4483
4484                 /* Always keep the time extend and data together */
4485                 size = rb_event_ts_length(event);
4486
4487                 if (len < size)
4488                         goto out_unlock;
4489
4490                 /* save the current timestamp, since the user will need it */
4491                 save_timestamp = cpu_buffer->read_stamp;
4492
4493                 /* Need to copy one event at a time */
4494                 do {
4495                         /* We need the size of one event, because
4496                          * rb_advance_reader only advances by one event,
4497                          * whereas rb_event_ts_length may include the size of
4498                          * one or two events.
4499                          * We have already ensured there's enough space if this
4500                          * is a time extend. */
4501                         size = rb_event_length(event);
4502                         memcpy(bpage->data + pos, rpage->data + rpos, size);
4503
4504                         len -= size;
4505
4506                         rb_advance_reader(cpu_buffer);
4507                         rpos = reader->read;
4508                         pos += size;
4509
4510                         if (rpos >= commit)
4511                                 break;
4512
4513                         event = rb_reader_event(cpu_buffer);
4514                         /* Always keep the time extend and data together */
4515                         size = rb_event_ts_length(event);
4516                 } while (len >= size);
4517
4518                 /* update bpage */
4519                 local_set(&bpage->commit, pos);
4520                 bpage->time_stamp = save_timestamp;
4521
4522                 /* we copied everything to the beginning */
4523                 read = 0;
4524         } else {
4525                 /* update the entry counter */
4526                 cpu_buffer->read += rb_page_entries(reader);
4527                 cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4528
4529                 /* swap the pages */
4530                 rb_init_page(bpage);
4531                 bpage = reader->page;
4532                 reader->page = *data_page;
4533                 local_set(&reader->write, 0);
4534                 local_set(&reader->entries, 0);
4535                 reader->read = 0;
4536                 *data_page = bpage;
4537
4538                 /*
4539                  * Use the real_end for the data size,
4540                  * This gives us a chance to store the lost events
4541                  * on the page.
4542                  */
4543                 if (reader->real_end)
4544                         local_set(&bpage->commit, reader->real_end);
4545         }
4546         ret = read;
4547
4548         cpu_buffer->lost_events = 0;
4549
4550         commit = local_read(&bpage->commit);
4551         /*
4552          * Set a flag in the commit field if we lost events
4553          */
4554         if (missed_events) {
4555                 /* If there is room at the end of the page to save the
4556                  * missed events, then record it there.
4557                  */
4558                 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4559                         memcpy(&bpage->data[commit], &missed_events,
4560                                sizeof(missed_events));
4561                         local_add(RB_MISSED_STORED, &bpage->commit);
4562                         commit += sizeof(missed_events);
4563                 }
4564                 local_add(RB_MISSED_EVENTS, &bpage->commit);
4565         }
4566
4567         /*
4568          * This page may be off to user land. Zero it out here.
4569          */
4570         if (commit < BUF_PAGE_SIZE)
4571                 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4572
4573  out_unlock:
4574         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4575
4576  out:
4577         return ret;
4578 }
4579 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
4580
4581 #ifdef CONFIG_HOTPLUG_CPU
4582 static int rb_cpu_notify(struct notifier_block *self,
4583                          unsigned long action, void *hcpu)
4584 {
4585         struct ring_buffer *buffer =
4586                 container_of(self, struct ring_buffer, cpu_notify);
4587         long cpu = (long)hcpu;
4588         int cpu_i, nr_pages_same;
4589         unsigned int nr_pages;
4590
4591         switch (action) {
4592         case CPU_UP_PREPARE:
4593         case CPU_UP_PREPARE_FROZEN:
4594                 if (cpumask_test_cpu(cpu, buffer->cpumask))
4595                         return NOTIFY_OK;
4596
4597                 nr_pages = 0;
4598                 nr_pages_same = 1;
4599                 /* check if all CPU buffers are the same size */
4600                 for_each_buffer_cpu(buffer, cpu_i) {
4601                         /* fill in the size from the first enabled cpu */
4602                         if (nr_pages == 0)
4603                                 nr_pages = buffer->buffers[cpu_i]->nr_pages;
4604                         if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4605                                 nr_pages_same = 0;
4606                                 break;
4607                         }
4608                 }
4609                 /* allocate the minimum number of pages; the user can expand it later */
4610                 if (!nr_pages_same)
4611                         nr_pages = 2;
4612                 buffer->buffers[cpu] =
4613                         rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4614                 if (!buffer->buffers[cpu]) {
4615                         WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4616                              cpu);
4617                         return NOTIFY_OK;
4618                 }
4619                 smp_wmb();
4620                 cpumask_set_cpu(cpu, buffer->cpumask);
4621                 break;
4622         case CPU_DOWN_PREPARE:
4623         case CPU_DOWN_PREPARE_FROZEN:
4624                 /*
4625                  * Do nothing.
4626                  * If we were to free the buffer, then the user would
4627                  * lose any trace that was in the buffer.
4628                  */
4629                 break;
4630         default:
4631                 break;
4632         }
4633         return NOTIFY_OK;
4634 }
4635 #endif
4636
4637 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4638 /*
4639  * This is a basic integrity check of the ring buffer.
4640  * When configured in, this test runs late in the boot cycle.
4641  * It kicks off one thread per CPU; each thread goes into a loop
4642  * writing various sizes of data to its own per-CPU ring buffer.
4643  * Some of the data will be large items, some small.
4644  *
4645  * Another thread is created that goes into a spin, sending out
4646  * IPIs to the other CPUs so that they also write into the ring
4647  * buffer. This tests the nesting ability of the buffer.
4648  *
4649  * Basic stats are recorded and reported. If anything unexpected
4650  * happens in the ring buffer, a big warning is displayed and all
4651  * ring buffers are disabled.
4652  */
4653 static struct task_struct *rb_threads[NR_CPUS] __initdata;
4654
4655 struct rb_test_data {
4656         struct ring_buffer      *buffer;
4657         unsigned long           events;
4658         unsigned long           bytes_written;
4659         unsigned long           bytes_alloc;
4660         unsigned long           bytes_dropped;
4661         unsigned long           events_nested;
4662         unsigned long           bytes_written_nested;
4663         unsigned long           bytes_alloc_nested;
4664         unsigned long           bytes_dropped_nested;
4665         int                     min_size_nested;
4666         int                     max_size_nested;
4667         int                     max_size;
4668         int                     min_size;
4669         int                     cpu;
4670         int                     cnt;
4671 };
4672
4673 static struct rb_test_data rb_data[NR_CPUS] __initdata;
4674
4675 /* 1 meg per cpu */
4676 #define RB_TEST_BUFFER_SIZE     1048576
4677
4678 static char rb_string[] __initdata =
4679         "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4680         "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4681         "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4682
4683 static bool rb_test_started __initdata;
4684
4685 struct rb_item {
4686         int size;
4687         char str[];
4688 };
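/*
 * Illustrative layout note (not from the original source): each test
 * record reserved below is sizeof(struct rb_item) + size bytes, with
 * the payload copied from rb_string and no terminating NUL:
 *
 *	| int size | str[0] ... str[size - 1] |
 *
 * ring_buffer_event_data() returns a pointer to the rb_item header.
 */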
4689
4690 static __init int rb_write_something(struct rb_test_data *data, bool nested)
4691 {
4692         struct ring_buffer_event *event;
4693         struct rb_item *item;
4694         bool started;
4695         int event_len;
4696         int size;
4697         int len;
4698         int cnt;
4699
4700         /* Have nested writes differ from what is normally written */
4701         cnt = data->cnt + (nested ? 27 : 0);
4702
4703         /* Multiply cnt by ~e (68/25), to make some unique increment */
4704         size = (cnt * 68 / 25) % (sizeof(rb_string) - 1);
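        /*
         * Illustrative arithmetic (values chosen here, not from a real
         * run): successive data->cnt values of 100, 101, 102 give
         * pre-modulus sizes of 272, 274, 277, i.e. steps of roughly e,
         * and the nested offset of 27 shifts the size by about 73, so
         * normal and nested records rarely share a length.
         */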
4705
4706         len = size + sizeof(struct rb_item);
4707
4708         started = rb_test_started;
4709         /* read rb_test_started before checking buffer enabled */
4710         smp_rmb();
4711
4712         event = ring_buffer_lock_reserve(data->buffer, len);
4713         if (!event) {
4714                 /* Ignore dropped events before test starts. */
4715                 if (started) {
4716                         if (nested)
4717                                 data->bytes_dropped_nested += len;
4718                         else
4719                                 data->bytes_dropped += len;
4720                 }
4721                 return len;
4722         }
4723
4724         event_len = ring_buffer_event_length(event);
4725
4726         if (RB_WARN_ON(data->buffer, event_len < len))
4727                 goto out;
4728
4729         item = ring_buffer_event_data(event);
4730         item->size = size;
4731         memcpy(item->str, rb_string, size);
4732
4733         if (nested) {
4734                 data->bytes_alloc_nested += event_len;
4735                 data->bytes_written_nested += len;
4736                 data->events_nested++;
4737                 if (!data->min_size_nested || len < data->min_size_nested)
4738                         data->min_size_nested = len;
4739                 if (len > data->max_size_nested)
4740                         data->max_size_nested = len;
4741         } else {
4742                 data->bytes_alloc += event_len;
4743                 data->bytes_written += len;
4744                 data->events++;
4745                 if (!data->min_size || len < data->min_size)
4746                         data->min_size = len;
4747                 if (len > data->max_size)
4748                         data->max_size = len;
4749         }
4750
4751  out:
4752         ring_buffer_unlock_commit(data->buffer, event);
4753
4754         return 0;
4755 }
4756
4757 static __init int rb_test(void *arg)
4758 {
4759         struct rb_test_data *data = arg;
4760
4761         while (!kthread_should_stop()) {
4762                 rb_write_something(data, false);
4763                 data->cnt++;
4764
4765                 set_current_state(TASK_INTERRUPTIBLE);
4766                 /* Sleep between a minimum of 100-300us (based on cnt) and a maximum of 1ms */
4767                 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
4768         }
4769
4770         return 0;
4771 }
4772
4773 static __init void rb_ipi(void *ignore)
4774 {
4775         struct rb_test_data *data;
4776         int cpu = smp_processor_id();
4777
4778         data = &rb_data[cpu];
4779         rb_write_something(data, true);
4780 }
4781
4782 static __init int rb_hammer_test(void *arg)
4783 {
4784         while (!kthread_should_stop()) {
4785
4786                 /* Send an IPI to all cpus to write data! */
4787                 smp_call_function(rb_ipi, NULL, 1);
4788                 /* No sleep, but for non-preempt kernels, let others run */
4789                 schedule();
4790         }
4791
4792         return 0;
4793 }
4794
4795 static __init int test_ringbuffer(void)
4796 {
4797         struct task_struct *rb_hammer;
4798         struct ring_buffer *buffer;
4799         int cpu;
4800         int ret = 0;
4801
4802         pr_info("Running ring buffer tests...\n");
4803
4804         buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
4805         if (WARN_ON(!buffer))
4806                 return 0;
4807
4808         /* Disable buffer so that threads can't write to it yet */
4809         ring_buffer_record_off(buffer);
4810
4811         for_each_online_cpu(cpu) {
4812                 rb_data[cpu].buffer = buffer;
4813                 rb_data[cpu].cpu = cpu;
4814                 rb_data[cpu].cnt = cpu;
4815                 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
4816                                                  "rbtester/%d", cpu);
4817                 if (WARN_ON(IS_ERR(rb_threads[cpu]))) {
4818                         pr_cont("FAILED\n");
4819                         ret = PTR_ERR(rb_threads[cpu]);
4820                         goto out_free;
4821                 }
4822
4823                 kthread_bind(rb_threads[cpu], cpu);
4824                 wake_up_process(rb_threads[cpu]);
4825         }
4826
4827         /* Now create the rb hammer! */
4828         rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
4829         if (WARN_ON(IS_ERR(rb_hammer))) {
4830                 pr_cont("FAILED\n");
4831                 ret = PTR_ERR(rb_hammer);
4832                 goto out_free;
4833         }
4834
4835         ring_buffer_record_on(buffer);
4836         /*
4837          * Show the buffer is enabled before setting rb_test_started.
4838          * Yes, there's a small race window where events could be
4839          * dropped and the threads won't catch it. But when a ring
4840          * buffer gets enabled, there will always be some kind of
4841          * delay before other CPUs see it. Thus, we don't care about
4842          * those dropped events. We care about events dropped after
4843          * the threads see that the buffer is active.
4844          */
4845         smp_wmb();
4846         rb_test_started = true;
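        /*
         * Ordering sketch (documentation only); the store side here pairs
         * with the smp_rmb() in rb_write_something():
         *
         *	this thread                     rb_test()/rb_ipi()
         *	ring_buffer_record_on()         started = rb_test_started;
         *	smp_wmb();                      smp_rmb();
         *	rb_test_started = true;         ring_buffer_lock_reserve();
         *
         * A writer that observes rb_test_started == true is therefore
         * guaranteed to also see the buffer as enabled, so any bytes it
         * records as dropped after that point are treated as a failure
         * by the checks below.
         */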
4847
4848         set_current_state(TASK_INTERRUPTIBLE);
4849         /* Just run for 10 seconds */
4850         schedule_timeout(10 * HZ);
4851
4852         kthread_stop(rb_hammer);
4853
4854  out_free:
4855         for_each_online_cpu(cpu) {
4856                 if (IS_ERR_OR_NULL(rb_threads[cpu]))
4857                         break;
4858                 kthread_stop(rb_threads[cpu]);
4859         }
4860         if (ret) {
4861                 ring_buffer_free(buffer);
4862                 return ret;
4863         }
4864
4865         /* Report! */
4866         pr_info("finished\n");
4867         for_each_online_cpu(cpu) {
4868                 struct ring_buffer_event *event;
4869                 struct rb_test_data *data = &rb_data[cpu];
4870                 struct rb_item *item;
4871                 unsigned long total_events;
4872                 unsigned long total_dropped;
4873                 unsigned long total_written;
4874                 unsigned long total_alloc;
4875                 unsigned long total_read = 0;
4876                 unsigned long total_size = 0;
4877                 unsigned long total_len = 0;
4878                 unsigned long total_lost = 0;
4879                 unsigned long lost;
4880                 int big_event_size;
4881                 int small_event_size;
4882
4883                 ret = -1;
4884
4885                 total_events = data->events + data->events_nested;
4886                 total_written = data->bytes_written + data->bytes_written_nested;
4887                 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
4888                 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
4889
4890                 big_event_size = data->max_size + data->max_size_nested;
4891                 small_event_size = data->min_size + data->min_size_nested;
4892
4893                 pr_info("CPU %d:\n", cpu);
4894                 pr_info("              events:    %ld\n", total_events);
4895                 pr_info("       dropped bytes:    %ld\n", total_dropped);
4896                 pr_info("       alloced bytes:    %ld\n", total_alloc);
4897                 pr_info("       written bytes:    %ld\n", total_written);
4898                 pr_info("       biggest event:    %d\n", big_event_size);
4899                 pr_info("      smallest event:    %d\n", small_event_size);
4900
4901                 if (RB_WARN_ON(buffer, total_dropped))
4902                         break;
4903
4904                 ret = 0;
4905
4906                 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
4907                         total_lost += lost;
4908                         item = ring_buffer_event_data(event);
4909                         total_len += ring_buffer_event_length(event);
4910                         total_size += item->size + sizeof(struct rb_item);
4911                         if (memcmp(&item->str[0], rb_string, item->size) != 0) {
4912                                 pr_info("FAILED!\n");
4913                                 pr_info("buffer had: %.*s\n", item->size, item->str);
4914                                 pr_info("expected:   %.*s\n", item->size, rb_string);
4915                                 RB_WARN_ON(buffer, 1);
4916                                 ret = -1;
4917                                 break;
4918                         }
4919                         total_read++;
4920                 }
4921                 if (ret)
4922                         break;
4923
4924                 ret = -1;
4925
4926                 pr_info("         read events:   %ld\n", total_read);
4927                 pr_info("         lost events:   %ld\n", total_lost);
4928                 pr_info("        total events:   %ld\n", total_lost + total_read);
4929                 pr_info("  recorded len bytes:   %ld\n", total_len);
4930                 pr_info(" recorded size bytes:   %ld\n", total_size);
4931                 if (total_lost)
4932                         pr_info(" With dropped events, record len and size may not match\n"
4933                                 " alloced and written from above\n");
4934                 if (!total_lost) {
4935                         if (RB_WARN_ON(buffer, total_len != total_alloc ||
4936                                        total_size != total_written))
4937                                 break;
4938                 }
4939                 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
4940                         break;
4941
4942                 ret = 0;
4943         }
4944         if (!ret)
4945                 pr_info("Ring buffer PASSED!\n");
4946
4947         ring_buffer_free(buffer);
4948         return 0;
4949 }
4950
4951 late_initcall(test_ringbuffer);
4952 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
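/*
 * Note (illustrative, not from the original source): the self test above
 * is compiled in only when the corresponding option is set in the kernel
 * configuration, e.g. in a .config fragment:
 *
 *	CONFIG_RING_BUFFER=y
 *	CONFIG_RING_BUFFER_STARTUP_TEST=y
 *
 * Results appear in the kernel log at late_initcall() time, starting with
 * "Running ring buffer tests..." and ending with "Ring buffer PASSED!"
 * when every per-CPU buffer passes.
 */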