Merge tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux...
[cascardo/linux.git] / kernel / trace / ring_buffer.c
1 /*
2  * Generic ring buffer
3  *
4  * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5  */
6 #include <linux/ftrace_event.h>
7 #include <linux/ring_buffer.h>
8 #include <linux/trace_clock.h>
9 #include <linux/trace_seq.h>
10 #include <linux/spinlock.h>
11 #include <linux/irq_work.h>
12 #include <linux/debugfs.h>
13 #include <linux/uaccess.h>
14 #include <linux/hardirq.h>
15 #include <linux/kthread.h>      /* for self test */
16 #include <linux/kmemcheck.h>
17 #include <linux/module.h>
18 #include <linux/percpu.h>
19 #include <linux/mutex.h>
20 #include <linux/delay.h>
21 #include <linux/slab.h>
22 #include <linux/init.h>
23 #include <linux/hash.h>
24 #include <linux/list.h>
25 #include <linux/cpu.h>
26 #include <linux/fs.h>
27
28 #include <asm/local.h>
29
30 static void update_pages_handler(struct work_struct *work);
31
32 /*
33  * The ring buffer header is special. We must manually up keep it.
34  */
35 int ring_buffer_print_entry_header(struct trace_seq *s)
36 {
37         int ret;
38
39         ret = trace_seq_printf(s, "# compressed entry header\n");
40         ret = trace_seq_printf(s, "\ttype_len    :    5 bits\n");
41         ret = trace_seq_printf(s, "\ttime_delta  :   27 bits\n");
42         ret = trace_seq_printf(s, "\tarray       :   32 bits\n");
43         ret = trace_seq_printf(s, "\n");
44         ret = trace_seq_printf(s, "\tpadding     : type == %d\n",
45                                RINGBUF_TYPE_PADDING);
46         ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
47                                RINGBUF_TYPE_TIME_EXTEND);
48         ret = trace_seq_printf(s, "\tdata max type_len  == %d\n",
49                                RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
50
51         return ret;
52 }
53
54 /*
55  * The ring buffer is made up of a list of pages. A separate list of pages is
56  * allocated for each CPU. A writer may only write to a buffer that is
57  * associated with the CPU it is currently executing on.  A reader may read
58  * from any per cpu buffer.
59  *
60  * The reader is special. For each per cpu buffer, the reader has its own
61  * reader page. When a reader has read the entire reader page, this reader
62  * page is swapped with another page in the ring buffer.
63  *
64  * Now, as long as the writer is off the reader page, the reader can do what
65  * ever it wants with that page. The writer will never write to that page
66  * again (as long as it is out of the ring buffer).
67  *
68  * Here's some silly ASCII art.
69  *
70  *   +------+
71  *   |reader|          RING BUFFER
72  *   |page  |
73  *   +------+        +---+   +---+   +---+
74  *                   |   |-->|   |-->|   |
75  *                   +---+   +---+   +---+
76  *                     ^               |
77  *                     |               |
78  *                     +---------------+
79  *
80  *
81  *   +------+
82  *   |reader|          RING BUFFER
83  *   |page  |------------------v
84  *   +------+        +---+   +---+   +---+
85  *                   |   |-->|   |-->|   |
86  *                   +---+   +---+   +---+
87  *                     ^               |
88  *                     |               |
89  *                     +---------------+
90  *
91  *
92  *   +------+
93  *   |reader|          RING BUFFER
94  *   |page  |------------------v
95  *   +------+        +---+   +---+   +---+
96  *      ^            |   |-->|   |-->|   |
97  *      |            +---+   +---+   +---+
98  *      |                              |
99  *      |                              |
100  *      +------------------------------+
101  *
102  *
103  *   +------+
104  *   |buffer|          RING BUFFER
105  *   |page  |------------------v
106  *   +------+        +---+   +---+   +---+
107  *      ^            |   |   |   |-->|   |
108  *      |   New      +---+   +---+   +---+
109  *      |  Reader------^               |
110  *      |   page                       |
111  *      +------------------------------+
112  *
113  *
114  * After we make this swap, the reader can hand this page off to the splice
115  * code and be done with it. It can even allocate a new page if it needs to
116  * and swap that into the ring buffer.
117  *
118  * We will be using cmpxchg soon to make all this lockless.
119  *
120  */
121
122 /*
123  * A fast way to enable or disable all ring buffers is to
124  * call tracing_on or tracing_off. Turning off the ring buffers
125  * prevents all ring buffers from being recorded to.
126  * Turning this switch on, makes it OK to write to the
127  * ring buffer, if the ring buffer is enabled itself.
128  *
129  * There's three layers that must be on in order to write
130  * to the ring buffer.
131  *
132  * 1) This global flag must be set.
133  * 2) The ring buffer must be enabled for recording.
134  * 3) The per cpu buffer must be enabled for recording.
135  *
136  * In case of an anomaly, this global flag has a bit set that
137  * will permantly disable all ring buffers.
138  */
139
140 /*
141  * Global flag to disable all recording to ring buffers
142  *  This has two bits: ON, DISABLED
143  *
144  *  ON   DISABLED
145  * ---- ----------
146  *   0      0        : ring buffers are off
147  *   1      0        : ring buffers are on
148  *   X      1        : ring buffers are permanently disabled
149  */
150
151 enum {
152         RB_BUFFERS_ON_BIT       = 0,
153         RB_BUFFERS_DISABLED_BIT = 1,
154 };
155
156 enum {
157         RB_BUFFERS_ON           = 1 << RB_BUFFERS_ON_BIT,
158         RB_BUFFERS_DISABLED     = 1 << RB_BUFFERS_DISABLED_BIT,
159 };
160
161 static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;
162
163 /* Used for individual buffers (after the counter) */
164 #define RB_BUFFER_OFF           (1 << 20)
165
166 #define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)
167
168 /**
169  * tracing_off_permanent - permanently disable ring buffers
170  *
171  * This function, once called, will disable all ring buffers
172  * permanently.
173  */
174 void tracing_off_permanent(void)
175 {
176         set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
177 }
178
179 #define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
180 #define RB_ALIGNMENT            4U
181 #define RB_MAX_SMALL_DATA       (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
182 #define RB_EVNT_MIN_SIZE        8U      /* two 32bit words */
183
184 #ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
185 # define RB_FORCE_8BYTE_ALIGNMENT       0
186 # define RB_ARCH_ALIGNMENT              RB_ALIGNMENT
187 #else
188 # define RB_FORCE_8BYTE_ALIGNMENT       1
189 # define RB_ARCH_ALIGNMENT              8U
190 #endif
191
192 #define RB_ALIGN_DATA           __aligned(RB_ARCH_ALIGNMENT)
193
194 /* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
195 #define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
196
197 enum {
198         RB_LEN_TIME_EXTEND = 8,
199         RB_LEN_TIME_STAMP = 16,
200 };
201
202 #define skip_time_extend(event) \
203         ((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))
204
205 static inline int rb_null_event(struct ring_buffer_event *event)
206 {
207         return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
208 }
209
210 static void rb_event_set_padding(struct ring_buffer_event *event)
211 {
212         /* padding has a NULL time_delta */
213         event->type_len = RINGBUF_TYPE_PADDING;
214         event->time_delta = 0;
215 }
216
217 static unsigned
218 rb_event_data_length(struct ring_buffer_event *event)
219 {
220         unsigned length;
221
222         if (event->type_len)
223                 length = event->type_len * RB_ALIGNMENT;
224         else
225                 length = event->array[0];
226         return length + RB_EVNT_HDR_SIZE;
227 }
228
229 /*
230  * Return the length of the given event. Will return
231  * the length of the time extend if the event is a
232  * time extend.
233  */
234 static inline unsigned
235 rb_event_length(struct ring_buffer_event *event)
236 {
237         switch (event->type_len) {
238         case RINGBUF_TYPE_PADDING:
239                 if (rb_null_event(event))
240                         /* undefined */
241                         return -1;
242                 return  event->array[0] + RB_EVNT_HDR_SIZE;
243
244         case RINGBUF_TYPE_TIME_EXTEND:
245                 return RB_LEN_TIME_EXTEND;
246
247         case RINGBUF_TYPE_TIME_STAMP:
248                 return RB_LEN_TIME_STAMP;
249
250         case RINGBUF_TYPE_DATA:
251                 return rb_event_data_length(event);
252         default:
253                 BUG();
254         }
255         /* not hit */
256         return 0;
257 }
258
259 /*
260  * Return total length of time extend and data,
261  *   or just the event length for all other events.
262  */
263 static inline unsigned
264 rb_event_ts_length(struct ring_buffer_event *event)
265 {
266         unsigned len = 0;
267
268         if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
269                 /* time extends include the data event after it */
270                 len = RB_LEN_TIME_EXTEND;
271                 event = skip_time_extend(event);
272         }
273         return len + rb_event_length(event);
274 }
275
276 /**
277  * ring_buffer_event_length - return the length of the event
278  * @event: the event to get the length of
279  *
280  * Returns the size of the data load of a data event.
281  * If the event is something other than a data event, it
282  * returns the size of the event itself. With the exception
283  * of a TIME EXTEND, where it still returns the size of the
284  * data load of the data event after it.
285  */
286 unsigned ring_buffer_event_length(struct ring_buffer_event *event)
287 {
288         unsigned length;
289
290         if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
291                 event = skip_time_extend(event);
292
293         length = rb_event_length(event);
294         if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
295                 return length;
296         length -= RB_EVNT_HDR_SIZE;
297         if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0]))
298                 length -= sizeof(event->array[0]);
299         return length;
300 }
301 EXPORT_SYMBOL_GPL(ring_buffer_event_length);
302
303 /* inline for ring buffer fast paths */
304 static void *
305 rb_event_data(struct ring_buffer_event *event)
306 {
307         if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
308                 event = skip_time_extend(event);
309         BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
310         /* If length is in len field, then array[0] has the data */
311         if (event->type_len)
312                 return (void *)&event->array[0];
313         /* Otherwise length is in array[0] and array[1] has the data */
314         return (void *)&event->array[1];
315 }
316
317 /**
318  * ring_buffer_event_data - return the data of the event
319  * @event: the event to get the data from
320  */
321 void *ring_buffer_event_data(struct ring_buffer_event *event)
322 {
323         return rb_event_data(event);
324 }
325 EXPORT_SYMBOL_GPL(ring_buffer_event_data);
326
327 #define for_each_buffer_cpu(buffer, cpu)                \
328         for_each_cpu(cpu, buffer->cpumask)
329
330 #define TS_SHIFT        27
331 #define TS_MASK         ((1ULL << TS_SHIFT) - 1)
332 #define TS_DELTA_TEST   (~TS_MASK)
333
334 /* Flag when events were overwritten */
335 #define RB_MISSED_EVENTS        (1 << 31)
336 /* Missed count stored at end */
337 #define RB_MISSED_STORED        (1 << 30)
338
339 struct buffer_data_page {
340         u64              time_stamp;    /* page time stamp */
341         local_t          commit;        /* write committed index */
342         unsigned char    data[] RB_ALIGN_DATA;  /* data of buffer page */
343 };
344
345 /*
346  * Note, the buffer_page list must be first. The buffer pages
347  * are allocated in cache lines, which means that each buffer
348  * page will be at the beginning of a cache line, and thus
349  * the least significant bits will be zero. We use this to
350  * add flags in the list struct pointers, to make the ring buffer
351  * lockless.
352  */
353 struct buffer_page {
354         struct list_head list;          /* list of buffer pages */
355         local_t          write;         /* index for next write */
356         unsigned         read;          /* index for next read */
357         local_t          entries;       /* entries on this page */
358         unsigned long    real_end;      /* real end of data */
359         struct buffer_data_page *page;  /* Actual data page */
360 };
361
362 /*
363  * The buffer page counters, write and entries, must be reset
364  * atomically when crossing page boundaries. To synchronize this
365  * update, two counters are inserted into the number. One is
366  * the actual counter for the write position or count on the page.
367  *
368  * The other is a counter of updaters. Before an update happens
369  * the update partition of the counter is incremented. This will
370  * allow the updater to update the counter atomically.
371  *
372  * The counter is 20 bits, and the state data is 12.
373  */
374 #define RB_WRITE_MASK           0xfffff
375 #define RB_WRITE_INTCNT         (1 << 20)
376
377 static void rb_init_page(struct buffer_data_page *bpage)
378 {
379         local_set(&bpage->commit, 0);
380 }
381
382 /**
383  * ring_buffer_page_len - the size of data on the page.
384  * @page: The page to read
385  *
386  * Returns the amount of data on the page, including buffer page header.
387  */
388 size_t ring_buffer_page_len(void *page)
389 {
390         return local_read(&((struct buffer_data_page *)page)->commit)
391                 + BUF_PAGE_HDR_SIZE;
392 }
393
394 /*
395  * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
396  * this issue out.
397  */
398 static void free_buffer_page(struct buffer_page *bpage)
399 {
400         free_page((unsigned long)bpage->page);
401         kfree(bpage);
402 }
403
404 /*
405  * We need to fit the time_stamp delta into 27 bits.
406  */
407 static inline int test_time_stamp(u64 delta)
408 {
409         if (delta & TS_DELTA_TEST)
410                 return 1;
411         return 0;
412 }
413
414 #define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
415
416 /* Max payload is BUF_PAGE_SIZE - header (8bytes) */
417 #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
418
419 int ring_buffer_print_page_header(struct trace_seq *s)
420 {
421         struct buffer_data_page field;
422         int ret;
423
424         ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
425                                "offset:0;\tsize:%u;\tsigned:%u;\n",
426                                (unsigned int)sizeof(field.time_stamp),
427                                (unsigned int)is_signed_type(u64));
428
429         ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
430                                "offset:%u;\tsize:%u;\tsigned:%u;\n",
431                                (unsigned int)offsetof(typeof(field), commit),
432                                (unsigned int)sizeof(field.commit),
433                                (unsigned int)is_signed_type(long));
434
435         ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
436                                "offset:%u;\tsize:%u;\tsigned:%u;\n",
437                                (unsigned int)offsetof(typeof(field), commit),
438                                1,
439                                (unsigned int)is_signed_type(long));
440
441         ret = trace_seq_printf(s, "\tfield: char data;\t"
442                                "offset:%u;\tsize:%u;\tsigned:%u;\n",
443                                (unsigned int)offsetof(typeof(field), data),
444                                (unsigned int)BUF_PAGE_SIZE,
445                                (unsigned int)is_signed_type(char));
446
447         return ret;
448 }
449
450 struct rb_irq_work {
451         struct irq_work                 work;
452         wait_queue_head_t               waiters;
453         bool                            waiters_pending;
454 };
455
456 /*
457  * head_page == tail_page && head == tail then buffer is empty.
458  */
459 struct ring_buffer_per_cpu {
460         int                             cpu;
461         atomic_t                        record_disabled;
462         struct ring_buffer              *buffer;
463         raw_spinlock_t                  reader_lock;    /* serialize readers */
464         arch_spinlock_t                 lock;
465         struct lock_class_key           lock_key;
466         unsigned int                    nr_pages;
467         struct list_head                *pages;
468         struct buffer_page              *head_page;     /* read from head */
469         struct buffer_page              *tail_page;     /* write to tail */
470         struct buffer_page              *commit_page;   /* committed pages */
471         struct buffer_page              *reader_page;
472         unsigned long                   lost_events;
473         unsigned long                   last_overrun;
474         local_t                         entries_bytes;
475         local_t                         entries;
476         local_t                         overrun;
477         local_t                         commit_overrun;
478         local_t                         dropped_events;
479         local_t                         committing;
480         local_t                         commits;
481         unsigned long                   read;
482         unsigned long                   read_bytes;
483         u64                             write_stamp;
484         u64                             read_stamp;
485         /* ring buffer pages to update, > 0 to add, < 0 to remove */
486         int                             nr_pages_to_update;
487         struct list_head                new_pages; /* new pages to add */
488         struct work_struct              update_pages_work;
489         struct completion               update_done;
490
491         struct rb_irq_work              irq_work;
492 };
493
494 struct ring_buffer {
495         unsigned                        flags;
496         int                             cpus;
497         atomic_t                        record_disabled;
498         atomic_t                        resize_disabled;
499         cpumask_var_t                   cpumask;
500
501         struct lock_class_key           *reader_lock_key;
502
503         struct mutex                    mutex;
504
505         struct ring_buffer_per_cpu      **buffers;
506
507 #ifdef CONFIG_HOTPLUG_CPU
508         struct notifier_block           cpu_notify;
509 #endif
510         u64                             (*clock)(void);
511
512         struct rb_irq_work              irq_work;
513 };
514
515 struct ring_buffer_iter {
516         struct ring_buffer_per_cpu      *cpu_buffer;
517         unsigned long                   head;
518         struct buffer_page              *head_page;
519         struct buffer_page              *cache_reader_page;
520         unsigned long                   cache_read;
521         u64                             read_stamp;
522 };
523
524 /*
525  * rb_wake_up_waiters - wake up tasks waiting for ring buffer input
526  *
527  * Schedules a delayed work to wake up any task that is blocked on the
528  * ring buffer waiters queue.
529  */
530 static void rb_wake_up_waiters(struct irq_work *work)
531 {
532         struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work);
533
534         wake_up_all(&rbwork->waiters);
535 }
536
537 /**
538  * ring_buffer_wait - wait for input to the ring buffer
539  * @buffer: buffer to wait on
540  * @cpu: the cpu buffer to wait on
541  *
542  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
543  * as data is added to any of the @buffer's cpu buffers. Otherwise
544  * it will wait for data to be added to a specific cpu buffer.
545  */
546 void ring_buffer_wait(struct ring_buffer *buffer, int cpu)
547 {
548         struct ring_buffer_per_cpu *cpu_buffer;
549         DEFINE_WAIT(wait);
550         struct rb_irq_work *work;
551
552         /*
553          * Depending on what the caller is waiting for, either any
554          * data in any cpu buffer, or a specific buffer, put the
555          * caller on the appropriate wait queue.
556          */
557         if (cpu == RING_BUFFER_ALL_CPUS)
558                 work = &buffer->irq_work;
559         else {
560                 cpu_buffer = buffer->buffers[cpu];
561                 work = &cpu_buffer->irq_work;
562         }
563
564
565         prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
566
567         /*
568          * The events can happen in critical sections where
569          * checking a work queue can cause deadlocks.
570          * After adding a task to the queue, this flag is set
571          * only to notify events to try to wake up the queue
572          * using irq_work.
573          *
574          * We don't clear it even if the buffer is no longer
575          * empty. The flag only causes the next event to run
576          * irq_work to do the work queue wake up. The worse
577          * that can happen if we race with !trace_empty() is that
578          * an event will cause an irq_work to try to wake up
579          * an empty queue.
580          *
581          * There's no reason to protect this flag either, as
582          * the work queue and irq_work logic will do the necessary
583          * synchronization for the wake ups. The only thing
584          * that is necessary is that the wake up happens after
585          * a task has been queued. It's OK for spurious wake ups.
586          */
587         work->waiters_pending = true;
588
589         if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
590             (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
591                 schedule();
592
593         finish_wait(&work->waiters, &wait);
594 }
595
596 /**
597  * ring_buffer_poll_wait - poll on buffer input
598  * @buffer: buffer to wait on
599  * @cpu: the cpu buffer to wait on
600  * @filp: the file descriptor
601  * @poll_table: The poll descriptor
602  *
603  * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
604  * as data is added to any of the @buffer's cpu buffers. Otherwise
605  * it will wait for data to be added to a specific cpu buffer.
606  *
607  * Returns POLLIN | POLLRDNORM if data exists in the buffers,
608  * zero otherwise.
609  */
610 int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
611                           struct file *filp, poll_table *poll_table)
612 {
613         struct ring_buffer_per_cpu *cpu_buffer;
614         struct rb_irq_work *work;
615
616         if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
617             (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
618                 return POLLIN | POLLRDNORM;
619
620         if (cpu == RING_BUFFER_ALL_CPUS)
621                 work = &buffer->irq_work;
622         else {
623                 cpu_buffer = buffer->buffers[cpu];
624                 work = &cpu_buffer->irq_work;
625         }
626
627         work->waiters_pending = true;
628         poll_wait(filp, &work->waiters, poll_table);
629
630         if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
631             (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
632                 return POLLIN | POLLRDNORM;
633         return 0;
634 }
635
636 /* buffer may be either ring_buffer or ring_buffer_per_cpu */
637 #define RB_WARN_ON(b, cond)                                             \
638         ({                                                              \
639                 int _____ret = unlikely(cond);                          \
640                 if (_____ret) {                                         \
641                         if (__same_type(*(b), struct ring_buffer_per_cpu)) { \
642                                 struct ring_buffer_per_cpu *__b =       \
643                                         (void *)b;                      \
644                                 atomic_inc(&__b->buffer->record_disabled); \
645                         } else                                          \
646                                 atomic_inc(&b->record_disabled);        \
647                         WARN_ON(1);                                     \
648                 }                                                       \
649                 _____ret;                                               \
650         })
651
652 /* Up this if you want to test the TIME_EXTENTS and normalization */
653 #define DEBUG_SHIFT 0
654
655 static inline u64 rb_time_stamp(struct ring_buffer *buffer)
656 {
657         /* shift to debug/test normalization and TIME_EXTENTS */
658         return buffer->clock() << DEBUG_SHIFT;
659 }
660
661 u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
662 {
663         u64 time;
664
665         preempt_disable_notrace();
666         time = rb_time_stamp(buffer);
667         preempt_enable_no_resched_notrace();
668
669         return time;
670 }
671 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
672
673 void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
674                                       int cpu, u64 *ts)
675 {
676         /* Just stupid testing the normalize function and deltas */
677         *ts >>= DEBUG_SHIFT;
678 }
679 EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp);
680
681 /*
682  * Making the ring buffer lockless makes things tricky.
683  * Although writes only happen on the CPU that they are on,
684  * and they only need to worry about interrupts. Reads can
685  * happen on any CPU.
686  *
687  * The reader page is always off the ring buffer, but when the
688  * reader finishes with a page, it needs to swap its page with
689  * a new one from the buffer. The reader needs to take from
690  * the head (writes go to the tail). But if a writer is in overwrite
691  * mode and wraps, it must push the head page forward.
692  *
693  * Here lies the problem.
694  *
695  * The reader must be careful to replace only the head page, and
696  * not another one. As described at the top of the file in the
697  * ASCII art, the reader sets its old page to point to the next
698  * page after head. It then sets the page after head to point to
699  * the old reader page. But if the writer moves the head page
700  * during this operation, the reader could end up with the tail.
701  *
702  * We use cmpxchg to help prevent this race. We also do something
703  * special with the page before head. We set the LSB to 1.
704  *
705  * When the writer must push the page forward, it will clear the
706  * bit that points to the head page, move the head, and then set
707  * the bit that points to the new head page.
708  *
709  * We also don't want an interrupt coming in and moving the head
710  * page on another writer. Thus we use the second LSB to catch
711  * that too. Thus:
712  *
713  * head->list->prev->next        bit 1          bit 0
714  *                              -------        -------
715  * Normal page                     0              0
716  * Points to head page             0              1
717  * New head page                   1              0
718  *
719  * Note we can not trust the prev pointer of the head page, because:
720  *
721  * +----+       +-----+        +-----+
722  * |    |------>|  T  |---X--->|  N  |
723  * |    |<------|     |        |     |
724  * +----+       +-----+        +-----+
725  *   ^                           ^ |
726  *   |          +-----+          | |
727  *   +----------|  R  |----------+ |
728  *              |     |<-----------+
729  *              +-----+
730  *
731  * Key:  ---X-->  HEAD flag set in pointer
732  *         T      Tail page
733  *         R      Reader page
734  *         N      Next page
735  *
736  * (see __rb_reserve_next() to see where this happens)
737  *
738  *  What the above shows is that the reader just swapped out
739  *  the reader page with a page in the buffer, but before it
740  *  could make the new header point back to the new page added
741  *  it was preempted by a writer. The writer moved forward onto
742  *  the new page added by the reader and is about to move forward
743  *  again.
744  *
745  *  You can see, it is legitimate for the previous pointer of
746  *  the head (or any page) not to point back to itself. But only
747  *  temporarially.
748  */
749
750 #define RB_PAGE_NORMAL          0UL
751 #define RB_PAGE_HEAD            1UL
752 #define RB_PAGE_UPDATE          2UL
753
754
755 #define RB_FLAG_MASK            3UL
756
757 /* PAGE_MOVED is not part of the mask */
758 #define RB_PAGE_MOVED           4UL
759
760 /*
761  * rb_list_head - remove any bit
762  */
763 static struct list_head *rb_list_head(struct list_head *list)
764 {
765         unsigned long val = (unsigned long)list;
766
767         return (struct list_head *)(val & ~RB_FLAG_MASK);
768 }
769
770 /*
771  * rb_is_head_page - test if the given page is the head page
772  *
773  * Because the reader may move the head_page pointer, we can
774  * not trust what the head page is (it may be pointing to
775  * the reader page). But if the next page is a header page,
776  * its flags will be non zero.
777  */
778 static inline int
779 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
780                 struct buffer_page *page, struct list_head *list)
781 {
782         unsigned long val;
783
784         val = (unsigned long)list->next;
785
786         if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
787                 return RB_PAGE_MOVED;
788
789         return val & RB_FLAG_MASK;
790 }
791
792 /*
793  * rb_is_reader_page
794  *
795  * The unique thing about the reader page, is that, if the
796  * writer is ever on it, the previous pointer never points
797  * back to the reader page.
798  */
799 static int rb_is_reader_page(struct buffer_page *page)
800 {
801         struct list_head *list = page->list.prev;
802
803         return rb_list_head(list->next) != &page->list;
804 }
805
806 /*
807  * rb_set_list_to_head - set a list_head to be pointing to head.
808  */
809 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer,
810                                 struct list_head *list)
811 {
812         unsigned long *ptr;
813
814         ptr = (unsigned long *)&list->next;
815         *ptr |= RB_PAGE_HEAD;
816         *ptr &= ~RB_PAGE_UPDATE;
817 }
818
819 /*
820  * rb_head_page_activate - sets up head page
821  */
822 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer)
823 {
824         struct buffer_page *head;
825
826         head = cpu_buffer->head_page;
827         if (!head)
828                 return;
829
830         /*
831          * Set the previous list pointer to have the HEAD flag.
832          */
833         rb_set_list_to_head(cpu_buffer, head->list.prev);
834 }
835
836 static void rb_list_head_clear(struct list_head *list)
837 {
838         unsigned long *ptr = (unsigned long *)&list->next;
839
840         *ptr &= ~RB_FLAG_MASK;
841 }
842
843 /*
844  * rb_head_page_dactivate - clears head page ptr (for free list)
845  */
846 static void
847 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer)
848 {
849         struct list_head *hd;
850
851         /* Go through the whole list and clear any pointers found. */
852         rb_list_head_clear(cpu_buffer->pages);
853
854         list_for_each(hd, cpu_buffer->pages)
855                 rb_list_head_clear(hd);
856 }
857
858 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer,
859                             struct buffer_page *head,
860                             struct buffer_page *prev,
861                             int old_flag, int new_flag)
862 {
863         struct list_head *list;
864         unsigned long val = (unsigned long)&head->list;
865         unsigned long ret;
866
867         list = &prev->list;
868
869         val &= ~RB_FLAG_MASK;
870
871         ret = cmpxchg((unsigned long *)&list->next,
872                       val | old_flag, val | new_flag);
873
874         /* check if the reader took the page */
875         if ((ret & ~RB_FLAG_MASK) != val)
876                 return RB_PAGE_MOVED;
877
878         return ret & RB_FLAG_MASK;
879 }
880
881 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer,
882                                    struct buffer_page *head,
883                                    struct buffer_page *prev,
884                                    int old_flag)
885 {
886         return rb_head_page_set(cpu_buffer, head, prev,
887                                 old_flag, RB_PAGE_UPDATE);
888 }
889
890 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer,
891                                  struct buffer_page *head,
892                                  struct buffer_page *prev,
893                                  int old_flag)
894 {
895         return rb_head_page_set(cpu_buffer, head, prev,
896                                 old_flag, RB_PAGE_HEAD);
897 }
898
899 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer,
900                                    struct buffer_page *head,
901                                    struct buffer_page *prev,
902                                    int old_flag)
903 {
904         return rb_head_page_set(cpu_buffer, head, prev,
905                                 old_flag, RB_PAGE_NORMAL);
906 }
907
908 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
909                                struct buffer_page **bpage)
910 {
911         struct list_head *p = rb_list_head((*bpage)->list.next);
912
913         *bpage = list_entry(p, struct buffer_page, list);
914 }
915
916 static struct buffer_page *
917 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer)
918 {
919         struct buffer_page *head;
920         struct buffer_page *page;
921         struct list_head *list;
922         int i;
923
924         if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page))
925                 return NULL;
926
927         /* sanity check */
928         list = cpu_buffer->pages;
929         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list))
930                 return NULL;
931
932         page = head = cpu_buffer->head_page;
933         /*
934          * It is possible that the writer moves the header behind
935          * where we started, and we miss in one loop.
936          * A second loop should grab the header, but we'll do
937          * three loops just because I'm paranoid.
938          */
939         for (i = 0; i < 3; i++) {
940                 do {
941                         if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {
942                                 cpu_buffer->head_page = page;
943                                 return page;
944                         }
945                         rb_inc_page(cpu_buffer, &page);
946                 } while (page != head);
947         }
948
949         RB_WARN_ON(cpu_buffer, 1);
950
951         return NULL;
952 }
953
954 static int rb_head_page_replace(struct buffer_page *old,
955                                 struct buffer_page *new)
956 {
957         unsigned long *ptr = (unsigned long *)&old->list.prev->next;
958         unsigned long val;
959         unsigned long ret;
960
961         val = *ptr & ~RB_FLAG_MASK;
962         val |= RB_PAGE_HEAD;
963
964         ret = cmpxchg(ptr, val, (unsigned long)&new->list);
965
966         return ret == val;
967 }
968
969 /*
970  * rb_tail_page_update - move the tail page forward
971  *
972  * Returns 1 if moved tail page, 0 if someone else did.
973  */
974 static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
975                                struct buffer_page *tail_page,
976                                struct buffer_page *next_page)
977 {
978         struct buffer_page *old_tail;
979         unsigned long old_entries;
980         unsigned long old_write;
981         int ret = 0;
982
983         /*
984          * The tail page now needs to be moved forward.
985          *
986          * We need to reset the tail page, but without messing
987          * with possible erasing of data brought in by interrupts
988          * that have moved the tail page and are currently on it.
989          *
990          * We add a counter to the write field to denote this.
991          */
992         old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
993         old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
994
995         /*
996          * Just make sure we have seen our old_write and synchronize
997          * with any interrupts that come in.
998          */
999         barrier();
1000
1001         /*
1002          * If the tail page is still the same as what we think
1003          * it is, then it is up to us to update the tail
1004          * pointer.
1005          */
1006         if (tail_page == cpu_buffer->tail_page) {
1007                 /* Zero the write counter */
1008                 unsigned long val = old_write & ~RB_WRITE_MASK;
1009                 unsigned long eval = old_entries & ~RB_WRITE_MASK;
1010
1011                 /*
1012                  * This will only succeed if an interrupt did
1013                  * not come in and change it. In which case, we
1014                  * do not want to modify it.
1015                  *
1016                  * We add (void) to let the compiler know that we do not care
1017                  * about the return value of these functions. We use the
1018                  * cmpxchg to only update if an interrupt did not already
1019                  * do it for us. If the cmpxchg fails, we don't care.
1020                  */
1021                 (void)local_cmpxchg(&next_page->write, old_write, val);
1022                 (void)local_cmpxchg(&next_page->entries, old_entries, eval);
1023
1024                 /*
1025                  * No need to worry about races with clearing out the commit.
1026                  * it only can increment when a commit takes place. But that
1027                  * only happens in the outer most nested commit.
1028                  */
1029                 local_set(&next_page->page->commit, 0);
1030
1031                 old_tail = cmpxchg(&cpu_buffer->tail_page,
1032                                    tail_page, next_page);
1033
1034                 if (old_tail == tail_page)
1035                         ret = 1;
1036         }
1037
1038         return ret;
1039 }
1040
1041 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer,
1042                           struct buffer_page *bpage)
1043 {
1044         unsigned long val = (unsigned long)bpage;
1045
1046         if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK))
1047                 return 1;
1048
1049         return 0;
1050 }
1051
1052 /**
1053  * rb_check_list - make sure a pointer to a list has the last bits zero
1054  */
1055 static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer,
1056                          struct list_head *list)
1057 {
1058         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev))
1059                 return 1;
1060         if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next))
1061                 return 1;
1062         return 0;
1063 }
1064
1065 /**
1066  * check_pages - integrity check of buffer pages
1067  * @cpu_buffer: CPU buffer with pages to test
1068  *
1069  * As a safety measure we check to make sure the data pages have not
1070  * been corrupted.
1071  */
1072 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
1073 {
1074         struct list_head *head = cpu_buffer->pages;
1075         struct buffer_page *bpage, *tmp;
1076
1077         /* Reset the head page if it exists */
1078         if (cpu_buffer->head_page)
1079                 rb_set_head_page(cpu_buffer);
1080
1081         rb_head_page_deactivate(cpu_buffer);
1082
1083         if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
1084                 return -1;
1085         if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
1086                 return -1;
1087
1088         if (rb_check_list(cpu_buffer, head))
1089                 return -1;
1090
1091         list_for_each_entry_safe(bpage, tmp, head, list) {
1092                 if (RB_WARN_ON(cpu_buffer,
1093                                bpage->list.next->prev != &bpage->list))
1094                         return -1;
1095                 if (RB_WARN_ON(cpu_buffer,
1096                                bpage->list.prev->next != &bpage->list))
1097                         return -1;
1098                 if (rb_check_list(cpu_buffer, &bpage->list))
1099                         return -1;
1100         }
1101
1102         rb_head_page_activate(cpu_buffer);
1103
1104         return 0;
1105 }
1106
1107 static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
1108 {
1109         int i;
1110         struct buffer_page *bpage, *tmp;
1111
1112         for (i = 0; i < nr_pages; i++) {
1113                 struct page *page;
1114                 /*
1115                  * __GFP_NORETRY flag makes sure that the allocation fails
1116                  * gracefully without invoking oom-killer and the system is
1117                  * not destabilized.
1118                  */
1119                 bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1120                                     GFP_KERNEL | __GFP_NORETRY,
1121                                     cpu_to_node(cpu));
1122                 if (!bpage)
1123                         goto free_pages;
1124
1125                 list_add(&bpage->list, pages);
1126
1127                 page = alloc_pages_node(cpu_to_node(cpu),
1128                                         GFP_KERNEL | __GFP_NORETRY, 0);
1129                 if (!page)
1130                         goto free_pages;
1131                 bpage->page = page_address(page);
1132                 rb_init_page(bpage->page);
1133         }
1134
1135         return 0;
1136
1137 free_pages:
1138         list_for_each_entry_safe(bpage, tmp, pages, list) {
1139                 list_del_init(&bpage->list);
1140                 free_buffer_page(bpage);
1141         }
1142
1143         return -ENOMEM;
1144 }
1145
1146 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
1147                              unsigned nr_pages)
1148 {
1149         LIST_HEAD(pages);
1150
1151         WARN_ON(!nr_pages);
1152
1153         if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
1154                 return -ENOMEM;
1155
1156         /*
1157          * The ring buffer page list is a circular list that does not
1158          * start and end with a list head. All page list items point to
1159          * other pages.
1160          */
1161         cpu_buffer->pages = pages.next;
1162         list_del(&pages);
1163
1164         cpu_buffer->nr_pages = nr_pages;
1165
1166         rb_check_pages(cpu_buffer);
1167
1168         return 0;
1169 }
1170
1171 static struct ring_buffer_per_cpu *
1172 rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
1173 {
1174         struct ring_buffer_per_cpu *cpu_buffer;
1175         struct buffer_page *bpage;
1176         struct page *page;
1177         int ret;
1178
1179         cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
1180                                   GFP_KERNEL, cpu_to_node(cpu));
1181         if (!cpu_buffer)
1182                 return NULL;
1183
1184         cpu_buffer->cpu = cpu;
1185         cpu_buffer->buffer = buffer;
1186         raw_spin_lock_init(&cpu_buffer->reader_lock);
1187         lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
1188         cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
1189         INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
1190         init_completion(&cpu_buffer->update_done);
1191         init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
1192         init_waitqueue_head(&cpu_buffer->irq_work.waiters);
1193
1194         bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
1195                             GFP_KERNEL, cpu_to_node(cpu));
1196         if (!bpage)
1197                 goto fail_free_buffer;
1198
1199         rb_check_bpage(cpu_buffer, bpage);
1200
1201         cpu_buffer->reader_page = bpage;
1202         page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
1203         if (!page)
1204                 goto fail_free_reader;
1205         bpage->page = page_address(page);
1206         rb_init_page(bpage->page);
1207
1208         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1209         INIT_LIST_HEAD(&cpu_buffer->new_pages);
1210
1211         ret = rb_allocate_pages(cpu_buffer, nr_pages);
1212         if (ret < 0)
1213                 goto fail_free_reader;
1214
1215         cpu_buffer->head_page
1216                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
1217         cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
1218
1219         rb_head_page_activate(cpu_buffer);
1220
1221         return cpu_buffer;
1222
1223  fail_free_reader:
1224         free_buffer_page(cpu_buffer->reader_page);
1225
1226  fail_free_buffer:
1227         kfree(cpu_buffer);
1228         return NULL;
1229 }
1230
1231 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
1232 {
1233         struct list_head *head = cpu_buffer->pages;
1234         struct buffer_page *bpage, *tmp;
1235
1236         free_buffer_page(cpu_buffer->reader_page);
1237
1238         rb_head_page_deactivate(cpu_buffer);
1239
1240         if (head) {
1241                 list_for_each_entry_safe(bpage, tmp, head, list) {
1242                         list_del_init(&bpage->list);
1243                         free_buffer_page(bpage);
1244                 }
1245                 bpage = list_entry(head, struct buffer_page, list);
1246                 free_buffer_page(bpage);
1247         }
1248
1249         kfree(cpu_buffer);
1250 }
1251
1252 #ifdef CONFIG_HOTPLUG_CPU
1253 static int rb_cpu_notify(struct notifier_block *self,
1254                          unsigned long action, void *hcpu);
1255 #endif
1256
1257 /**
1258  * ring_buffer_alloc - allocate a new ring_buffer
1259  * @size: the size in bytes per cpu that is needed.
1260  * @flags: attributes to set for the ring buffer.
1261  *
1262  * Currently the only flag that is available is the RB_FL_OVERWRITE
1263  * flag. This flag means that the buffer will overwrite old data
1264  * when the buffer wraps. If this flag is not set, the buffer will
1265  * drop data when the tail hits the head.
1266  */
1267 struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
1268                                         struct lock_class_key *key)
1269 {
1270         struct ring_buffer *buffer;
1271         int bsize;
1272         int cpu, nr_pages;
1273
1274         /* keep it in its own cache line */
1275         buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
1276                          GFP_KERNEL);
1277         if (!buffer)
1278                 return NULL;
1279
1280         if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
1281                 goto fail_free_buffer;
1282
1283         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1284         buffer->flags = flags;
1285         buffer->clock = trace_clock_local;
1286         buffer->reader_lock_key = key;
1287
1288         init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
1289         init_waitqueue_head(&buffer->irq_work.waiters);
1290
1291         /* need at least two pages */
1292         if (nr_pages < 2)
1293                 nr_pages = 2;
1294
1295         /*
1296          * In case of non-hotplug cpu, if the ring-buffer is allocated
1297          * in early initcall, it will not be notified of secondary cpus.
1298          * In that off case, we need to allocate for all possible cpus.
1299          */
1300 #ifdef CONFIG_HOTPLUG_CPU
1301         get_online_cpus();
1302         cpumask_copy(buffer->cpumask, cpu_online_mask);
1303 #else
1304         cpumask_copy(buffer->cpumask, cpu_possible_mask);
1305 #endif
1306         buffer->cpus = nr_cpu_ids;
1307
1308         bsize = sizeof(void *) * nr_cpu_ids;
1309         buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
1310                                   GFP_KERNEL);
1311         if (!buffer->buffers)
1312                 goto fail_free_cpumask;
1313
1314         for_each_buffer_cpu(buffer, cpu) {
1315                 buffer->buffers[cpu] =
1316                         rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
1317                 if (!buffer->buffers[cpu])
1318                         goto fail_free_buffers;
1319         }
1320
1321 #ifdef CONFIG_HOTPLUG_CPU
1322         buffer->cpu_notify.notifier_call = rb_cpu_notify;
1323         buffer->cpu_notify.priority = 0;
1324         register_cpu_notifier(&buffer->cpu_notify);
1325 #endif
1326
1327         put_online_cpus();
1328         mutex_init(&buffer->mutex);
1329
1330         return buffer;
1331
1332  fail_free_buffers:
1333         for_each_buffer_cpu(buffer, cpu) {
1334                 if (buffer->buffers[cpu])
1335                         rb_free_cpu_buffer(buffer->buffers[cpu]);
1336         }
1337         kfree(buffer->buffers);
1338
1339  fail_free_cpumask:
1340         free_cpumask_var(buffer->cpumask);
1341         put_online_cpus();
1342
1343  fail_free_buffer:
1344         kfree(buffer);
1345         return NULL;
1346 }
1347 EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
1348
1349 /**
1350  * ring_buffer_free - free a ring buffer.
1351  * @buffer: the buffer to free.
1352  */
1353 void
1354 ring_buffer_free(struct ring_buffer *buffer)
1355 {
1356         int cpu;
1357
1358         get_online_cpus();
1359
1360 #ifdef CONFIG_HOTPLUG_CPU
1361         unregister_cpu_notifier(&buffer->cpu_notify);
1362 #endif
1363
1364         for_each_buffer_cpu(buffer, cpu)
1365                 rb_free_cpu_buffer(buffer->buffers[cpu]);
1366
1367         put_online_cpus();
1368
1369         kfree(buffer->buffers);
1370         free_cpumask_var(buffer->cpumask);
1371
1372         kfree(buffer);
1373 }
1374 EXPORT_SYMBOL_GPL(ring_buffer_free);
1375
1376 void ring_buffer_set_clock(struct ring_buffer *buffer,
1377                            u64 (*clock)(void))
1378 {
1379         buffer->clock = clock;
1380 }
1381
1382 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1383
1384 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1385 {
1386         return local_read(&bpage->entries) & RB_WRITE_MASK;
1387 }
1388
1389 static inline unsigned long rb_page_write(struct buffer_page *bpage)
1390 {
1391         return local_read(&bpage->write) & RB_WRITE_MASK;
1392 }
1393
1394 static int
1395 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
1396 {
1397         struct list_head *tail_page, *to_remove, *next_page;
1398         struct buffer_page *to_remove_page, *tmp_iter_page;
1399         struct buffer_page *last_page, *first_page;
1400         unsigned int nr_removed;
1401         unsigned long head_bit;
1402         int page_entries;
1403
1404         head_bit = 0;
1405
1406         raw_spin_lock_irq(&cpu_buffer->reader_lock);
1407         atomic_inc(&cpu_buffer->record_disabled);
1408         /*
1409          * We don't race with the readers since we have acquired the reader
1410          * lock. We also don't race with writers after disabling recording.
1411          * This makes it easy to figure out the first and the last page to be
1412          * removed from the list. We unlink all the pages in between including
1413          * the first and last pages. This is done in a busy loop so that we
1414          * lose the least number of traces.
1415          * The pages are freed after we restart recording and unlock readers.
1416          */
1417         tail_page = &cpu_buffer->tail_page->list;
1418
1419         /*
1420          * tail page might be on reader page, we remove the next page
1421          * from the ring buffer
1422          */
1423         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
1424                 tail_page = rb_list_head(tail_page->next);
1425         to_remove = tail_page;
1426
1427         /* start of pages to remove */
1428         first_page = list_entry(rb_list_head(to_remove->next),
1429                                 struct buffer_page, list);
1430
1431         for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
1432                 to_remove = rb_list_head(to_remove)->next;
1433                 head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
1434         }
1435
1436         next_page = rb_list_head(to_remove)->next;
1437
1438         /*
1439          * Now we remove all pages between tail_page and next_page.
1440          * Make sure that we have head_bit value preserved for the
1441          * next page
1442          */
1443         tail_page->next = (struct list_head *)((unsigned long)next_page |
1444                                                 head_bit);
1445         next_page = rb_list_head(next_page);
1446         next_page->prev = tail_page;
1447
1448         /* make sure pages points to a valid page in the ring buffer */
1449         cpu_buffer->pages = next_page;
1450
1451         /* update head page */
1452         if (head_bit)
1453                 cpu_buffer->head_page = list_entry(next_page,
1454                                                 struct buffer_page, list);
1455
1456         /*
1457          * change read pointer to make sure any read iterators reset
1458          * themselves
1459          */
1460         cpu_buffer->read = 0;
1461
1462         /* pages are removed, resume tracing and then free the pages */
1463         atomic_dec(&cpu_buffer->record_disabled);
1464         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1465
1466         RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));
1467
1468         /* last buffer page to remove */
1469         last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
1470                                 list);
1471         tmp_iter_page = first_page;
1472
1473         do {
1474                 to_remove_page = tmp_iter_page;
1475                 rb_inc_page(cpu_buffer, &tmp_iter_page);
1476
1477                 /* update the counters */
1478                 page_entries = rb_page_entries(to_remove_page);
1479                 if (page_entries) {
1480                         /*
1481                          * If something was added to this page, it was full
1482                          * since it is not the tail page. So we deduct the
1483                          * bytes consumed in ring buffer from here.
1484                          * Increment overrun to account for the lost events.
1485                          */
1486                         local_add(page_entries, &cpu_buffer->overrun);
1487                         local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
1488                 }
1489
1490                 /*
1491                  * We have already removed references to this list item, just
1492                  * free up the buffer_page and its page
1493                  */
1494                 free_buffer_page(to_remove_page);
1495                 nr_removed--;
1496
1497         } while (to_remove_page != last_page);
1498
1499         RB_WARN_ON(cpu_buffer, nr_removed);
1500
1501         return nr_removed == 0;
1502 }
1503
1504 static int
1505 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
1506 {
1507         struct list_head *pages = &cpu_buffer->new_pages;
1508         int retries, success;
1509
1510         raw_spin_lock_irq(&cpu_buffer->reader_lock);
1511         /*
1512          * We are holding the reader lock, so the reader page won't be swapped
1513          * in the ring buffer. Now we are racing with the writer trying to
1514          * move head page and the tail page.
1515          * We are going to adapt the reader page update process where:
1516          * 1. We first splice the start and end of list of new pages between
1517          *    the head page and its previous page.
1518          * 2. We cmpxchg the prev_page->next to point from head page to the
1519          *    start of new pages list.
1520          * 3. Finally, we update the head->prev to the end of new list.
1521          *
1522          * We will try this process 10 times, to make sure that we don't keep
1523          * spinning.
1524          */
1525         retries = 10;
1526         success = 0;
1527         while (retries--) {
1528                 struct list_head *head_page, *prev_page, *r;
1529                 struct list_head *last_page, *first_page;
1530                 struct list_head *head_page_with_bit;
1531
1532                 head_page = &rb_set_head_page(cpu_buffer)->list;
1533                 if (!head_page)
1534                         break;
1535                 prev_page = head_page->prev;
1536
1537                 first_page = pages->next;
1538                 last_page  = pages->prev;
1539
1540                 head_page_with_bit = (struct list_head *)
1541                                      ((unsigned long)head_page | RB_PAGE_HEAD);
1542
1543                 last_page->next = head_page_with_bit;
1544                 first_page->prev = prev_page;
1545
1546                 r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);
1547
1548                 if (r == head_page_with_bit) {
1549                         /*
1550                          * yay, we replaced the page pointer to our new list,
1551                          * now, we just have to update to head page's prev
1552                          * pointer to point to end of list
1553                          */
1554                         head_page->prev = last_page;
1555                         success = 1;
1556                         break;
1557                 }
1558         }
1559
1560         if (success)
1561                 INIT_LIST_HEAD(pages);
1562         /*
1563          * If we weren't successful in adding in new pages, warn and stop
1564          * tracing
1565          */
1566         RB_WARN_ON(cpu_buffer, !success);
1567         raw_spin_unlock_irq(&cpu_buffer->reader_lock);
1568
1569         /* free pages if they weren't inserted */
1570         if (!success) {
1571                 struct buffer_page *bpage, *tmp;
1572                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1573                                          list) {
1574                         list_del_init(&bpage->list);
1575                         free_buffer_page(bpage);
1576                 }
1577         }
1578         return success;
1579 }
1580
1581 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
1582 {
1583         int success;
1584
1585         if (cpu_buffer->nr_pages_to_update > 0)
1586                 success = rb_insert_pages(cpu_buffer);
1587         else
1588                 success = rb_remove_pages(cpu_buffer,
1589                                         -cpu_buffer->nr_pages_to_update);
1590
1591         if (success)
1592                 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
1593 }
1594
1595 static void update_pages_handler(struct work_struct *work)
1596 {
1597         struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
1598                         struct ring_buffer_per_cpu, update_pages_work);
1599         rb_update_pages(cpu_buffer);
1600         complete(&cpu_buffer->update_done);
1601 }
1602
1603 /**
1604  * ring_buffer_resize - resize the ring buffer
1605  * @buffer: the buffer to resize.
1606  * @size: the new size.
1607  *
1608  * Minimum size is 2 * BUF_PAGE_SIZE.
1609  *
1610  * Returns 0 on success and < 0 on failure.
1611  */
1612 int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
1613                         int cpu_id)
1614 {
1615         struct ring_buffer_per_cpu *cpu_buffer;
1616         unsigned nr_pages;
1617         int cpu, err = 0;
1618
1619         /*
1620          * Always succeed at resizing a non-existent buffer:
1621          */
1622         if (!buffer)
1623                 return size;
1624
1625         /* Make sure the requested buffer exists */
1626         if (cpu_id != RING_BUFFER_ALL_CPUS &&
1627             !cpumask_test_cpu(cpu_id, buffer->cpumask))
1628                 return size;
1629
1630         size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1631         size *= BUF_PAGE_SIZE;
1632
1633         /* we need a minimum of two pages */
1634         if (size < BUF_PAGE_SIZE * 2)
1635                 size = BUF_PAGE_SIZE * 2;
1636
1637         nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
1638
1639         /*
1640          * Don't succeed if resizing is disabled, as a reader might be
1641          * manipulating the ring buffer and is expecting a sane state while
1642          * this is true.
1643          */
1644         if (atomic_read(&buffer->resize_disabled))
1645                 return -EBUSY;
1646
1647         /* prevent another thread from changing buffer sizes */
1648         mutex_lock(&buffer->mutex);
1649
1650         if (cpu_id == RING_BUFFER_ALL_CPUS) {
1651                 /* calculate the pages to update */
1652                 for_each_buffer_cpu(buffer, cpu) {
1653                         cpu_buffer = buffer->buffers[cpu];
1654
1655                         cpu_buffer->nr_pages_to_update = nr_pages -
1656                                                         cpu_buffer->nr_pages;
1657                         /*
1658                          * nothing more to do for removing pages or no update
1659                          */
1660                         if (cpu_buffer->nr_pages_to_update <= 0)
1661                                 continue;
1662                         /*
1663                          * to add pages, make sure all new pages can be
1664                          * allocated without receiving ENOMEM
1665                          */
1666                         INIT_LIST_HEAD(&cpu_buffer->new_pages);
1667                         if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1668                                                 &cpu_buffer->new_pages, cpu)) {
1669                                 /* not enough memory for new pages */
1670                                 err = -ENOMEM;
1671                                 goto out_err;
1672                         }
1673                 }
1674
1675                 get_online_cpus();
1676                 /*
1677                  * Fire off all the required work handlers
1678                  * We can't schedule on offline CPUs, but it's not necessary
1679                  * since we can change their buffer sizes without any race.
1680                  */
1681                 for_each_buffer_cpu(buffer, cpu) {
1682                         cpu_buffer = buffer->buffers[cpu];
1683                         if (!cpu_buffer->nr_pages_to_update)
1684                                 continue;
1685
1686                         /* The update must run on the CPU that is being updated. */
1687                         preempt_disable();
1688                         if (cpu == smp_processor_id() || !cpu_online(cpu)) {
1689                                 rb_update_pages(cpu_buffer);
1690                                 cpu_buffer->nr_pages_to_update = 0;
1691                         } else {
1692                                 /*
1693                                  * Can not disable preemption for schedule_work_on()
1694                                  * on PREEMPT_RT.
1695                                  */
1696                                 preempt_enable();
1697                                 schedule_work_on(cpu,
1698                                                 &cpu_buffer->update_pages_work);
1699                                 preempt_disable();
1700                         }
1701                         preempt_enable();
1702                 }
1703
1704                 /* wait for all the updates to complete */
1705                 for_each_buffer_cpu(buffer, cpu) {
1706                         cpu_buffer = buffer->buffers[cpu];
1707                         if (!cpu_buffer->nr_pages_to_update)
1708                                 continue;
1709
1710                         if (cpu_online(cpu))
1711                                 wait_for_completion(&cpu_buffer->update_done);
1712                         cpu_buffer->nr_pages_to_update = 0;
1713                 }
1714
1715                 put_online_cpus();
1716         } else {
1717                 /* Make sure this CPU has been intitialized */
1718                 if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
1719                         goto out;
1720
1721                 cpu_buffer = buffer->buffers[cpu_id];
1722
1723                 if (nr_pages == cpu_buffer->nr_pages)
1724                         goto out;
1725
1726                 cpu_buffer->nr_pages_to_update = nr_pages -
1727                                                 cpu_buffer->nr_pages;
1728
1729                 INIT_LIST_HEAD(&cpu_buffer->new_pages);
1730                 if (cpu_buffer->nr_pages_to_update > 0 &&
1731                         __rb_allocate_pages(cpu_buffer->nr_pages_to_update,
1732                                             &cpu_buffer->new_pages, cpu_id)) {
1733                         err = -ENOMEM;
1734                         goto out_err;
1735                 }
1736
1737                 get_online_cpus();
1738
1739                 preempt_disable();
1740                 /* The update must run on the CPU that is being updated. */
1741                 if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
1742                         rb_update_pages(cpu_buffer);
1743                 else {
1744                         /*
1745                          * Can not disable preemption for schedule_work_on()
1746                          * on PREEMPT_RT.
1747                          */
1748                         preempt_enable();
1749                         schedule_work_on(cpu_id,
1750                                          &cpu_buffer->update_pages_work);
1751                         wait_for_completion(&cpu_buffer->update_done);
1752                         preempt_disable();
1753                 }
1754                 preempt_enable();
1755
1756                 cpu_buffer->nr_pages_to_update = 0;
1757                 put_online_cpus();
1758         }
1759
1760  out:
1761         /*
1762          * The ring buffer resize can happen with the ring buffer
1763          * enabled, so that the update disturbs the tracing as little
1764          * as possible. But if the buffer is disabled, we do not need
1765          * to worry about that, and we can take the time to verify
1766          * that the buffer is not corrupt.
1767          */
1768         if (atomic_read(&buffer->record_disabled)) {
1769                 atomic_inc(&buffer->record_disabled);
1770                 /*
1771                  * Even though the buffer was disabled, we must make sure
1772                  * that it is truly disabled before calling rb_check_pages.
1773                  * There could have been a race between checking
1774                  * record_disable and incrementing it.
1775                  */
1776                 synchronize_sched();
1777                 for_each_buffer_cpu(buffer, cpu) {
1778                         cpu_buffer = buffer->buffers[cpu];
1779                         rb_check_pages(cpu_buffer);
1780                 }
1781                 atomic_dec(&buffer->record_disabled);
1782         }
1783
1784         mutex_unlock(&buffer->mutex);
1785         return size;
1786
1787  out_err:
1788         for_each_buffer_cpu(buffer, cpu) {
1789                 struct buffer_page *bpage, *tmp;
1790
1791                 cpu_buffer = buffer->buffers[cpu];
1792                 cpu_buffer->nr_pages_to_update = 0;
1793
1794                 if (list_empty(&cpu_buffer->new_pages))
1795                         continue;
1796
1797                 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
1798                                         list) {
1799                         list_del_init(&bpage->list);
1800                         free_buffer_page(bpage);
1801                 }
1802         }
1803         mutex_unlock(&buffer->mutex);
1804         return err;
1805 }
1806 EXPORT_SYMBOL_GPL(ring_buffer_resize);
1807
1808 void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
1809 {
1810         mutex_lock(&buffer->mutex);
1811         if (val)
1812                 buffer->flags |= RB_FL_OVERWRITE;
1813         else
1814                 buffer->flags &= ~RB_FL_OVERWRITE;
1815         mutex_unlock(&buffer->mutex);
1816 }
1817 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
1818
1819 static inline void *
1820 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
1821 {
1822         return bpage->data + index;
1823 }
1824
1825 static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
1826 {
1827         return bpage->page->data + index;
1828 }
1829
1830 static inline struct ring_buffer_event *
1831 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
1832 {
1833         return __rb_page_index(cpu_buffer->reader_page,
1834                                cpu_buffer->reader_page->read);
1835 }
1836
1837 static inline struct ring_buffer_event *
1838 rb_iter_head_event(struct ring_buffer_iter *iter)
1839 {
1840         return __rb_page_index(iter->head_page, iter->head);
1841 }
1842
1843 static inline unsigned rb_page_commit(struct buffer_page *bpage)
1844 {
1845         return local_read(&bpage->page->commit);
1846 }
1847
1848 /* Size is determined by what has been committed */
1849 static inline unsigned rb_page_size(struct buffer_page *bpage)
1850 {
1851         return rb_page_commit(bpage);
1852 }
1853
1854 static inline unsigned
1855 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
1856 {
1857         return rb_page_commit(cpu_buffer->commit_page);
1858 }
1859
1860 static inline unsigned
1861 rb_event_index(struct ring_buffer_event *event)
1862 {
1863         unsigned long addr = (unsigned long)event;
1864
1865         return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
1866 }
1867
1868 static inline int
1869 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
1870                    struct ring_buffer_event *event)
1871 {
1872         unsigned long addr = (unsigned long)event;
1873         unsigned long index;
1874
1875         index = rb_event_index(event);
1876         addr &= PAGE_MASK;
1877
1878         return cpu_buffer->commit_page->page == (void *)addr &&
1879                 rb_commit_index(cpu_buffer) == index;
1880 }
1881
1882 static void
1883 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
1884 {
1885         unsigned long max_count;
1886
1887         /*
1888          * We only race with interrupts and NMIs on this CPU.
1889          * If we own the commit event, then we can commit
1890          * all others that interrupted us, since the interruptions
1891          * are in stack format (they finish before they come
1892          * back to us). This allows us to do a simple loop to
1893          * assign the commit to the tail.
1894          */
1895  again:
1896         max_count = cpu_buffer->nr_pages * 100;
1897
1898         while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
1899                 if (RB_WARN_ON(cpu_buffer, !(--max_count)))
1900                         return;
1901                 if (RB_WARN_ON(cpu_buffer,
1902                                rb_is_reader_page(cpu_buffer->tail_page)))
1903                         return;
1904                 local_set(&cpu_buffer->commit_page->page->commit,
1905                           rb_page_write(cpu_buffer->commit_page));
1906                 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
1907                 cpu_buffer->write_stamp =
1908                         cpu_buffer->commit_page->page->time_stamp;
1909                 /* add barrier to keep gcc from optimizing too much */
1910                 barrier();
1911         }
1912         while (rb_commit_index(cpu_buffer) !=
1913                rb_page_write(cpu_buffer->commit_page)) {
1914
1915                 local_set(&cpu_buffer->commit_page->page->commit,
1916                           rb_page_write(cpu_buffer->commit_page));
1917                 RB_WARN_ON(cpu_buffer,
1918                            local_read(&cpu_buffer->commit_page->page->commit) &
1919                            ~RB_WRITE_MASK);
1920                 barrier();
1921         }
1922
1923         /* again, keep gcc from optimizing */
1924         barrier();
1925
1926         /*
1927          * If an interrupt came in just after the first while loop
1928          * and pushed the tail page forward, we will be left with
1929          * a dangling commit that will never go forward.
1930          */
1931         if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
1932                 goto again;
1933 }
1934
1935 static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1936 {
1937         cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
1938         cpu_buffer->reader_page->read = 0;
1939 }
1940
1941 static void rb_inc_iter(struct ring_buffer_iter *iter)
1942 {
1943         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1944
1945         /*
1946          * The iterator could be on the reader page (it starts there).
1947          * But the head could have moved, since the reader was
1948          * found. Check for this case and assign the iterator
1949          * to the head page instead of next.
1950          */
1951         if (iter->head_page == cpu_buffer->reader_page)
1952                 iter->head_page = rb_set_head_page(cpu_buffer);
1953         else
1954                 rb_inc_page(cpu_buffer, &iter->head_page);
1955
1956         iter->read_stamp = iter->head_page->page->time_stamp;
1957         iter->head = 0;
1958 }
1959
1960 /* Slow path, do not inline */
1961 static noinline struct ring_buffer_event *
1962 rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
1963 {
1964         event->type_len = RINGBUF_TYPE_TIME_EXTEND;
1965
1966         /* Not the first event on the page? */
1967         if (rb_event_index(event)) {
1968                 event->time_delta = delta & TS_MASK;
1969                 event->array[0] = delta >> TS_SHIFT;
1970         } else {
1971                 /* nope, just zero it */
1972                 event->time_delta = 0;
1973                 event->array[0] = 0;
1974         }
1975
1976         return skip_time_extend(event);
1977 }
1978
1979 /**
1980  * rb_update_event - update event type and data
1981  * @event: the even to update
1982  * @type: the type of event
1983  * @length: the size of the event field in the ring buffer
1984  *
1985  * Update the type and data fields of the event. The length
1986  * is the actual size that is written to the ring buffer,
1987  * and with this, we can determine what to place into the
1988  * data field.
1989  */
1990 static void
1991 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
1992                 struct ring_buffer_event *event, unsigned length,
1993                 int add_timestamp, u64 delta)
1994 {
1995         /* Only a commit updates the timestamp */
1996         if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
1997                 delta = 0;
1998
1999         /*
2000          * If we need to add a timestamp, then we
2001          * add it to the start of the resevered space.
2002          */
2003         if (unlikely(add_timestamp)) {
2004                 event = rb_add_time_stamp(event, delta);
2005                 length -= RB_LEN_TIME_EXTEND;
2006                 delta = 0;
2007         }
2008
2009         event->time_delta = delta;
2010         length -= RB_EVNT_HDR_SIZE;
2011         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
2012                 event->type_len = 0;
2013                 event->array[0] = length;
2014         } else
2015                 event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
2016 }
2017
2018 /*
2019  * rb_handle_head_page - writer hit the head page
2020  *
2021  * Returns: +1 to retry page
2022  *           0 to continue
2023  *          -1 on error
2024  */
2025 static int
2026 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
2027                     struct buffer_page *tail_page,
2028                     struct buffer_page *next_page)
2029 {
2030         struct buffer_page *new_head;
2031         int entries;
2032         int type;
2033         int ret;
2034
2035         entries = rb_page_entries(next_page);
2036
2037         /*
2038          * The hard part is here. We need to move the head
2039          * forward, and protect against both readers on
2040          * other CPUs and writers coming in via interrupts.
2041          */
2042         type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
2043                                        RB_PAGE_HEAD);
2044
2045         /*
2046          * type can be one of four:
2047          *  NORMAL - an interrupt already moved it for us
2048          *  HEAD   - we are the first to get here.
2049          *  UPDATE - we are the interrupt interrupting
2050          *           a current move.
2051          *  MOVED  - a reader on another CPU moved the next
2052          *           pointer to its reader page. Give up
2053          *           and try again.
2054          */
2055
2056         switch (type) {
2057         case RB_PAGE_HEAD:
2058                 /*
2059                  * We changed the head to UPDATE, thus
2060                  * it is our responsibility to update
2061                  * the counters.
2062                  */
2063                 local_add(entries, &cpu_buffer->overrun);
2064                 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
2065
2066                 /*
2067                  * The entries will be zeroed out when we move the
2068                  * tail page.
2069                  */
2070
2071                 /* still more to do */
2072                 break;
2073
2074         case RB_PAGE_UPDATE:
2075                 /*
2076                  * This is an interrupt that interrupt the
2077                  * previous update. Still more to do.
2078                  */
2079                 break;
2080         case RB_PAGE_NORMAL:
2081                 /*
2082                  * An interrupt came in before the update
2083                  * and processed this for us.
2084                  * Nothing left to do.
2085                  */
2086                 return 1;
2087         case RB_PAGE_MOVED:
2088                 /*
2089                  * The reader is on another CPU and just did
2090                  * a swap with our next_page.
2091                  * Try again.
2092                  */
2093                 return 1;
2094         default:
2095                 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
2096                 return -1;
2097         }
2098
2099         /*
2100          * Now that we are here, the old head pointer is
2101          * set to UPDATE. This will keep the reader from
2102          * swapping the head page with the reader page.
2103          * The reader (on another CPU) will spin till
2104          * we are finished.
2105          *
2106          * We just need to protect against interrupts
2107          * doing the job. We will set the next pointer
2108          * to HEAD. After that, we set the old pointer
2109          * to NORMAL, but only if it was HEAD before.
2110          * otherwise we are an interrupt, and only
2111          * want the outer most commit to reset it.
2112          */
2113         new_head = next_page;
2114         rb_inc_page(cpu_buffer, &new_head);
2115
2116         ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
2117                                     RB_PAGE_NORMAL);
2118
2119         /*
2120          * Valid returns are:
2121          *  HEAD   - an interrupt came in and already set it.
2122          *  NORMAL - One of two things:
2123          *            1) We really set it.
2124          *            2) A bunch of interrupts came in and moved
2125          *               the page forward again.
2126          */
2127         switch (ret) {
2128         case RB_PAGE_HEAD:
2129         case RB_PAGE_NORMAL:
2130                 /* OK */
2131                 break;
2132         default:
2133                 RB_WARN_ON(cpu_buffer, 1);
2134                 return -1;
2135         }
2136
2137         /*
2138          * It is possible that an interrupt came in,
2139          * set the head up, then more interrupts came in
2140          * and moved it again. When we get back here,
2141          * the page would have been set to NORMAL but we
2142          * just set it back to HEAD.
2143          *
2144          * How do you detect this? Well, if that happened
2145          * the tail page would have moved.
2146          */
2147         if (ret == RB_PAGE_NORMAL) {
2148                 /*
2149                  * If the tail had moved passed next, then we need
2150                  * to reset the pointer.
2151                  */
2152                 if (cpu_buffer->tail_page != tail_page &&
2153                     cpu_buffer->tail_page != next_page)
2154                         rb_head_page_set_normal(cpu_buffer, new_head,
2155                                                 next_page,
2156                                                 RB_PAGE_HEAD);
2157         }
2158
2159         /*
2160          * If this was the outer most commit (the one that
2161          * changed the original pointer from HEAD to UPDATE),
2162          * then it is up to us to reset it to NORMAL.
2163          */
2164         if (type == RB_PAGE_HEAD) {
2165                 ret = rb_head_page_set_normal(cpu_buffer, next_page,
2166                                               tail_page,
2167                                               RB_PAGE_UPDATE);
2168                 if (RB_WARN_ON(cpu_buffer,
2169                                ret != RB_PAGE_UPDATE))
2170                         return -1;
2171         }
2172
2173         return 0;
2174 }
2175
2176 static unsigned rb_calculate_event_length(unsigned length)
2177 {
2178         struct ring_buffer_event event; /* Used only for sizeof array */
2179
2180         /* zero length can cause confusions */
2181         if (!length)
2182                 length = 1;
2183
2184         if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
2185                 length += sizeof(event.array[0]);
2186
2187         length += RB_EVNT_HDR_SIZE;
2188         length = ALIGN(length, RB_ARCH_ALIGNMENT);
2189
2190         return length;
2191 }
2192
2193 static inline void
2194 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
2195               struct buffer_page *tail_page,
2196               unsigned long tail, unsigned long length)
2197 {
2198         struct ring_buffer_event *event;
2199
2200         /*
2201          * Only the event that crossed the page boundary
2202          * must fill the old tail_page with padding.
2203          */
2204         if (tail >= BUF_PAGE_SIZE) {
2205                 /*
2206                  * If the page was filled, then we still need
2207                  * to update the real_end. Reset it to zero
2208                  * and the reader will ignore it.
2209                  */
2210                 if (tail == BUF_PAGE_SIZE)
2211                         tail_page->real_end = 0;
2212
2213                 local_sub(length, &tail_page->write);
2214                 return;
2215         }
2216
2217         event = __rb_page_index(tail_page, tail);
2218         kmemcheck_annotate_bitfield(event, bitfield);
2219
2220         /* account for padding bytes */
2221         local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
2222
2223         /*
2224          * Save the original length to the meta data.
2225          * This will be used by the reader to add lost event
2226          * counter.
2227          */
2228         tail_page->real_end = tail;
2229
2230         /*
2231          * If this event is bigger than the minimum size, then
2232          * we need to be careful that we don't subtract the
2233          * write counter enough to allow another writer to slip
2234          * in on this page.
2235          * We put in a discarded commit instead, to make sure
2236          * that this space is not used again.
2237          *
2238          * If we are less than the minimum size, we don't need to
2239          * worry about it.
2240          */
2241         if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
2242                 /* No room for any events */
2243
2244                 /* Mark the rest of the page with padding */
2245                 rb_event_set_padding(event);
2246
2247                 /* Set the write back to the previous setting */
2248                 local_sub(length, &tail_page->write);
2249                 return;
2250         }
2251
2252         /* Put in a discarded event */
2253         event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
2254         event->type_len = RINGBUF_TYPE_PADDING;
2255         /* time delta must be non zero */
2256         event->time_delta = 1;
2257
2258         /* Set write to end of buffer */
2259         length = (tail + length) - BUF_PAGE_SIZE;
2260         local_sub(length, &tail_page->write);
2261 }
2262
2263 /*
2264  * This is the slow path, force gcc not to inline it.
2265  */
2266 static noinline struct ring_buffer_event *
2267 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
2268              unsigned long length, unsigned long tail,
2269              struct buffer_page *tail_page, u64 ts)
2270 {
2271         struct buffer_page *commit_page = cpu_buffer->commit_page;
2272         struct ring_buffer *buffer = cpu_buffer->buffer;
2273         struct buffer_page *next_page;
2274         int ret;
2275
2276         next_page = tail_page;
2277
2278         rb_inc_page(cpu_buffer, &next_page);
2279
2280         /*
2281          * If for some reason, we had an interrupt storm that made
2282          * it all the way around the buffer, bail, and warn
2283          * about it.
2284          */
2285         if (unlikely(next_page == commit_page)) {
2286                 local_inc(&cpu_buffer->commit_overrun);
2287                 goto out_reset;
2288         }
2289
2290         /*
2291          * This is where the fun begins!
2292          *
2293          * We are fighting against races between a reader that
2294          * could be on another CPU trying to swap its reader
2295          * page with the buffer head.
2296          *
2297          * We are also fighting against interrupts coming in and
2298          * moving the head or tail on us as well.
2299          *
2300          * If the next page is the head page then we have filled
2301          * the buffer, unless the commit page is still on the
2302          * reader page.
2303          */
2304         if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {
2305
2306                 /*
2307                  * If the commit is not on the reader page, then
2308                  * move the header page.
2309                  */
2310                 if (!rb_is_reader_page(cpu_buffer->commit_page)) {
2311                         /*
2312                          * If we are not in overwrite mode,
2313                          * this is easy, just stop here.
2314                          */
2315                         if (!(buffer->flags & RB_FL_OVERWRITE)) {
2316                                 local_inc(&cpu_buffer->dropped_events);
2317                                 goto out_reset;
2318                         }
2319
2320                         ret = rb_handle_head_page(cpu_buffer,
2321                                                   tail_page,
2322                                                   next_page);
2323                         if (ret < 0)
2324                                 goto out_reset;
2325                         if (ret)
2326                                 goto out_again;
2327                 } else {
2328                         /*
2329                          * We need to be careful here too. The
2330                          * commit page could still be on the reader
2331                          * page. We could have a small buffer, and
2332                          * have filled up the buffer with events
2333                          * from interrupts and such, and wrapped.
2334                          *
2335                          * Note, if the tail page is also the on the
2336                          * reader_page, we let it move out.
2337                          */
2338                         if (unlikely((cpu_buffer->commit_page !=
2339                                       cpu_buffer->tail_page) &&
2340                                      (cpu_buffer->commit_page ==
2341                                       cpu_buffer->reader_page))) {
2342                                 local_inc(&cpu_buffer->commit_overrun);
2343                                 goto out_reset;
2344                         }
2345                 }
2346         }
2347
2348         ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
2349         if (ret) {
2350                 /*
2351                  * Nested commits always have zero deltas, so
2352                  * just reread the time stamp
2353                  */
2354                 ts = rb_time_stamp(buffer);
2355                 next_page->page->time_stamp = ts;
2356         }
2357
2358  out_again:
2359
2360         rb_reset_tail(cpu_buffer, tail_page, tail, length);
2361
2362         /* fail and let the caller try again */
2363         return ERR_PTR(-EAGAIN);
2364
2365  out_reset:
2366         /* reset write */
2367         rb_reset_tail(cpu_buffer, tail_page, tail, length);
2368
2369         return NULL;
2370 }
2371
2372 static struct ring_buffer_event *
2373 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
2374                   unsigned long length, u64 ts,
2375                   u64 delta, int add_timestamp)
2376 {
2377         struct buffer_page *tail_page;
2378         struct ring_buffer_event *event;
2379         unsigned long tail, write;
2380
2381         /*
2382          * If the time delta since the last event is too big to
2383          * hold in the time field of the event, then we append a
2384          * TIME EXTEND event ahead of the data event.
2385          */
2386         if (unlikely(add_timestamp))
2387                 length += RB_LEN_TIME_EXTEND;
2388
2389         tail_page = cpu_buffer->tail_page;
2390         write = local_add_return(length, &tail_page->write);
2391
2392         /* set write to only the index of the write */
2393         write &= RB_WRITE_MASK;
2394         tail = write - length;
2395
2396         /* See if we shot pass the end of this buffer page */
2397         if (unlikely(write > BUF_PAGE_SIZE))
2398                 return rb_move_tail(cpu_buffer, length, tail,
2399                                     tail_page, ts);
2400
2401         /* We reserved something on the buffer */
2402
2403         event = __rb_page_index(tail_page, tail);
2404         kmemcheck_annotate_bitfield(event, bitfield);
2405         rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
2406
2407         local_inc(&tail_page->entries);
2408
2409         /*
2410          * If this is the first commit on the page, then update
2411          * its timestamp.
2412          */
2413         if (!tail)
2414                 tail_page->page->time_stamp = ts;
2415
2416         /* account for these added bytes */
2417         local_add(length, &cpu_buffer->entries_bytes);
2418
2419         return event;
2420 }
2421
2422 static inline int
2423 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
2424                   struct ring_buffer_event *event)
2425 {
2426         unsigned long new_index, old_index;
2427         struct buffer_page *bpage;
2428         unsigned long index;
2429         unsigned long addr;
2430
2431         new_index = rb_event_index(event);
2432         old_index = new_index + rb_event_ts_length(event);
2433         addr = (unsigned long)event;
2434         addr &= PAGE_MASK;
2435
2436         bpage = cpu_buffer->tail_page;
2437
2438         if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
2439                 unsigned long write_mask =
2440                         local_read(&bpage->write) & ~RB_WRITE_MASK;
2441                 unsigned long event_length = rb_event_length(event);
2442                 /*
2443                  * This is on the tail page. It is possible that
2444                  * a write could come in and move the tail page
2445                  * and write to the next page. That is fine
2446                  * because we just shorten what is on this page.
2447                  */
2448                 old_index += write_mask;
2449                 new_index += write_mask;
2450                 index = local_cmpxchg(&bpage->write, old_index, new_index);
2451                 if (index == old_index) {
2452                         /* update counters */
2453                         local_sub(event_length, &cpu_buffer->entries_bytes);
2454                         return 1;
2455                 }
2456         }
2457
2458         /* could not discard */
2459         return 0;
2460 }
2461
2462 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
2463 {
2464         local_inc(&cpu_buffer->committing);
2465         local_inc(&cpu_buffer->commits);
2466 }
2467
2468 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
2469 {
2470         unsigned long commits;
2471
2472         if (RB_WARN_ON(cpu_buffer,
2473                        !local_read(&cpu_buffer->committing)))
2474                 return;
2475
2476  again:
2477         commits = local_read(&cpu_buffer->commits);
2478         /* synchronize with interrupts */
2479         barrier();
2480         if (local_read(&cpu_buffer->committing) == 1)
2481                 rb_set_commit_to_write(cpu_buffer);
2482
2483         local_dec(&cpu_buffer->committing);
2484
2485         /* synchronize with interrupts */
2486         barrier();
2487
2488         /*
2489          * Need to account for interrupts coming in between the
2490          * updating of the commit page and the clearing of the
2491          * committing counter.
2492          */
2493         if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
2494             !local_read(&cpu_buffer->committing)) {
2495                 local_inc(&cpu_buffer->committing);
2496                 goto again;
2497         }
2498 }
2499
2500 static struct ring_buffer_event *
2501 rb_reserve_next_event(struct ring_buffer *buffer,
2502                       struct ring_buffer_per_cpu *cpu_buffer,
2503                       unsigned long length)
2504 {
2505         struct ring_buffer_event *event;
2506         u64 ts, delta;
2507         int nr_loops = 0;
2508         int add_timestamp;
2509         u64 diff;
2510
2511         rb_start_commit(cpu_buffer);
2512
2513 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
2514         /*
2515          * Due to the ability to swap a cpu buffer from a buffer
2516          * it is possible it was swapped before we committed.
2517          * (committing stops a swap). We check for it here and
2518          * if it happened, we have to fail the write.
2519          */
2520         barrier();
2521         if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
2522                 local_dec(&cpu_buffer->committing);
2523                 local_dec(&cpu_buffer->commits);
2524                 return NULL;
2525         }
2526 #endif
2527
2528         length = rb_calculate_event_length(length);
2529  again:
2530         add_timestamp = 0;
2531         delta = 0;
2532
2533         /*
2534          * We allow for interrupts to reenter here and do a trace.
2535          * If one does, it will cause this original code to loop
2536          * back here. Even with heavy interrupts happening, this
2537          * should only happen a few times in a row. If this happens
2538          * 1000 times in a row, there must be either an interrupt
2539          * storm or we have something buggy.
2540          * Bail!
2541          */
2542         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
2543                 goto out_fail;
2544
2545         ts = rb_time_stamp(cpu_buffer->buffer);
2546         diff = ts - cpu_buffer->write_stamp;
2547
2548         /* make sure this diff is calculated here */
2549         barrier();
2550
2551         /* Did the write stamp get updated already? */
2552         if (likely(ts >= cpu_buffer->write_stamp)) {
2553                 delta = diff;
2554                 if (unlikely(test_time_stamp(delta))) {
2555                         int local_clock_stable = 1;
2556 #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
2557                         local_clock_stable = sched_clock_stable;
2558 #endif
2559                         WARN_ONCE(delta > (1ULL << 59),
2560                                   KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
2561                                   (unsigned long long)delta,
2562                                   (unsigned long long)ts,
2563                                   (unsigned long long)cpu_buffer->write_stamp,
2564                                   local_clock_stable ? "" :
2565                                   "If you just came from a suspend/resume,\n"
2566                                   "please switch to the trace global clock:\n"
2567                                   "  echo global > /sys/kernel/debug/tracing/trace_clock\n");
2568                         add_timestamp = 1;
2569                 }
2570         }
2571
2572         event = __rb_reserve_next(cpu_buffer, length, ts,
2573                                   delta, add_timestamp);
2574         if (unlikely(PTR_ERR(event) == -EAGAIN))
2575                 goto again;
2576
2577         if (!event)
2578                 goto out_fail;
2579
2580         return event;
2581
2582  out_fail:
2583         rb_end_commit(cpu_buffer);
2584         return NULL;
2585 }
2586
2587 #ifdef CONFIG_TRACING
2588
2589 /*
2590  * The lock and unlock are done within a preempt disable section.
2591  * The current_context per_cpu variable can only be modified
2592  * by the current task between lock and unlock. But it can
2593  * be modified more than once via an interrupt. To pass this
2594  * information from the lock to the unlock without having to
2595  * access the 'in_interrupt()' functions again (which do show
2596  * a bit of overhead in something as critical as function tracing,
2597  * we use a bitmask trick.
2598  *
2599  *  bit 0 =  NMI context
2600  *  bit 1 =  IRQ context
2601  *  bit 2 =  SoftIRQ context
2602  *  bit 3 =  normal context.
2603  *
2604  * This works because this is the order of contexts that can
2605  * preempt other contexts. A SoftIRQ never preempts an IRQ
2606  * context.
2607  *
2608  * When the context is determined, the corresponding bit is
2609  * checked and set (if it was set, then a recursion of that context
2610  * happened).
2611  *
2612  * On unlock, we need to clear this bit. To do so, just subtract
2613  * 1 from the current_context and AND it to itself.
2614  *
2615  * (binary)
2616  *  101 - 1 = 100
2617  *  101 & 100 = 100 (clearing bit zero)
2618  *
2619  *  1010 - 1 = 1001
2620  *  1010 & 1001 = 1000 (clearing bit 1)
2621  *
2622  * The least significant bit can be cleared this way, and it
2623  * just so happens that it is the same bit corresponding to
2624  * the current context.
2625  */
2626 static DEFINE_PER_CPU(unsigned int, current_context);
2627
2628 static __always_inline int trace_recursive_lock(void)
2629 {
2630         unsigned int val = this_cpu_read(current_context);
2631         int bit;
2632
2633         if (in_interrupt()) {
2634                 if (in_nmi())
2635                         bit = 0;
2636                 else if (in_irq())
2637                         bit = 1;
2638                 else
2639                         bit = 2;
2640         } else
2641                 bit = 3;
2642
2643         if (unlikely(val & (1 << bit)))
2644                 return 1;
2645
2646         val |= (1 << bit);
2647         this_cpu_write(current_context, val);
2648
2649         return 0;
2650 }
2651
2652 static __always_inline void trace_recursive_unlock(void)
2653 {
2654         unsigned int val = this_cpu_read(current_context);
2655
2656         val--;
2657         val &= this_cpu_read(current_context);
2658         this_cpu_write(current_context, val);
2659 }
2660
2661 #else
2662
2663 #define trace_recursive_lock()          (0)
2664 #define trace_recursive_unlock()        do { } while (0)
2665
2666 #endif
2667
2668 /**
2669  * ring_buffer_lock_reserve - reserve a part of the buffer
2670  * @buffer: the ring buffer to reserve from
2671  * @length: the length of the data to reserve (excluding event header)
2672  *
2673  * Returns a reseverd event on the ring buffer to copy directly to.
2674  * The user of this interface will need to get the body to write into
2675  * and can use the ring_buffer_event_data() interface.
2676  *
2677  * The length is the length of the data needed, not the event length
2678  * which also includes the event header.
2679  *
2680  * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
2681  * If NULL is returned, then nothing has been allocated or locked.
2682  */
2683 struct ring_buffer_event *
2684 ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
2685 {
2686         struct ring_buffer_per_cpu *cpu_buffer;
2687         struct ring_buffer_event *event;
2688         int cpu;
2689
2690         if (ring_buffer_flags != RB_BUFFERS_ON)
2691                 return NULL;
2692
2693         /* If we are tracing schedule, we don't want to recurse */
2694         preempt_disable_notrace();
2695
2696         if (atomic_read(&buffer->record_disabled))
2697                 goto out_nocheck;
2698
2699         if (trace_recursive_lock())
2700                 goto out_nocheck;
2701
2702         cpu = raw_smp_processor_id();
2703
2704         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2705                 goto out;
2706
2707         cpu_buffer = buffer->buffers[cpu];
2708
2709         if (atomic_read(&cpu_buffer->record_disabled))
2710                 goto out;
2711
2712         if (length > BUF_MAX_DATA_SIZE)
2713                 goto out;
2714
2715         event = rb_reserve_next_event(buffer, cpu_buffer, length);
2716         if (!event)
2717                 goto out;
2718
2719         return event;
2720
2721  out:
2722         trace_recursive_unlock();
2723
2724  out_nocheck:
2725         preempt_enable_notrace();
2726         return NULL;
2727 }
2728 EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
2729
2730 static void
2731 rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
2732                       struct ring_buffer_event *event)
2733 {
2734         u64 delta;
2735
2736         /*
2737          * The event first in the commit queue updates the
2738          * time stamp.
2739          */
2740         if (rb_event_is_commit(cpu_buffer, event)) {
2741                 /*
2742                  * A commit event that is first on a page
2743                  * updates the write timestamp with the page stamp
2744                  */
2745                 if (!rb_event_index(event))
2746                         cpu_buffer->write_stamp =
2747                                 cpu_buffer->commit_page->page->time_stamp;
2748                 else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
2749                         delta = event->array[0];
2750                         delta <<= TS_SHIFT;
2751                         delta += event->time_delta;
2752                         cpu_buffer->write_stamp += delta;
2753                 } else
2754                         cpu_buffer->write_stamp += event->time_delta;
2755         }
2756 }
2757
2758 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
2759                       struct ring_buffer_event *event)
2760 {
2761         local_inc(&cpu_buffer->entries);
2762         rb_update_write_stamp(cpu_buffer, event);
2763         rb_end_commit(cpu_buffer);
2764 }
2765
2766 static __always_inline void
2767 rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
2768 {
2769         if (buffer->irq_work.waiters_pending) {
2770                 buffer->irq_work.waiters_pending = false;
2771                 /* irq_work_queue() supplies it's own memory barriers */
2772                 irq_work_queue(&buffer->irq_work.work);
2773         }
2774
2775         if (cpu_buffer->irq_work.waiters_pending) {
2776                 cpu_buffer->irq_work.waiters_pending = false;
2777                 /* irq_work_queue() supplies it's own memory barriers */
2778                 irq_work_queue(&cpu_buffer->irq_work.work);
2779         }
2780 }
2781
2782 /**
2783  * ring_buffer_unlock_commit - commit a reserved
2784  * @buffer: The buffer to commit to
2785  * @event: The event pointer to commit.
2786  *
2787  * This commits the data to the ring buffer, and releases any locks held.
2788  *
2789  * Must be paired with ring_buffer_lock_reserve.
2790  */
2791 int ring_buffer_unlock_commit(struct ring_buffer *buffer,
2792                               struct ring_buffer_event *event)
2793 {
2794         struct ring_buffer_per_cpu *cpu_buffer;
2795         int cpu = raw_smp_processor_id();
2796
2797         cpu_buffer = buffer->buffers[cpu];
2798
2799         rb_commit(cpu_buffer, event);
2800
2801         rb_wakeups(buffer, cpu_buffer);
2802
2803         trace_recursive_unlock();
2804
2805         preempt_enable_notrace();
2806
2807         return 0;
2808 }
2809 EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);
2810
2811 static inline void rb_event_discard(struct ring_buffer_event *event)
2812 {
2813         if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
2814                 event = skip_time_extend(event);
2815
2816         /* array[0] holds the actual length for the discarded event */
2817         event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
2818         event->type_len = RINGBUF_TYPE_PADDING;
2819         /* time delta must be non zero */
2820         if (!event->time_delta)
2821                 event->time_delta = 1;
2822 }
2823
2824 /*
2825  * Decrement the entries to the page that an event is on.
2826  * The event does not even need to exist, only the pointer
2827  * to the page it is on. This may only be called before the commit
2828  * takes place.
2829  */
2830 static inline void
2831 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
2832                    struct ring_buffer_event *event)
2833 {
2834         unsigned long addr = (unsigned long)event;
2835         struct buffer_page *bpage = cpu_buffer->commit_page;
2836         struct buffer_page *start;
2837
2838         addr &= PAGE_MASK;
2839
2840         /* Do the likely case first */
2841         if (likely(bpage->page == (void *)addr)) {
2842                 local_dec(&bpage->entries);
2843                 return;
2844         }
2845
2846         /*
2847          * Because the commit page may be on the reader page we
2848          * start with the next page and check the end loop there.
2849          */
2850         rb_inc_page(cpu_buffer, &bpage);
2851         start = bpage;
2852         do {
2853                 if (bpage->page == (void *)addr) {
2854                         local_dec(&bpage->entries);
2855                         return;
2856                 }
2857                 rb_inc_page(cpu_buffer, &bpage);
2858         } while (bpage != start);
2859
2860         /* commit not part of this buffer?? */
2861         RB_WARN_ON(cpu_buffer, 1);
2862 }
2863
2864 /**
2865  * ring_buffer_commit_discard - discard an event that has not been committed
2866  * @buffer: the ring buffer
2867  * @event: non committed event to discard
2868  *
2869  * Sometimes an event that is in the ring buffer needs to be ignored.
2870  * This function lets the user discard an event in the ring buffer
2871  * and then that event will not be read later.
2872  *
2873  * This function only works if it is called before the the item has been
2874  * committed. It will try to free the event from the ring buffer
2875  * if another event has not been added behind it.
2876  *
2877  * If another event has been added behind it, it will set the event
2878  * up as discarded, and perform the commit.
2879  *
2880  * If this function is called, do not call ring_buffer_unlock_commit on
2881  * the event.
2882  */
2883 void ring_buffer_discard_commit(struct ring_buffer *buffer,
2884                                 struct ring_buffer_event *event)
2885 {
2886         struct ring_buffer_per_cpu *cpu_buffer;
2887         int cpu;
2888
2889         /* The event is discarded regardless */
2890         rb_event_discard(event);
2891
2892         cpu = smp_processor_id();
2893         cpu_buffer = buffer->buffers[cpu];
2894
2895         /*
2896          * This must only be called if the event has not been
2897          * committed yet. Thus we can assume that preemption
2898          * is still disabled.
2899          */
2900         RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
2901
2902         rb_decrement_entry(cpu_buffer, event);
2903         if (rb_try_to_discard(cpu_buffer, event))
2904                 goto out;
2905
2906         /*
2907          * The commit is still visible by the reader, so we
2908          * must still update the timestamp.
2909          */
2910         rb_update_write_stamp(cpu_buffer, event);
2911  out:
2912         rb_end_commit(cpu_buffer);
2913
2914         trace_recursive_unlock();
2915
2916         preempt_enable_notrace();
2917
2918 }
2919 EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
2920
2921 /**
2922  * ring_buffer_write - write data to the buffer without reserving
2923  * @buffer: The ring buffer to write to.
2924  * @length: The length of the data being written (excluding the event header)
2925  * @data: The data to write to the buffer.
2926  *
2927  * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
2928  * one function. If you already have the data to write to the buffer, it
2929  * may be easier to simply call this function.
2930  *
2931  * Note, like ring_buffer_lock_reserve, the length is the length of the data
2932  * and not the length of the event which would hold the header.
2933  */
2934 int ring_buffer_write(struct ring_buffer *buffer,
2935                       unsigned long length,
2936                       void *data)
2937 {
2938         struct ring_buffer_per_cpu *cpu_buffer;
2939         struct ring_buffer_event *event;
2940         void *body;
2941         int ret = -EBUSY;
2942         int cpu;
2943
2944         if (ring_buffer_flags != RB_BUFFERS_ON)
2945                 return -EBUSY;
2946
2947         preempt_disable_notrace();
2948
2949         if (atomic_read(&buffer->record_disabled))
2950                 goto out;
2951
2952         cpu = raw_smp_processor_id();
2953
2954         if (!cpumask_test_cpu(cpu, buffer->cpumask))
2955                 goto out;
2956
2957         cpu_buffer = buffer->buffers[cpu];
2958
2959         if (atomic_read(&cpu_buffer->record_disabled))
2960                 goto out;
2961
2962         if (length > BUF_MAX_DATA_SIZE)
2963                 goto out;
2964
2965         event = rb_reserve_next_event(buffer, cpu_buffer, length);
2966         if (!event)
2967                 goto out;
2968
2969         body = rb_event_data(event);
2970
2971         memcpy(body, data, length);
2972
2973         rb_commit(cpu_buffer, event);
2974
2975         rb_wakeups(buffer, cpu_buffer);
2976
2977         ret = 0;
2978  out:
2979         preempt_enable_notrace();
2980
2981         return ret;
2982 }
2983 EXPORT_SYMBOL_GPL(ring_buffer_write);
2984
2985 static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
2986 {
2987         struct buffer_page *reader = cpu_buffer->reader_page;
2988         struct buffer_page *head = rb_set_head_page(cpu_buffer);
2989         struct buffer_page *commit = cpu_buffer->commit_page;
2990
2991         /* In case of error, head will be NULL */
2992         if (unlikely(!head))
2993                 return 1;
2994
2995         return reader->read == rb_page_commit(reader) &&
2996                 (commit == reader ||
2997                  (commit == head &&
2998                   head->read == rb_page_commit(commit)));
2999 }
3000
3001 /**
3002  * ring_buffer_record_disable - stop all writes into the buffer
3003  * @buffer: The ring buffer to stop writes to.
3004  *
3005  * This prevents all writes to the buffer. Any attempt to write
3006  * to the buffer after this will fail and return NULL.
3007  *
3008  * The caller should call synchronize_sched() after this.
3009  */
3010 void ring_buffer_record_disable(struct ring_buffer *buffer)
3011 {
3012         atomic_inc(&buffer->record_disabled);
3013 }
3014 EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
3015
3016 /**
3017  * ring_buffer_record_enable - enable writes to the buffer
3018  * @buffer: The ring buffer to enable writes
3019  *
3020  * Note, multiple disables will need the same number of enables
3021  * to truly enable the writing (much like preempt_disable).
3022  */
3023 void ring_buffer_record_enable(struct ring_buffer *buffer)
3024 {
3025         atomic_dec(&buffer->record_disabled);
3026 }
3027 EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
3028
3029 /**
3030  * ring_buffer_record_off - stop all writes into the buffer
3031  * @buffer: The ring buffer to stop writes to.
3032  *
3033  * This prevents all writes to the buffer. Any attempt to write
3034  * to the buffer after this will fail and return NULL.
3035  *
3036  * This is different than ring_buffer_record_disable() as
3037  * it works like an on/off switch, where as the disable() version
3038  * must be paired with a enable().
3039  */
3040 void ring_buffer_record_off(struct ring_buffer *buffer)
3041 {
3042         unsigned int rd;
3043         unsigned int new_rd;
3044
3045         do {
3046                 rd = atomic_read(&buffer->record_disabled);
3047                 new_rd = rd | RB_BUFFER_OFF;
3048         } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3049 }
3050 EXPORT_SYMBOL_GPL(ring_buffer_record_off);
3051
3052 /**
3053  * ring_buffer_record_on - restart writes into the buffer
3054  * @buffer: The ring buffer to start writes to.
3055  *
3056  * This enables all writes to the buffer that was disabled by
3057  * ring_buffer_record_off().
3058  *
3059  * This is different than ring_buffer_record_enable() as
3060  * it works like an on/off switch, where as the enable() version
3061  * must be paired with a disable().
3062  */
3063 void ring_buffer_record_on(struct ring_buffer *buffer)
3064 {
3065         unsigned int rd;
3066         unsigned int new_rd;
3067
3068         do {
3069                 rd = atomic_read(&buffer->record_disabled);
3070                 new_rd = rd & ~RB_BUFFER_OFF;
3071         } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd);
3072 }
3073 EXPORT_SYMBOL_GPL(ring_buffer_record_on);
3074
3075 /**
3076  * ring_buffer_record_is_on - return true if the ring buffer can write
3077  * @buffer: The ring buffer to see if write is enabled
3078  *
3079  * Returns true if the ring buffer is in a state that it accepts writes.
3080  */
3081 int ring_buffer_record_is_on(struct ring_buffer *buffer)
3082 {
3083         return !atomic_read(&buffer->record_disabled);
3084 }
3085
3086 /**
3087  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
3088  * @buffer: The ring buffer to stop writes to.
3089  * @cpu: The CPU buffer to stop
3090  *
3091  * This prevents all writes to the buffer. Any attempt to write
3092  * to the buffer after this will fail and return NULL.
3093  *
3094  * The caller should call synchronize_sched() after this.
3095  */
3096 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
3097 {
3098         struct ring_buffer_per_cpu *cpu_buffer;
3099
3100         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3101                 return;
3102
3103         cpu_buffer = buffer->buffers[cpu];
3104         atomic_inc(&cpu_buffer->record_disabled);
3105 }
3106 EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
3107
3108 /**
3109  * ring_buffer_record_enable_cpu - enable writes to the buffer
3110  * @buffer: The ring buffer to enable writes
3111  * @cpu: The CPU to enable.
3112  *
3113  * Note, multiple disables will need the same number of enables
3114  * to truly enable the writing (much like preempt_disable).
3115  */
3116 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
3117 {
3118         struct ring_buffer_per_cpu *cpu_buffer;
3119
3120         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3121                 return;
3122
3123         cpu_buffer = buffer->buffers[cpu];
3124         atomic_dec(&cpu_buffer->record_disabled);
3125 }
3126 EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu);
3127
3128 /*
3129  * The total entries in the ring buffer is the running counter
3130  * of entries entered into the ring buffer, minus the sum of
3131  * the entries read from the ring buffer and the number of
3132  * entries that were overwritten.
3133  */
3134 static inline unsigned long
3135 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
3136 {
3137         return local_read(&cpu_buffer->entries) -
3138                 (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
3139 }
3140
3141 /**
3142  * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer
3143  * @buffer: The ring buffer
3144  * @cpu: The per CPU buffer to read from.
3145  */
3146 u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
3147 {
3148         unsigned long flags;
3149         struct ring_buffer_per_cpu *cpu_buffer;
3150         struct buffer_page *bpage;
3151         u64 ret = 0;
3152
3153         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3154                 return 0;
3155
3156         cpu_buffer = buffer->buffers[cpu];
3157         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3158         /*
3159          * if the tail is on reader_page, oldest time stamp is on the reader
3160          * page
3161          */
3162         if (cpu_buffer->tail_page == cpu_buffer->reader_page)
3163                 bpage = cpu_buffer->reader_page;
3164         else
3165                 bpage = rb_set_head_page(cpu_buffer);
3166         if (bpage)
3167                 ret = bpage->page->time_stamp;
3168         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3169
3170         return ret;
3171 }
3172 EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
3173
3174 /**
3175  * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer
3176  * @buffer: The ring buffer
3177  * @cpu: The per CPU buffer to read from.
3178  */
3179 unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
3180 {
3181         struct ring_buffer_per_cpu *cpu_buffer;
3182         unsigned long ret;
3183
3184         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3185                 return 0;
3186
3187         cpu_buffer = buffer->buffers[cpu];
3188         ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes;
3189
3190         return ret;
3191 }
3192 EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
3193
3194 /**
3195  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
3196  * @buffer: The ring buffer
3197  * @cpu: The per CPU buffer to get the entries from.
3198  */
3199 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
3200 {
3201         struct ring_buffer_per_cpu *cpu_buffer;
3202
3203         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3204                 return 0;
3205
3206         cpu_buffer = buffer->buffers[cpu];
3207
3208         return rb_num_of_entries(cpu_buffer);
3209 }
3210 EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
3211
3212 /**
3213  * ring_buffer_overrun_cpu - get the number of overruns caused by the ring
3214  * buffer wrapping around (only if RB_FL_OVERWRITE is on).
3215  * @buffer: The ring buffer
3216  * @cpu: The per CPU buffer to get the number of overruns from
3217  */
3218 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
3219 {
3220         struct ring_buffer_per_cpu *cpu_buffer;
3221         unsigned long ret;
3222
3223         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3224                 return 0;
3225
3226         cpu_buffer = buffer->buffers[cpu];
3227         ret = local_read(&cpu_buffer->overrun);
3228
3229         return ret;
3230 }
3231 EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
3232
3233 /**
3234  * ring_buffer_commit_overrun_cpu - get the number of overruns caused by
3235  * commits failing due to the buffer wrapping around while there are uncommitted
3236  * events, such as during an interrupt storm.
3237  * @buffer: The ring buffer
3238  * @cpu: The per CPU buffer to get the number of overruns from
3239  */
3240 unsigned long
3241 ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
3242 {
3243         struct ring_buffer_per_cpu *cpu_buffer;
3244         unsigned long ret;
3245
3246         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3247                 return 0;
3248
3249         cpu_buffer = buffer->buffers[cpu];
3250         ret = local_read(&cpu_buffer->commit_overrun);
3251
3252         return ret;
3253 }
3254 EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
3255
3256 /**
3257  * ring_buffer_dropped_events_cpu - get the number of dropped events caused by
3258  * the ring buffer filling up (only if RB_FL_OVERWRITE is off).
3259  * @buffer: The ring buffer
3260  * @cpu: The per CPU buffer to get the number of overruns from
3261  */
3262 unsigned long
3263 ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
3264 {
3265         struct ring_buffer_per_cpu *cpu_buffer;
3266         unsigned long ret;
3267
3268         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3269                 return 0;
3270
3271         cpu_buffer = buffer->buffers[cpu];
3272         ret = local_read(&cpu_buffer->dropped_events);
3273
3274         return ret;
3275 }
3276 EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
3277
3278 /**
3279  * ring_buffer_read_events_cpu - get the number of events successfully read
3280  * @buffer: The ring buffer
3281  * @cpu: The per CPU buffer to get the number of events read
3282  */
3283 unsigned long
3284 ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
3285 {
3286         struct ring_buffer_per_cpu *cpu_buffer;
3287
3288         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3289                 return 0;
3290
3291         cpu_buffer = buffer->buffers[cpu];
3292         return cpu_buffer->read;
3293 }
3294 EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
3295
3296 /**
3297  * ring_buffer_entries - get the number of entries in a buffer
3298  * @buffer: The ring buffer
3299  *
3300  * Returns the total number of entries in the ring buffer
3301  * (all CPU entries)
3302  */
3303 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
3304 {
3305         struct ring_buffer_per_cpu *cpu_buffer;
3306         unsigned long entries = 0;
3307         int cpu;
3308
3309         /* if you care about this being correct, lock the buffer */
3310         for_each_buffer_cpu(buffer, cpu) {
3311                 cpu_buffer = buffer->buffers[cpu];
3312                 entries += rb_num_of_entries(cpu_buffer);
3313         }
3314
3315         return entries;
3316 }
3317 EXPORT_SYMBOL_GPL(ring_buffer_entries);
3318
3319 /**
3320  * ring_buffer_overruns - get the number of overruns in buffer
3321  * @buffer: The ring buffer
3322  *
3323  * Returns the total number of overruns in the ring buffer
3324  * (all CPU entries)
3325  */
3326 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
3327 {
3328         struct ring_buffer_per_cpu *cpu_buffer;
3329         unsigned long overruns = 0;
3330         int cpu;
3331
3332         /* if you care about this being correct, lock the buffer */
3333         for_each_buffer_cpu(buffer, cpu) {
3334                 cpu_buffer = buffer->buffers[cpu];
3335                 overruns += local_read(&cpu_buffer->overrun);
3336         }
3337
3338         return overruns;
3339 }
3340 EXPORT_SYMBOL_GPL(ring_buffer_overruns);
3341
3342 static void rb_iter_reset(struct ring_buffer_iter *iter)
3343 {
3344         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3345
3346         /* Iterator usage is expected to have record disabled */
3347         if (list_empty(&cpu_buffer->reader_page->list)) {
3348                 iter->head_page = rb_set_head_page(cpu_buffer);
3349                 if (unlikely(!iter->head_page))
3350                         return;
3351                 iter->head = iter->head_page->read;
3352         } else {
3353                 iter->head_page = cpu_buffer->reader_page;
3354                 iter->head = cpu_buffer->reader_page->read;
3355         }
3356         if (iter->head)
3357                 iter->read_stamp = cpu_buffer->read_stamp;
3358         else
3359                 iter->read_stamp = iter->head_page->page->time_stamp;
3360         iter->cache_reader_page = cpu_buffer->reader_page;
3361         iter->cache_read = cpu_buffer->read;
3362 }
3363
3364 /**
3365  * ring_buffer_iter_reset - reset an iterator
3366  * @iter: The iterator to reset
3367  *
3368  * Resets the iterator, so that it will start from the beginning
3369  * again.
3370  */
3371 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
3372 {
3373         struct ring_buffer_per_cpu *cpu_buffer;
3374         unsigned long flags;
3375
3376         if (!iter)
3377                 return;
3378
3379         cpu_buffer = iter->cpu_buffer;
3380
3381         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3382         rb_iter_reset(iter);
3383         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3384 }
3385 EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);
3386
3387 /**
3388  * ring_buffer_iter_empty - check if an iterator has no more to read
3389  * @iter: The iterator to check
3390  */
3391 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
3392 {
3393         struct ring_buffer_per_cpu *cpu_buffer;
3394
3395         cpu_buffer = iter->cpu_buffer;
3396
3397         return iter->head_page == cpu_buffer->commit_page &&
3398                 iter->head == rb_commit_index(cpu_buffer);
3399 }
3400 EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);
3401
3402 static void
3403 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
3404                      struct ring_buffer_event *event)
3405 {
3406         u64 delta;
3407
3408         switch (event->type_len) {
3409         case RINGBUF_TYPE_PADDING:
3410                 return;
3411
3412         case RINGBUF_TYPE_TIME_EXTEND:
3413                 delta = event->array[0];
3414                 delta <<= TS_SHIFT;
3415                 delta += event->time_delta;
3416                 cpu_buffer->read_stamp += delta;
3417                 return;
3418
3419         case RINGBUF_TYPE_TIME_STAMP:
3420                 /* FIXME: not implemented */
3421                 return;
3422
3423         case RINGBUF_TYPE_DATA:
3424                 cpu_buffer->read_stamp += event->time_delta;
3425                 return;
3426
3427         default:
3428                 BUG();
3429         }
3430         return;
3431 }
3432
3433 static void
3434 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
3435                           struct ring_buffer_event *event)
3436 {
3437         u64 delta;
3438
3439         switch (event->type_len) {
3440         case RINGBUF_TYPE_PADDING:
3441                 return;
3442
3443         case RINGBUF_TYPE_TIME_EXTEND:
3444                 delta = event->array[0];
3445                 delta <<= TS_SHIFT;
3446                 delta += event->time_delta;
3447                 iter->read_stamp += delta;
3448                 return;
3449
3450         case RINGBUF_TYPE_TIME_STAMP:
3451                 /* FIXME: not implemented */
3452                 return;
3453
3454         case RINGBUF_TYPE_DATA:
3455                 iter->read_stamp += event->time_delta;
3456                 return;
3457
3458         default:
3459                 BUG();
3460         }
3461         return;
3462 }
3463
3464 static struct buffer_page *
3465 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
3466 {
3467         struct buffer_page *reader = NULL;
3468         unsigned long overwrite;
3469         unsigned long flags;
3470         int nr_loops = 0;
3471         int ret;
3472
3473         local_irq_save(flags);
3474         arch_spin_lock(&cpu_buffer->lock);
3475
3476  again:
3477         /*
3478          * This should normally only loop twice. But because the
3479          * start of the reader inserts an empty page, it causes
3480          * a case where we will loop three times. There should be no
3481          * reason to loop four times (that I know of).
3482          */
3483         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
3484                 reader = NULL;
3485                 goto out;
3486         }
3487
3488         reader = cpu_buffer->reader_page;
3489
3490         /* If there's more to read, return this page */
3491         if (cpu_buffer->reader_page->read < rb_page_size(reader))
3492                 goto out;
3493
3494         /* Never should we have an index greater than the size */
3495         if (RB_WARN_ON(cpu_buffer,
3496                        cpu_buffer->reader_page->read > rb_page_size(reader)))
3497                 goto out;
3498
3499         /* check if we caught up to the tail */
3500         reader = NULL;
3501         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
3502                 goto out;
3503
3504         /* Don't bother swapping if the ring buffer is empty */
3505         if (rb_num_of_entries(cpu_buffer) == 0)
3506                 goto out;
3507
3508         /*
3509          * Reset the reader page to size zero.
3510          */
3511         local_set(&cpu_buffer->reader_page->write, 0);
3512         local_set(&cpu_buffer->reader_page->entries, 0);
3513         local_set(&cpu_buffer->reader_page->page->commit, 0);
3514         cpu_buffer->reader_page->real_end = 0;
3515
3516  spin:
3517         /*
3518          * Splice the empty reader page into the list around the head.
3519          */
3520         reader = rb_set_head_page(cpu_buffer);
3521         if (!reader)
3522                 goto out;
3523         cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
3524         cpu_buffer->reader_page->list.prev = reader->list.prev;
3525
3526         /*
3527          * cpu_buffer->pages just needs to point to the buffer, it
3528          *  has no specific buffer page to point to. Lets move it out
3529          *  of our way so we don't accidentally swap it.
3530          */
3531         cpu_buffer->pages = reader->list.prev;
3532
3533         /* The reader page will be pointing to the new head */
3534         rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
3535
3536         /*
3537          * We want to make sure we read the overruns after we set up our
3538          * pointers to the next object. The writer side does a
3539          * cmpxchg to cross pages which acts as the mb on the writer
3540          * side. Note, the reader will constantly fail the swap
3541          * while the writer is updating the pointers, so this
3542          * guarantees that the overwrite recorded here is the one we
3543          * want to compare with the last_overrun.
3544          */
3545         smp_mb();
3546         overwrite = local_read(&(cpu_buffer->overrun));
3547
3548         /*
3549          * Here's the tricky part.
3550          *
3551          * We need to move the pointer past the header page.
3552          * But we can only do that if a writer is not currently
3553          * moving it. The page before the header page has the
3554          * flag bit '1' set if it is pointing to the page we want.
3555          * but if the writer is in the process of moving it
3556          * than it will be '2' or already moved '0'.
3557          */
3558
3559         ret = rb_head_page_replace(reader, cpu_buffer->reader_page);
3560
3561         /*
3562          * If we did not convert it, then we must try again.
3563          */
3564         if (!ret)
3565                 goto spin;
3566
3567         /*
3568          * Yeah! We succeeded in replacing the page.
3569          *
3570          * Now make the new head point back to the reader page.
3571          */
3572         rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
3573         rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
3574
3575         /* Finally update the reader page to the new head */
3576         cpu_buffer->reader_page = reader;
3577         rb_reset_reader_page(cpu_buffer);
3578
3579         if (overwrite != cpu_buffer->last_overrun) {
3580                 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
3581                 cpu_buffer->last_overrun = overwrite;
3582         }
3583
3584         goto again;
3585
3586  out:
3587         arch_spin_unlock(&cpu_buffer->lock);
3588         local_irq_restore(flags);
3589
3590         return reader;
3591 }
3592
3593 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
3594 {
3595         struct ring_buffer_event *event;
3596         struct buffer_page *reader;
3597         unsigned length;
3598
3599         reader = rb_get_reader_page(cpu_buffer);
3600
3601         /* This function should not be called when buffer is empty */
3602         if (RB_WARN_ON(cpu_buffer, !reader))
3603                 return;
3604
3605         event = rb_reader_event(cpu_buffer);
3606
3607         if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
3608                 cpu_buffer->read++;
3609
3610         rb_update_read_stamp(cpu_buffer, event);
3611
3612         length = rb_event_length(event);
3613         cpu_buffer->reader_page->read += length;
3614 }
3615
3616 static void rb_advance_iter(struct ring_buffer_iter *iter)
3617 {
3618         struct ring_buffer_per_cpu *cpu_buffer;
3619         struct ring_buffer_event *event;
3620         unsigned length;
3621
3622         cpu_buffer = iter->cpu_buffer;
3623
3624         /*
3625          * Check if we are at the end of the buffer.
3626          */
3627         if (iter->head >= rb_page_size(iter->head_page)) {
3628                 /* discarded commits can make the page empty */
3629                 if (iter->head_page == cpu_buffer->commit_page)
3630                         return;
3631                 rb_inc_iter(iter);
3632                 return;
3633         }
3634
3635         event = rb_iter_head_event(iter);
3636
3637         length = rb_event_length(event);
3638
3639         /*
3640          * This should not be called to advance the header if we are
3641          * at the tail of the buffer.
3642          */
3643         if (RB_WARN_ON(cpu_buffer,
3644                        (iter->head_page == cpu_buffer->commit_page) &&
3645                        (iter->head + length > rb_commit_index(cpu_buffer))))
3646                 return;
3647
3648         rb_update_iter_read_stamp(iter, event);
3649
3650         iter->head += length;
3651
3652         /* check for end of page padding */
3653         if ((iter->head >= rb_page_size(iter->head_page)) &&
3654             (iter->head_page != cpu_buffer->commit_page))
3655                 rb_inc_iter(iter);
3656 }
3657
3658 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
3659 {
3660         return cpu_buffer->lost_events;
3661 }
3662
3663 static struct ring_buffer_event *
3664 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
3665                unsigned long *lost_events)
3666 {
3667         struct ring_buffer_event *event;
3668         struct buffer_page *reader;
3669         int nr_loops = 0;
3670
3671  again:
3672         /*
3673          * We repeat when a time extend is encountered.
3674          * Since the time extend is always attached to a data event,
3675          * we should never loop more than once.
3676          * (We never hit the following condition more than twice).
3677          */
3678         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3679                 return NULL;
3680
3681         reader = rb_get_reader_page(cpu_buffer);
3682         if (!reader)
3683                 return NULL;
3684
3685         event = rb_reader_event(cpu_buffer);
3686
3687         switch (event->type_len) {
3688         case RINGBUF_TYPE_PADDING:
3689                 if (rb_null_event(event))
3690                         RB_WARN_ON(cpu_buffer, 1);
3691                 /*
3692                  * Because the writer could be discarding every
3693                  * event it creates (which would probably be bad)
3694                  * if we were to go back to "again" then we may never
3695                  * catch up, and will trigger the warn on, or lock
3696                  * the box. Return the padding, and we will release
3697                  * the current locks, and try again.
3698                  */
3699                 return event;
3700
3701         case RINGBUF_TYPE_TIME_EXTEND:
3702                 /* Internal data, OK to advance */
3703                 rb_advance_reader(cpu_buffer);
3704                 goto again;
3705
3706         case RINGBUF_TYPE_TIME_STAMP:
3707                 /* FIXME: not implemented */
3708                 rb_advance_reader(cpu_buffer);
3709                 goto again;
3710
3711         case RINGBUF_TYPE_DATA:
3712                 if (ts) {
3713                         *ts = cpu_buffer->read_stamp + event->time_delta;
3714                         ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
3715                                                          cpu_buffer->cpu, ts);
3716                 }
3717                 if (lost_events)
3718                         *lost_events = rb_lost_events(cpu_buffer);
3719                 return event;
3720
3721         default:
3722                 BUG();
3723         }
3724
3725         return NULL;
3726 }
3727 EXPORT_SYMBOL_GPL(ring_buffer_peek);
3728
3729 static struct ring_buffer_event *
3730 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3731 {
3732         struct ring_buffer *buffer;
3733         struct ring_buffer_per_cpu *cpu_buffer;
3734         struct ring_buffer_event *event;
3735         int nr_loops = 0;
3736
3737         cpu_buffer = iter->cpu_buffer;
3738         buffer = cpu_buffer->buffer;
3739
3740         /*
3741          * Check if someone performed a consuming read to
3742          * the buffer. A consuming read invalidates the iterator
3743          * and we need to reset the iterator in this case.
3744          */
3745         if (unlikely(iter->cache_read != cpu_buffer->read ||
3746                      iter->cache_reader_page != cpu_buffer->reader_page))
3747                 rb_iter_reset(iter);
3748
3749  again:
3750         if (ring_buffer_iter_empty(iter))
3751                 return NULL;
3752
3753         /*
3754          * We repeat when a time extend is encountered.
3755          * Since the time extend is always attached to a data event,
3756          * we should never loop more than once.
3757          * (We never hit the following condition more than twice).
3758          */
3759         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
3760                 return NULL;
3761
3762         if (rb_per_cpu_empty(cpu_buffer))
3763                 return NULL;
3764
3765         if (iter->head >= local_read(&iter->head_page->page->commit)) {
3766                 rb_inc_iter(iter);
3767                 goto again;
3768         }
3769
3770         event = rb_iter_head_event(iter);
3771
3772         switch (event->type_len) {
3773         case RINGBUF_TYPE_PADDING:
3774                 if (rb_null_event(event)) {
3775                         rb_inc_iter(iter);
3776                         goto again;
3777                 }
3778                 rb_advance_iter(iter);
3779                 return event;
3780
3781         case RINGBUF_TYPE_TIME_EXTEND:
3782                 /* Internal data, OK to advance */
3783                 rb_advance_iter(iter);
3784                 goto again;
3785
3786         case RINGBUF_TYPE_TIME_STAMP:
3787                 /* FIXME: not implemented */
3788                 rb_advance_iter(iter);
3789                 goto again;
3790
3791         case RINGBUF_TYPE_DATA:
3792                 if (ts) {
3793                         *ts = iter->read_stamp + event->time_delta;
3794                         ring_buffer_normalize_time_stamp(buffer,
3795                                                          cpu_buffer->cpu, ts);
3796                 }
3797                 return event;
3798
3799         default:
3800                 BUG();
3801         }
3802
3803         return NULL;
3804 }
3805 EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);
3806
3807 static inline int rb_ok_to_lock(void)
3808 {
3809         /*
3810          * If an NMI die dumps out the content of the ring buffer
3811          * do not grab locks. We also permanently disable the ring
3812          * buffer too. A one time deal is all you get from reading
3813          * the ring buffer from an NMI.
3814          */
3815         if (likely(!in_nmi()))
3816                 return 1;
3817
3818         tracing_off_permanent();
3819         return 0;
3820 }
3821
3822 /**
3823  * ring_buffer_peek - peek at the next event to be read
3824  * @buffer: The ring buffer to read
3825  * @cpu: The cpu to peak at
3826  * @ts: The timestamp counter of this event.
3827  * @lost_events: a variable to store if events were lost (may be NULL)
3828  *
3829  * This will return the event that will be read next, but does
3830  * not consume the data.
3831  */
3832 struct ring_buffer_event *
3833 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
3834                  unsigned long *lost_events)
3835 {
3836         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
3837         struct ring_buffer_event *event;
3838         unsigned long flags;
3839         int dolock;
3840
3841         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3842                 return NULL;
3843
3844         dolock = rb_ok_to_lock();
3845  again:
3846         local_irq_save(flags);
3847         if (dolock)
3848                 raw_spin_lock(&cpu_buffer->reader_lock);
3849         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3850         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3851                 rb_advance_reader(cpu_buffer);
3852         if (dolock)
3853                 raw_spin_unlock(&cpu_buffer->reader_lock);
3854         local_irq_restore(flags);
3855
3856         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3857                 goto again;
3858
3859         return event;
3860 }
3861
3862 /**
3863  * ring_buffer_iter_peek - peek at the next event to be read
3864  * @iter: The ring buffer iterator
3865  * @ts: The timestamp counter of this event.
3866  *
3867  * This will return the event that will be read next, but does
3868  * not increment the iterator.
3869  */
3870 struct ring_buffer_event *
3871 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
3872 {
3873         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
3874         struct ring_buffer_event *event;
3875         unsigned long flags;
3876
3877  again:
3878         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
3879         event = rb_iter_peek(iter, ts);
3880         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
3881
3882         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3883                 goto again;
3884
3885         return event;
3886 }
3887
3888 /**
3889  * ring_buffer_consume - return an event and consume it
3890  * @buffer: The ring buffer to get the next event from
3891  * @cpu: the cpu to read the buffer from
3892  * @ts: a variable to store the timestamp (may be NULL)
3893  * @lost_events: a variable to store if events were lost (may be NULL)
3894  *
3895  * Returns the next event in the ring buffer, and that event is consumed.
3896  * Meaning, that sequential reads will keep returning a different event,
3897  * and eventually empty the ring buffer if the producer is slower.
3898  */
3899 struct ring_buffer_event *
3900 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
3901                     unsigned long *lost_events)
3902 {
3903         struct ring_buffer_per_cpu *cpu_buffer;
3904         struct ring_buffer_event *event = NULL;
3905         unsigned long flags;
3906         int dolock;
3907
3908         dolock = rb_ok_to_lock();
3909
3910  again:
3911         /* might be called in atomic */
3912         preempt_disable();
3913
3914         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3915                 goto out;
3916
3917         cpu_buffer = buffer->buffers[cpu];
3918         local_irq_save(flags);
3919         if (dolock)
3920                 raw_spin_lock(&cpu_buffer->reader_lock);
3921
3922         event = rb_buffer_peek(cpu_buffer, ts, lost_events);
3923         if (event) {
3924                 cpu_buffer->lost_events = 0;
3925                 rb_advance_reader(cpu_buffer);
3926         }
3927
3928         if (dolock)
3929                 raw_spin_unlock(&cpu_buffer->reader_lock);
3930         local_irq_restore(flags);
3931
3932  out:
3933         preempt_enable();
3934
3935         if (event && event->type_len == RINGBUF_TYPE_PADDING)
3936                 goto again;
3937
3938         return event;
3939 }
3940 EXPORT_SYMBOL_GPL(ring_buffer_consume);
3941
3942 /**
3943  * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
3944  * @buffer: The ring buffer to read from
3945  * @cpu: The cpu buffer to iterate over
3946  *
3947  * This performs the initial preparations necessary to iterate
3948  * through the buffer.  Memory is allocated, buffer recording
3949  * is disabled, and the iterator pointer is returned to the caller.
3950  *
3951  * Disabling buffer recordng prevents the reading from being
3952  * corrupted. This is not a consuming read, so a producer is not
3953  * expected.
3954  *
3955  * After a sequence of ring_buffer_read_prepare calls, the user is
3956  * expected to make at least one call to ring_buffer_prepare_sync.
3957  * Afterwards, ring_buffer_read_start is invoked to get things going
3958  * for real.
3959  *
3960  * This overall must be paired with ring_buffer_finish.
3961  */
3962 struct ring_buffer_iter *
3963 ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
3964 {
3965         struct ring_buffer_per_cpu *cpu_buffer;
3966         struct ring_buffer_iter *iter;
3967
3968         if (!cpumask_test_cpu(cpu, buffer->cpumask))
3969                 return NULL;
3970
3971         iter = kmalloc(sizeof(*iter), GFP_KERNEL);
3972         if (!iter)
3973                 return NULL;
3974
3975         cpu_buffer = buffer->buffers[cpu];
3976
3977         iter->cpu_buffer = cpu_buffer;
3978
3979         atomic_inc(&buffer->resize_disabled);
3980         atomic_inc(&cpu_buffer->record_disabled);
3981
3982         return iter;
3983 }
3984 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);
3985
3986 /**
3987  * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
3988  *
3989  * All previously invoked ring_buffer_read_prepare calls to prepare
3990  * iterators will be synchronized.  Afterwards, read_buffer_read_start
3991  * calls on those iterators are allowed.
3992  */
3993 void
3994 ring_buffer_read_prepare_sync(void)
3995 {
3996         synchronize_sched();
3997 }
3998 EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);
3999
4000 /**
4001  * ring_buffer_read_start - start a non consuming read of the buffer
4002  * @iter: The iterator returned by ring_buffer_read_prepare
4003  *
4004  * This finalizes the startup of an iteration through the buffer.
4005  * The iterator comes from a call to ring_buffer_read_prepare and
4006  * an intervening ring_buffer_read_prepare_sync must have been
4007  * performed.
4008  *
4009  * Must be paired with ring_buffer_finish.
4010  */
4011 void
4012 ring_buffer_read_start(struct ring_buffer_iter *iter)
4013 {
4014         struct ring_buffer_per_cpu *cpu_buffer;
4015         unsigned long flags;
4016
4017         if (!iter)
4018                 return;
4019
4020         cpu_buffer = iter->cpu_buffer;
4021
4022         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4023         arch_spin_lock(&cpu_buffer->lock);
4024         rb_iter_reset(iter);
4025         arch_spin_unlock(&cpu_buffer->lock);
4026         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4027 }
4028 EXPORT_SYMBOL_GPL(ring_buffer_read_start);
4029
4030 /**
4031  * ring_buffer_finish - finish reading the iterator of the buffer
4032  * @iter: The iterator retrieved by ring_buffer_start
4033  *
4034  * This re-enables the recording to the buffer, and frees the
4035  * iterator.
4036  */
4037 void
4038 ring_buffer_read_finish(struct ring_buffer_iter *iter)
4039 {
4040         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4041         unsigned long flags;
4042
4043         /*
4044          * Ring buffer is disabled from recording, here's a good place
4045          * to check the integrity of the ring buffer.
4046          * Must prevent readers from trying to read, as the check
4047          * clears the HEAD page and readers require it.
4048          */
4049         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4050         rb_check_pages(cpu_buffer);
4051         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4052
4053         atomic_dec(&cpu_buffer->record_disabled);
4054         atomic_dec(&cpu_buffer->buffer->resize_disabled);
4055         kfree(iter);
4056 }
4057 EXPORT_SYMBOL_GPL(ring_buffer_read_finish);
4058
4059 /**
4060  * ring_buffer_read - read the next item in the ring buffer by the iterator
4061  * @iter: The ring buffer iterator
4062  * @ts: The time stamp of the event read.
4063  *
4064  * This reads the next event in the ring buffer and increments the iterator.
4065  */
4066 struct ring_buffer_event *
4067 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
4068 {
4069         struct ring_buffer_event *event;
4070         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
4071         unsigned long flags;
4072
4073         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4074  again:
4075         event = rb_iter_peek(iter, ts);
4076         if (!event)
4077                 goto out;
4078
4079         if (event->type_len == RINGBUF_TYPE_PADDING)
4080                 goto again;
4081
4082         rb_advance_iter(iter);
4083  out:
4084         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4085
4086         return event;
4087 }
4088 EXPORT_SYMBOL_GPL(ring_buffer_read);
4089
4090 /**
4091  * ring_buffer_size - return the size of the ring buffer (in bytes)
4092  * @buffer: The ring buffer.
4093  */
4094 unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
4095 {
4096         /*
4097          * Earlier, this method returned
4098          *      BUF_PAGE_SIZE * buffer->nr_pages
4099          * Since the nr_pages field is now removed, we have converted this to
4100          * return the per cpu buffer value.
4101          */
4102         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4103                 return 0;
4104
4105         return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
4106 }
4107 EXPORT_SYMBOL_GPL(ring_buffer_size);
4108
4109 static void
4110 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
4111 {
4112         rb_head_page_deactivate(cpu_buffer);
4113
4114         cpu_buffer->head_page
4115                 = list_entry(cpu_buffer->pages, struct buffer_page, list);
4116         local_set(&cpu_buffer->head_page->write, 0);
4117         local_set(&cpu_buffer->head_page->entries, 0);
4118         local_set(&cpu_buffer->head_page->page->commit, 0);
4119
4120         cpu_buffer->head_page->read = 0;
4121
4122         cpu_buffer->tail_page = cpu_buffer->head_page;
4123         cpu_buffer->commit_page = cpu_buffer->head_page;
4124
4125         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
4126         INIT_LIST_HEAD(&cpu_buffer->new_pages);
4127         local_set(&cpu_buffer->reader_page->write, 0);
4128         local_set(&cpu_buffer->reader_page->entries, 0);
4129         local_set(&cpu_buffer->reader_page->page->commit, 0);
4130         cpu_buffer->reader_page->read = 0;
4131
4132         local_set(&cpu_buffer->entries_bytes, 0);
4133         local_set(&cpu_buffer->overrun, 0);
4134         local_set(&cpu_buffer->commit_overrun, 0);
4135         local_set(&cpu_buffer->dropped_events, 0);
4136         local_set(&cpu_buffer->entries, 0);
4137         local_set(&cpu_buffer->committing, 0);
4138         local_set(&cpu_buffer->commits, 0);
4139         cpu_buffer->read = 0;
4140         cpu_buffer->read_bytes = 0;
4141
4142         cpu_buffer->write_stamp = 0;
4143         cpu_buffer->read_stamp = 0;
4144
4145         cpu_buffer->lost_events = 0;
4146         cpu_buffer->last_overrun = 0;
4147
4148         rb_head_page_activate(cpu_buffer);
4149 }
4150
4151 /**
4152  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
4153  * @buffer: The ring buffer to reset a per cpu buffer of
4154  * @cpu: The CPU buffer to be reset
4155  */
4156 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
4157 {
4158         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4159         unsigned long flags;
4160
4161         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4162                 return;
4163
4164         atomic_inc(&buffer->resize_disabled);
4165         atomic_inc(&cpu_buffer->record_disabled);
4166
4167         /* Make sure all commits have finished */
4168         synchronize_sched();
4169
4170         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4171
4172         if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing)))
4173                 goto out;
4174
4175         arch_spin_lock(&cpu_buffer->lock);
4176
4177         rb_reset_cpu(cpu_buffer);
4178
4179         arch_spin_unlock(&cpu_buffer->lock);
4180
4181  out:
4182         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4183
4184         atomic_dec(&cpu_buffer->record_disabled);
4185         atomic_dec(&buffer->resize_disabled);
4186 }
4187 EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
4188
4189 /**
4190  * ring_buffer_reset - reset a ring buffer
4191  * @buffer: The ring buffer to reset all cpu buffers
4192  */
4193 void ring_buffer_reset(struct ring_buffer *buffer)
4194 {
4195         int cpu;
4196
4197         for_each_buffer_cpu(buffer, cpu)
4198                 ring_buffer_reset_cpu(buffer, cpu);
4199 }
4200 EXPORT_SYMBOL_GPL(ring_buffer_reset);
4201
4202 /**
4203  * rind_buffer_empty - is the ring buffer empty?
4204  * @buffer: The ring buffer to test
4205  */
4206 int ring_buffer_empty(struct ring_buffer *buffer)
4207 {
4208         struct ring_buffer_per_cpu *cpu_buffer;
4209         unsigned long flags;
4210         int dolock;
4211         int cpu;
4212         int ret;
4213
4214         dolock = rb_ok_to_lock();
4215
4216         /* yes this is racy, but if you don't like the race, lock the buffer */
4217         for_each_buffer_cpu(buffer, cpu) {
4218                 cpu_buffer = buffer->buffers[cpu];
4219                 local_irq_save(flags);
4220                 if (dolock)
4221                         raw_spin_lock(&cpu_buffer->reader_lock);
4222                 ret = rb_per_cpu_empty(cpu_buffer);
4223                 if (dolock)
4224                         raw_spin_unlock(&cpu_buffer->reader_lock);
4225                 local_irq_restore(flags);
4226
4227                 if (!ret)
4228                         return 0;
4229         }
4230
4231         return 1;
4232 }
4233 EXPORT_SYMBOL_GPL(ring_buffer_empty);
4234
4235 /**
4236  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
4237  * @buffer: The ring buffer
4238  * @cpu: The CPU buffer to test
4239  */
4240 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
4241 {
4242         struct ring_buffer_per_cpu *cpu_buffer;
4243         unsigned long flags;
4244         int dolock;
4245         int ret;
4246
4247         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4248                 return 1;
4249
4250         dolock = rb_ok_to_lock();
4251
4252         cpu_buffer = buffer->buffers[cpu];
4253         local_irq_save(flags);
4254         if (dolock)
4255                 raw_spin_lock(&cpu_buffer->reader_lock);
4256         ret = rb_per_cpu_empty(cpu_buffer);
4257         if (dolock)
4258                 raw_spin_unlock(&cpu_buffer->reader_lock);
4259         local_irq_restore(flags);
4260
4261         return ret;
4262 }
4263 EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
4264
4265 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
4266 /**
4267  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
4268  * @buffer_a: One buffer to swap with
4269  * @buffer_b: The other buffer to swap with
4270  *
4271  * This function is useful for tracers that want to take a "snapshot"
4272  * of a CPU buffer and has another back up buffer lying around.
4273  * it is expected that the tracer handles the cpu buffer not being
4274  * used at the moment.
4275  */
4276 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
4277                          struct ring_buffer *buffer_b, int cpu)
4278 {
4279         struct ring_buffer_per_cpu *cpu_buffer_a;
4280         struct ring_buffer_per_cpu *cpu_buffer_b;
4281         int ret = -EINVAL;
4282
4283         if (!cpumask_test_cpu(cpu, buffer_a->cpumask) ||
4284             !cpumask_test_cpu(cpu, buffer_b->cpumask))
4285                 goto out;
4286
4287         cpu_buffer_a = buffer_a->buffers[cpu];
4288         cpu_buffer_b = buffer_b->buffers[cpu];
4289
4290         /* At least make sure the two buffers are somewhat the same */
4291         if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages)
4292                 goto out;
4293
4294         ret = -EAGAIN;
4295
4296         if (ring_buffer_flags != RB_BUFFERS_ON)
4297                 goto out;
4298
4299         if (atomic_read(&buffer_a->record_disabled))
4300                 goto out;
4301
4302         if (atomic_read(&buffer_b->record_disabled))
4303                 goto out;
4304
4305         if (atomic_read(&cpu_buffer_a->record_disabled))
4306                 goto out;
4307
4308         if (atomic_read(&cpu_buffer_b->record_disabled))
4309                 goto out;
4310
4311         /*
4312          * We can't do a synchronize_sched here because this
4313          * function can be called in atomic context.
4314          * Normally this will be called from the same CPU as cpu.
4315          * If not it's up to the caller to protect this.
4316          */
4317         atomic_inc(&cpu_buffer_a->record_disabled);
4318         atomic_inc(&cpu_buffer_b->record_disabled);
4319
4320         ret = -EBUSY;
4321         if (local_read(&cpu_buffer_a->committing))
4322                 goto out_dec;
4323         if (local_read(&cpu_buffer_b->committing))
4324                 goto out_dec;
4325
4326         buffer_a->buffers[cpu] = cpu_buffer_b;
4327         buffer_b->buffers[cpu] = cpu_buffer_a;
4328
4329         cpu_buffer_b->buffer = buffer_a;
4330         cpu_buffer_a->buffer = buffer_b;
4331
4332         ret = 0;
4333
4334 out_dec:
4335         atomic_dec(&cpu_buffer_a->record_disabled);
4336         atomic_dec(&cpu_buffer_b->record_disabled);
4337 out:
4338         return ret;
4339 }
4340 EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
4341 #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */
4342
4343 /**
4344  * ring_buffer_alloc_read_page - allocate a page to read from buffer
4345  * @buffer: the buffer to allocate for.
4346  *
4347  * This function is used in conjunction with ring_buffer_read_page.
4348  * When reading a full page from the ring buffer, these functions
4349  * can be used to speed up the process. The calling function should
4350  * allocate a few pages first with this function. Then when it
4351  * needs to get pages from the ring buffer, it passes the result
4352  * of this function into ring_buffer_read_page, which will swap
4353  * the page that was allocated, with the read page of the buffer.
4354  *
4355  * Returns:
4356  *  The page allocated, or NULL on error.
4357  */
4358 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
4359 {
4360         struct buffer_data_page *bpage;
4361         struct page *page;
4362
4363         page = alloc_pages_node(cpu_to_node(cpu),
4364                                 GFP_KERNEL | __GFP_NORETRY, 0);
4365         if (!page)
4366                 return NULL;
4367
4368         bpage = page_address(page);
4369
4370         rb_init_page(bpage);
4371
4372         return bpage;
4373 }
4374 EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
4375
4376 /**
4377  * ring_buffer_free_read_page - free an allocated read page
4378  * @buffer: the buffer the page was allocate for
4379  * @data: the page to free
4380  *
4381  * Free a page allocated from ring_buffer_alloc_read_page.
4382  */
4383 void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data)
4384 {
4385         free_page((unsigned long)data);
4386 }
4387 EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
4388
4389 /**
4390  * ring_buffer_read_page - extract a page from the ring buffer
4391  * @buffer: buffer to extract from
4392  * @data_page: the page to use allocated from ring_buffer_alloc_read_page
4393  * @len: amount to extract
4394  * @cpu: the cpu of the buffer to extract
4395  * @full: should the extraction only happen when the page is full.
4396  *
4397  * This function will pull out a page from the ring buffer and consume it.
4398  * @data_page must be the address of the variable that was returned
4399  * from ring_buffer_alloc_read_page. This is because the page might be used
4400  * to swap with a page in the ring buffer.
4401  *
4402  * for example:
4403  *      rpage = ring_buffer_alloc_read_page(buffer);
4404  *      if (!rpage)
4405  *              return error;
4406  *      ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
4407  *      if (ret >= 0)
4408  *              process_page(rpage, ret);
4409  *
4410  * When @full is set, the function will not return true unless
4411  * the writer is off the reader page.
4412  *
4413  * Note: it is up to the calling functions to handle sleeps and wakeups.
4414  *  The ring buffer can be used anywhere in the kernel and can not
4415  *  blindly call wake_up. The layer that uses the ring buffer must be
4416  *  responsible for that.
4417  *
4418  * Returns:
4419  *  >=0 if data has been transferred, returns the offset of consumed data.
4420  *  <0 if no data has been transferred.
4421  */
4422 int ring_buffer_read_page(struct ring_buffer *buffer,
4423                           void **data_page, size_t len, int cpu, int full)
4424 {
4425         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
4426         struct ring_buffer_event *event;
4427         struct buffer_data_page *bpage;
4428         struct buffer_page *reader;
4429         unsigned long missed_events;
4430         unsigned long flags;
4431         unsigned int commit;
4432         unsigned int read;
4433         u64 save_timestamp;
4434         int ret = -1;
4435
4436         if (!cpumask_test_cpu(cpu, buffer->cpumask))
4437                 goto out;
4438
4439         /*
4440          * If len is not big enough to hold the page header, then
4441          * we can not copy anything.
4442          */
4443         if (len <= BUF_PAGE_HDR_SIZE)
4444                 goto out;
4445
4446         len -= BUF_PAGE_HDR_SIZE;
4447
4448         if (!data_page)
4449                 goto out;
4450
4451         bpage = *data_page;
4452         if (!bpage)
4453                 goto out;
4454
4455         raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
4456
4457         reader = rb_get_reader_page(cpu_buffer);
4458         if (!reader)
4459                 goto out_unlock;
4460
4461         event = rb_reader_event(cpu_buffer);
4462
4463         read = reader->read;
4464         commit = rb_page_commit(reader);
4465
4466         /* Check if any events were dropped */
4467         missed_events = cpu_buffer->lost_events;
4468
4469         /*
4470          * If this page has been partially read or
4471          * if len is not big enough to read the rest of the page or
4472          * a writer is still on the page, then
4473          * we must copy the data from the page to the buffer.
4474          * Otherwise, we can simply swap the page with the one passed in.
4475          */
4476         if (read || (len < (commit - read)) ||
4477             cpu_buffer->reader_page == cpu_buffer->commit_page) {
4478                 struct buffer_data_page *rpage = cpu_buffer->reader_page->page;
4479                 unsigned int rpos = read;
4480                 unsigned int pos = 0;
4481                 unsigned int size;
4482
4483                 if (full)
4484                         goto out_unlock;
4485
4486                 if (len > (commit - read))
4487                         len = (commit - read);
4488
4489                 /* Always keep the time extend and data together */
4490                 size = rb_event_ts_length(event);
4491
4492                 if (len < size)
4493                         goto out_unlock;
4494
4495                 /* save the current timestamp, since the user will need it */
4496                 save_timestamp = cpu_buffer->read_stamp;
4497
4498                 /* Need to copy one event at a time */
4499                 do {
4500                         /* We need the size of one event, because
4501                          * rb_advance_reader only advances by one event,
4502                          * whereas rb_event_ts_length may include the size of
4503                          * one or two events.
4504                          * We have already ensured there's enough space if this
4505                          * is a time extend. */
4506                         size = rb_event_length(event);
4507                         memcpy(bpage->data + pos, rpage->data + rpos, size);
4508
4509                         len -= size;
4510
4511                         rb_advance_reader(cpu_buffer);
4512                         rpos = reader->read;
4513                         pos += size;
4514
4515                         if (rpos >= commit)
4516                                 break;
4517
4518                         event = rb_reader_event(cpu_buffer);
4519                         /* Always keep the time extend and data together */
4520                         size = rb_event_ts_length(event);
4521                 } while (len >= size);
4522
4523                 /* update bpage */
4524                 local_set(&bpage->commit, pos);
4525                 bpage->time_stamp = save_timestamp;
4526
4527                 /* we copied everything to the beginning */
4528                 read = 0;
4529         } else {
4530                 /* update the entry counter */
4531                 cpu_buffer->read += rb_page_entries(reader);
4532                 cpu_buffer->read_bytes += BUF_PAGE_SIZE;
4533
4534                 /* swap the pages */
4535                 rb_init_page(bpage);
4536                 bpage = reader->page;
4537                 reader->page = *data_page;
4538                 local_set(&reader->write, 0);
4539                 local_set(&reader->entries, 0);
4540                 reader->read = 0;
4541                 *data_page = bpage;
4542
4543                 /*
4544                  * Use the real_end for the data size,
4545                  * This gives us a chance to store the lost events
4546                  * on the page.
4547                  */
4548                 if (reader->real_end)
4549                         local_set(&bpage->commit, reader->real_end);
4550         }
4551         ret = read;
4552
4553         cpu_buffer->lost_events = 0;
4554
4555         commit = local_read(&bpage->commit);
4556         /*
4557          * Set a flag in the commit field if we lost events
4558          */
4559         if (missed_events) {
4560                 /* If there is room at the end of the page to save the
4561                  * missed events, then record it there.
4562                  */
4563                 if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
4564                         memcpy(&bpage->data[commit], &missed_events,
4565                                sizeof(missed_events));
4566                         local_add(RB_MISSED_STORED, &bpage->commit);
4567                         commit += sizeof(missed_events);
4568                 }
4569                 local_add(RB_MISSED_EVENTS, &bpage->commit);
4570         }
4571
4572         /*
4573          * This page may be off to user land. Zero it out here.
4574          */
4575         if (commit < BUF_PAGE_SIZE)
4576                 memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
4577
4578  out_unlock:
4579         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
4580
4581  out:
4582         return ret;
4583 }
4584 EXPORT_SYMBOL_GPL(ring_buffer_read_page);
4585
4586 #ifdef CONFIG_HOTPLUG_CPU
4587 static int rb_cpu_notify(struct notifier_block *self,
4588                          unsigned long action, void *hcpu)
4589 {
4590         struct ring_buffer *buffer =
4591                 container_of(self, struct ring_buffer, cpu_notify);
4592         long cpu = (long)hcpu;
4593         int cpu_i, nr_pages_same;
4594         unsigned int nr_pages;
4595
4596         switch (action) {
4597         case CPU_UP_PREPARE:
4598         case CPU_UP_PREPARE_FROZEN:
4599                 if (cpumask_test_cpu(cpu, buffer->cpumask))
4600                         return NOTIFY_OK;
4601
4602                 nr_pages = 0;
4603                 nr_pages_same = 1;
4604                 /* check if all cpu sizes are same */
4605                 for_each_buffer_cpu(buffer, cpu_i) {
4606                         /* fill in the size from first enabled cpu */
4607                         if (nr_pages == 0)
4608                                 nr_pages = buffer->buffers[cpu_i]->nr_pages;
4609                         if (nr_pages != buffer->buffers[cpu_i]->nr_pages) {
4610                                 nr_pages_same = 0;
4611                                 break;
4612                         }
4613                 }
4614                 /* allocate minimum pages, user can later expand it */
4615                 if (!nr_pages_same)
4616                         nr_pages = 2;
4617                 buffer->buffers[cpu] =
4618                         rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
4619                 if (!buffer->buffers[cpu]) {
4620                         WARN(1, "failed to allocate ring buffer on CPU %ld\n",
4621                              cpu);
4622                         return NOTIFY_OK;
4623                 }
4624                 smp_wmb();
4625                 cpumask_set_cpu(cpu, buffer->cpumask);
4626                 break;
4627         case CPU_DOWN_PREPARE:
4628         case CPU_DOWN_PREPARE_FROZEN:
4629                 /*
4630                  * Do nothing.
4631                  *  If we were to free the buffer, then the user would
4632                  *  lose any trace that was in the buffer.
4633                  */
4634                 break;
4635         default:
4636                 break;
4637         }
4638         return NOTIFY_OK;
4639 }
4640 #endif
4641
4642 #ifdef CONFIG_RING_BUFFER_STARTUP_TEST
4643 /*
4644  * This is a basic integrity check of the ring buffer.
4645  * Late in the boot cycle this test will run when configured in.
4646  * It will kick off a thread per CPU that will go into a loop
4647  * writing to the per cpu ring buffer various sizes of data.
4648  * Some of the data will be large items, some small.
4649  *
4650  * Another thread is created that goes into a spin, sending out
4651  * IPIs to the other CPUs to also write into the ring buffer.
4652  * this is to test the nesting ability of the buffer.
4653  *
4654  * Basic stats are recorded and reported. If something in the
4655  * ring buffer should happen that's not expected, a big warning
4656  * is displayed and all ring buffers are disabled.
4657  */
4658 static struct task_struct *rb_threads[NR_CPUS] __initdata;
4659
4660 struct rb_test_data {
4661         struct ring_buffer      *buffer;
4662         unsigned long           events;
4663         unsigned long           bytes_written;
4664         unsigned long           bytes_alloc;
4665         unsigned long           bytes_dropped;
4666         unsigned long           events_nested;
4667         unsigned long           bytes_written_nested;
4668         unsigned long           bytes_alloc_nested;
4669         unsigned long           bytes_dropped_nested;
4670         int                     min_size_nested;
4671         int                     max_size_nested;
4672         int                     max_size;
4673         int                     min_size;
4674         int                     cpu;
4675         int                     cnt;
4676 };
4677
4678 static struct rb_test_data rb_data[NR_CPUS] __initdata;
4679
4680 /* 1 meg per cpu */
4681 #define RB_TEST_BUFFER_SIZE     1048576
4682
4683 static char rb_string[] __initdata =
4684         "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\"
4685         "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890"
4686         "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv";
4687
4688 static bool rb_test_started __initdata;
4689
4690 struct rb_item {
4691         int size;
4692         char str[];
4693 };
4694
4695 static __init int rb_write_something(struct rb_test_data *data, bool nested)
4696 {
4697         struct ring_buffer_event *event;
4698         struct rb_item *item;
4699         bool started;
4700         int event_len;
4701         int size;
4702         int len;
4703         int cnt;
4704
4705         /* Have nested writes different that what is written */
4706         cnt = data->cnt + (nested ? 27 : 0);
4707
4708         /* Multiply cnt by ~e, to make some unique increment */
4709         size = (data->cnt * 68 / 25) % (sizeof(rb_string) - 1);
4710
4711         len = size + sizeof(struct rb_item);
4712
4713         started = rb_test_started;
4714         /* read rb_test_started before checking buffer enabled */
4715         smp_rmb();
4716
4717         event = ring_buffer_lock_reserve(data->buffer, len);
4718         if (!event) {
4719                 /* Ignore dropped events before test starts. */
4720                 if (started) {
4721                         if (nested)
4722                                 data->bytes_dropped += len;
4723                         else
4724                                 data->bytes_dropped_nested += len;
4725                 }
4726                 return len;
4727         }
4728
4729         event_len = ring_buffer_event_length(event);
4730
4731         if (RB_WARN_ON(data->buffer, event_len < len))
4732                 goto out;
4733
4734         item = ring_buffer_event_data(event);
4735         item->size = size;
4736         memcpy(item->str, rb_string, size);
4737
4738         if (nested) {
4739                 data->bytes_alloc_nested += event_len;
4740                 data->bytes_written_nested += len;
4741                 data->events_nested++;
4742                 if (!data->min_size_nested || len < data->min_size_nested)
4743                         data->min_size_nested = len;
4744                 if (len > data->max_size_nested)
4745                         data->max_size_nested = len;
4746         } else {
4747                 data->bytes_alloc += event_len;
4748                 data->bytes_written += len;
4749                 data->events++;
4750                 if (!data->min_size || len < data->min_size)
4751                         data->max_size = len;
4752                 if (len > data->max_size)
4753                         data->max_size = len;
4754         }
4755
4756  out:
4757         ring_buffer_unlock_commit(data->buffer, event);
4758
4759         return 0;
4760 }
4761
4762 static __init int rb_test(void *arg)
4763 {
4764         struct rb_test_data *data = arg;
4765
4766         while (!kthread_should_stop()) {
4767                 rb_write_something(data, false);
4768                 data->cnt++;
4769
4770                 set_current_state(TASK_INTERRUPTIBLE);
4771                 /* Now sleep between a min of 100-300us and a max of 1ms */
4772                 usleep_range(((data->cnt % 3) + 1) * 100, 1000);
4773         }
4774
4775         return 0;
4776 }
4777
4778 static __init void rb_ipi(void *ignore)
4779 {
4780         struct rb_test_data *data;
4781         int cpu = smp_processor_id();
4782
4783         data = &rb_data[cpu];
4784         rb_write_something(data, true);
4785 }
4786
4787 static __init int rb_hammer_test(void *arg)
4788 {
4789         while (!kthread_should_stop()) {
4790
4791                 /* Send an IPI to all cpus to write data! */
4792                 smp_call_function(rb_ipi, NULL, 1);
4793                 /* No sleep, but for non preempt, let others run */
4794                 schedule();
4795         }
4796
4797         return 0;
4798 }
4799
4800 static __init int test_ringbuffer(void)
4801 {
4802         struct task_struct *rb_hammer;
4803         struct ring_buffer *buffer;
4804         int cpu;
4805         int ret = 0;
4806
4807         pr_info("Running ring buffer tests...\n");
4808
4809         buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE);
4810         if (WARN_ON(!buffer))
4811                 return 0;
4812
4813         /* Disable buffer so that threads can't write to it yet */
4814         ring_buffer_record_off(buffer);
4815
4816         for_each_online_cpu(cpu) {
4817                 rb_data[cpu].buffer = buffer;
4818                 rb_data[cpu].cpu = cpu;
4819                 rb_data[cpu].cnt = cpu;
4820                 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu],
4821                                                  "rbtester/%d", cpu);
4822                 if (WARN_ON(!rb_threads[cpu])) {
4823                         pr_cont("FAILED\n");
4824                         ret = -1;
4825                         goto out_free;
4826                 }
4827
4828                 kthread_bind(rb_threads[cpu], cpu);
4829                 wake_up_process(rb_threads[cpu]);
4830         }
4831
4832         /* Now create the rb hammer! */
4833         rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer");
4834         if (WARN_ON(!rb_hammer)) {
4835                 pr_cont("FAILED\n");
4836                 ret = -1;
4837                 goto out_free;
4838         }
4839
4840         ring_buffer_record_on(buffer);
4841         /*
4842          * Show buffer is enabled before setting rb_test_started.
4843          * Yes there's a small race window where events could be
4844          * dropped and the thread wont catch it. But when a ring
4845          * buffer gets enabled, there will always be some kind of
4846          * delay before other CPUs see it. Thus, we don't care about
4847          * those dropped events. We care about events dropped after
4848          * the threads see that the buffer is active.
4849          */
4850         smp_wmb();
4851         rb_test_started = true;
4852
4853         set_current_state(TASK_INTERRUPTIBLE);
4854         /* Just run for 10 seconds */;
4855         schedule_timeout(10 * HZ);
4856
4857         kthread_stop(rb_hammer);
4858
4859  out_free:
4860         for_each_online_cpu(cpu) {
4861                 if (!rb_threads[cpu])
4862                         break;
4863                 kthread_stop(rb_threads[cpu]);
4864         }
4865         if (ret) {
4866                 ring_buffer_free(buffer);
4867                 return ret;
4868         }
4869
4870         /* Report! */
4871         pr_info("finished\n");
4872         for_each_online_cpu(cpu) {
4873                 struct ring_buffer_event *event;
4874                 struct rb_test_data *data = &rb_data[cpu];
4875                 struct rb_item *item;
4876                 unsigned long total_events;
4877                 unsigned long total_dropped;
4878                 unsigned long total_written;
4879                 unsigned long total_alloc;
4880                 unsigned long total_read = 0;
4881                 unsigned long total_size = 0;
4882                 unsigned long total_len = 0;
4883                 unsigned long total_lost = 0;
4884                 unsigned long lost;
4885                 int big_event_size;
4886                 int small_event_size;
4887
4888                 ret = -1;
4889
4890                 total_events = data->events + data->events_nested;
4891                 total_written = data->bytes_written + data->bytes_written_nested;
4892                 total_alloc = data->bytes_alloc + data->bytes_alloc_nested;
4893                 total_dropped = data->bytes_dropped + data->bytes_dropped_nested;
4894
4895                 big_event_size = data->max_size + data->max_size_nested;
4896                 small_event_size = data->min_size + data->min_size_nested;
4897
4898                 pr_info("CPU %d:\n", cpu);
4899                 pr_info("              events:    %ld\n", total_events);
4900                 pr_info("       dropped bytes:    %ld\n", total_dropped);
4901                 pr_info("       alloced bytes:    %ld\n", total_alloc);
4902                 pr_info("       written bytes:    %ld\n", total_written);
4903                 pr_info("       biggest event:    %d\n", big_event_size);
4904                 pr_info("      smallest event:    %d\n", small_event_size);
4905
4906                 if (RB_WARN_ON(buffer, total_dropped))
4907                         break;
4908
4909                 ret = 0;
4910
4911                 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
4912                         total_lost += lost;
4913                         item = ring_buffer_event_data(event);
4914                         total_len += ring_buffer_event_length(event);
4915                         total_size += item->size + sizeof(struct rb_item);
4916                         if (memcmp(&item->str[0], rb_string, item->size) != 0) {
4917                                 pr_info("FAILED!\n");
4918                                 pr_info("buffer had: %.*s\n", item->size, item->str);
4919                                 pr_info("expected:   %.*s\n", item->size, rb_string);
4920                                 RB_WARN_ON(buffer, 1);
4921                                 ret = -1;
4922                                 break;
4923                         }
4924                         total_read++;
4925                 }
4926                 if (ret)
4927                         break;
4928
4929                 ret = -1;
4930
4931                 pr_info("         read events:   %ld\n", total_read);
4932                 pr_info("         lost events:   %ld\n", total_lost);
4933                 pr_info("        total events:   %ld\n", total_lost + total_read);
4934                 pr_info("  recorded len bytes:   %ld\n", total_len);
4935                 pr_info(" recorded size bytes:   %ld\n", total_size);
4936                 if (total_lost)
4937                         pr_info(" With dropped events, record len and size may not match\n"
4938                                 " alloced and written from above\n");
4939                 if (!total_lost) {
4940                         if (RB_WARN_ON(buffer, total_len != total_alloc ||
4941                                        total_size != total_written))
4942                                 break;
4943                 }
4944                 if (RB_WARN_ON(buffer, total_lost + total_read != total_events))
4945                         break;
4946
4947                 ret = 0;
4948         }
4949         if (!ret)
4950                 pr_info("Ring buffer PASSED!\n");
4951
4952         ring_buffer_free(buffer);
4953         return 0;
4954 }
4955
4956 late_initcall(test_ringbuffer);
4957 #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */