/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ring_buffer.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/sched.h>	/* used for sched_clock() (for now) */
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/fs.h>

#include "trace.h"

/*
 * A fast way to enable or disable all ring buffers is to
 * call tracing_on or tracing_off. Turning off the ring buffers
 * prevents all ring buffers from being recorded to.
 * Turning this switch on makes it OK to write to the
 * ring buffer, if the ring buffer is enabled itself.
 *
 * There are three layers that must be on in order to write
 * to the ring buffer.
 *
 * 1) This global flag must be set.
 * 2) The ring buffer must be enabled for recording.
 * 3) The per cpu buffer must be enabled for recording.
 *
 * In case of an anomaly, this global flag has a bit set that
 * will permanently disable all ring buffers.
 */
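
/*
 * A minimal sketch of the three-layer gate described above, mirroring
 * the checks that ring_buffer_lock_reserve() performs further down.
 * The helper name is hypothetical and the per-cpu type is defined
 * later in this file, so the sketch is compiled out:
 */
#if 0	/* illustrative only */
static int rb_sketch_may_record(struct ring_buffer *buffer,
				struct ring_buffer_per_cpu *cpu_buffer)
{
	if (ring_buffer_flags != RB_BUFFERS_ON)		/* 1) global flag */
		return 0;
	if (atomic_read(&buffer->record_disabled))	/* 2) this buffer */
		return 0;
	if (atomic_read(&cpu_buffer->record_disabled))	/* 3) per cpu */
		return 0;
	return 1;
}
#endif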

/*
 * Global flag to disable all recording to ring buffers
 *  This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */

enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

/**
 * tracing_on - enable all tracing buffers
 *
 * This function enables all tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	set_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}

/**
 * tracing_off - turn off all tracing buffers
 *
 * This function stops all tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	clear_bit(RB_BUFFERS_ON_BIT, &ring_buffer_flags);
}

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}
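
/*
 * A minimal usage sketch for the switches above (hypothetical caller,
 * not part of this file). Note that once tracing_off_permanent() has
 * been called, tracing_on() can set the ON bit again but recording
 * stays disabled, because writers require ring_buffer_flags to be
 * exactly RB_BUFFERS_ON:
 */
#if 0	/* illustrative only */
static void example_pause_tracing(void)
{
	tracing_off();	/* all ring buffer writes now fail fast */
	/* ... inspect trace data without new entries racing in ... */
	tracing_on();	/* recording may resume (if buffers allow it) */
}
#endif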

/* Up this if you want to test the TIME_EXTENTS and normalization */
#define DEBUG_SHIFT 0

/* FIXME!!! */
u64 ring_buffer_time_stamp(int cpu)
{
	u64 time;

	preempt_disable_notrace();
	/* shift to debug/test normalization and TIME_EXTENTS */
	time = sched_clock() << DEBUG_SHIFT;
	preempt_enable_notrace();

	return time;
}

void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
{
	/* Just stupid testing the normalize function and deltas */
	*ts >>= DEBUG_SHIFT;
}

#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
#define RB_ALIGNMENT_SHIFT	2
#define RB_ALIGNMENT		(1 << RB_ALIGNMENT_SHIFT)
#define RB_MAX_SMALL_DATA	28

enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};
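
/*
 * A worked example of the data event length encoding (see
 * rb_update_event() and rb_event_length() below): a data event
 * carrying 10 bytes stores len = (10 + 3) >> 2 = 3, i.e. three 4-byte
 * words of payload; rb_event_length() then reports 3 << 2 = 12 payload
 * bytes plus RB_EVNT_HDR_SIZE. A payload larger than RB_MAX_SMALL_DATA
 * (28 bytes) does not fit in the len field, so len is set to 0 and the
 * exact byte count goes in array[0] instead.
 */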

/* inline for ring buffer fast paths */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	unsigned length;

	switch (event->type) {
	case RINGBUF_TYPE_PADDING:
		/* undefined */
		return -1;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		if (event->len)
			length = event->len << RB_ALIGNMENT_SHIFT;
		else
			length = event->array[0];
		return length + RB_EVNT_HDR_SIZE;
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 */
unsigned ring_buffer_event_length(struct ring_buffer_event *event)
{
	return rb_event_length(event);
}

/* inline for ring buffer fast paths */
static inline void *
rb_event_data(struct ring_buffer_event *event)
{
	BUG_ON(event->type != RINGBUF_TYPE_DATA);
	/* If length is in len field, then array[0] has the data */
	if (event->len)
		return (void *)&event->array[0];
	/* Otherwise length is in array[0] and array[1] has the data */
	return (void *)&event->array[1];
}

/**
 * ring_buffer_event_data - return the data of the event
 * @event: the event to get the data from
 */
void *ring_buffer_event_data(struct ring_buffer_event *event)
{
	return rb_event_data(event);
}

#define for_each_buffer_cpu(buffer, cpu)		\
	for_each_cpu_mask(cpu, buffer->cpumask)

#define TS_SHIFT	27
#define TS_MASK		((1ULL << TS_SHIFT) - 1)
#define TS_DELTA_TEST	(~TS_MASK)

/*
 * This hack stolen from mm/slob.c.
 * We can store per page timing information in the page frame of the page.
 * Thanks to Peter Zijlstra for suggesting this idea.
 */
struct buffer_page {
	u64		 time_stamp;	/* page time stamp */
	local_t		 write;		/* index for next write */
	local_t		 commit;	/* write committed index */
	unsigned	 read;		/* index for next read */
	struct list_head list;		/* list of free pages */
	void *page;			/* Actual data page */
};

/*
 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
 * this issue out.
 */
static inline void free_buffer_page(struct buffer_page *bpage)
{
	if (bpage->page)
		free_page((unsigned long)bpage->page);
	kfree(bpage);
}

/*
 * We need to fit the time_stamp delta into 27 bits.
 */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}
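
/*
 * Worked example: with a nanosecond clock (sched_clock()), 27 bits hold
 * deltas up to 2^27 ns, roughly 134 ms. Two events further apart than
 * that make test_time_stamp() return 1, and the writer must insert a
 * TIME_EXTEND event carrying the extra bits (see rb_add_time_stamp()
 * below).
 */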

#define BUF_PAGE_SIZE PAGE_SIZE

/*
 * head_page == tail_page && head == tail then buffer is empty.
 */
struct ring_buffer_per_cpu {
	int				cpu;
	struct ring_buffer		*buffer;
	spinlock_t			reader_lock; /* serialize readers */
	raw_spinlock_t			lock;
	struct lock_class_key		lock_key;
	struct list_head		pages;
	struct buffer_page		*head_page;	/* read from head */
	struct buffer_page		*tail_page;	/* write to tail */
	struct buffer_page		*commit_page;	/* committed pages */
	struct buffer_page		*reader_page;
	unsigned long			overrun;
	unsigned long			entries;
	u64				write_stamp;
	u64				read_stamp;
	atomic_t			record_disabled;
};

struct ring_buffer {
	unsigned long			size;
	unsigned			pages;
	unsigned			flags;
	int				cpus;
	cpumask_t			cpumask;
	atomic_t			record_disabled;

	struct mutex			mutex;

	struct ring_buffer_per_cpu	**buffers;
};

struct ring_buffer_iter {
	struct ring_buffer_per_cpu	*cpu_buffer;
	unsigned long			head;
	struct buffer_page		*head_page;
	u64				read_stamp;
};

/* buffer may be either ring_buffer or ring_buffer_per_cpu */
#define RB_WARN_ON(buffer, cond)				\
	({							\
		int _____ret = unlikely(cond);			\
		if (_____ret) {					\
			atomic_inc(&buffer->record_disabled);	\
			WARN_ON(1);				\
		}						\
		_____ret;					\
	})

/**
 * rb_check_pages - integrity check of buffer pages
 * @cpu_buffer: CPU buffer with pages to test
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted.
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	list_for_each_entry_safe(page, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       page->list.next->prev != &page->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       page->list.prev->next != &page->list))
			return -1;
	}

	return 0;
}

static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;
	unsigned long addr;
	LIST_HEAD(pages);
	unsigned i;

	for (i = 0; i < nr_pages; i++) {
		page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
				    GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
		if (!page)
			goto free_pages;
		list_add(&page->list, &pages);

		addr = __get_free_page(GFP_KERNEL);
		if (!addr)
			goto free_pages;
		page->page = (void *)addr;
	}

	list_splice(&pages, head);

	rb_check_pages(cpu_buffer);

	return 0;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	return -ENOMEM;
}

static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *page;
	unsigned long addr;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	spin_lock_init(&cpu_buffer->reader_lock);
	cpu_buffer->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
	INIT_LIST_HEAD(&cpu_buffer->pages);

	page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!page)
		goto fail_free_buffer;

	cpu_buffer->reader_page = page;
	addr = __get_free_page(GFP_KERNEL);
	if (!addr)
		goto fail_free_reader;
	page->page = (void *)addr;

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);

	ret = rb_allocate_pages(cpu_buffer, buffer->pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages.next, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = &cpu_buffer->pages;
	struct buffer_page *page, *tmp;

	list_del_init(&cpu_buffer->reader_page->list);
	free_buffer_page(cpu_buffer->reader_page);

	list_for_each_entry_safe(page, tmp, head, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	kfree(cpu_buffer);
}

/*
 * Causes compile errors if the struct buffer_page gets bigger
 * than the struct page.
 */
extern int ring_buffer_page_too_big(void);

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu;

	/* Paranoid! Optimizes out when all is well */
	if (sizeof(struct buffer_page) > sizeof(struct page))
		ring_buffer_page_too_big();

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;

	/* need at least two pages */
	if (buffer->pages == 1)
		buffer->pages++;

	buffer->cpumask = cpu_possible_map;
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_buffer;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	kfree(buffer);
}
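
/*
 * A minimal allocation sketch (hypothetical caller, not part of this
 * file): the requested size is rounded up to whole pages per CPU, and
 * RB_FL_OVERWRITE selects flight-recorder behavior instead of dropping
 * new writes when the buffer fills.
 */
#if 0	/* illustrative only */
static int example_setup(void)
{
	struct ring_buffer *buffer;

	/* one megabyte of trace data per CPU, overwriting when full */
	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);
	if (!buffer)
		return -ENOMEM;

	/* ... record events ... */

	ring_buffer_free(buffer);
	return 0;
}
#endif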

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

static void
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
{
	struct buffer_page *page;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
			return;
		p = cpu_buffer->pages.next;
		page = list_entry(p, struct buffer_page, list);
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	if (RB_WARN_ON(cpu_buffer, list_empty(&cpu_buffer->pages)))
		return;

	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

static void
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
		struct list_head *pages, unsigned nr_pages)
{
	struct buffer_page *page;
	struct list_head *p;
	unsigned i;

	atomic_inc(&cpu_buffer->record_disabled);
	synchronize_sched();

	for (i = 0; i < nr_pages; i++) {
		if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
			return;
		p = pages->next;
		page = list_entry(p, struct buffer_page, list);
		list_del_init(&page->list);
		list_add_tail(&page->list, &cpu_buffer->pages);
	}
	rb_reset_cpu(cpu_buffer);

	rb_check_pages(cpu_buffer);

	atomic_dec(&cpu_buffer->record_disabled);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * The tracer is responsible for making sure that the buffer is
 * not being used while changing the size.
 * Note: We may be able to change the above requirement by using
 *  RCU synchronizations.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns -1 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages, rm_pages, new_pages;
	struct buffer_page *page, *tmp;
	unsigned long buffer_size;
	unsigned long addr;
	LIST_HEAD(pages);
	int i, cpu;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;
	buffer_size = buffer->pages * BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	if (size == buffer_size)
		return size;

	mutex_lock(&buffer->mutex);

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (size < buffer_size) {

		/* easy case, just free pages */
		if (RB_WARN_ON(buffer, nr_pages >= buffer->pages)) {
			mutex_unlock(&buffer->mutex);
			return -1;
		}

		rm_pages = buffer->pages - nr_pages;

		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_remove_pages(cpu_buffer, rm_pages);
		}
		goto out;
	}

	/*
	 * This is a bit more difficult. We only want to add pages
	 * when we can allocate enough for all CPUs. We do this
	 * by allocating all the pages and storing them on a local
	 * linked list. If we succeed in our allocation, then we
	 * add these pages to the cpu_buffers. Otherwise we just free
	 * them all and return -ENOMEM.
	 */
	if (RB_WARN_ON(buffer, nr_pages <= buffer->pages)) {
		mutex_unlock(&buffer->mutex);
		return -1;
	}

	new_pages = nr_pages - buffer->pages;

	for_each_buffer_cpu(buffer, cpu) {
		for (i = 0; i < new_pages; i++) {
			page = kzalloc_node(ALIGN(sizeof(*page),
						  cache_line_size()),
					    GFP_KERNEL, cpu_to_node(cpu));
			if (!page)
				goto free_pages;
			list_add(&page->list, &pages);
			addr = __get_free_page(GFP_KERNEL);
			if (!addr)
				goto free_pages;
			page->page = (void *)addr;
		}
	}

	for_each_buffer_cpu(buffer, cpu) {
		cpu_buffer = buffer->buffers[cpu];
		rb_insert_pages(cpu_buffer, &pages, new_pages);
	}

	if (RB_WARN_ON(buffer, !list_empty(&pages))) {
		mutex_unlock(&buffer->mutex);
		return -1;
	}

 out:
	buffer->pages = nr_pages;
	mutex_unlock(&buffer->mutex);

	return size;

 free_pages:
	list_for_each_entry_safe(page, tmp, &pages, list) {
		list_del_init(&page->list);
		free_buffer_page(page);
	}
	mutex_unlock(&buffer->mutex);
	return -ENOMEM;
}
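
/*
 * A minimal resize sketch (hypothetical caller, not part of this file).
 * The requested size is rounded up to whole pages; the return value is
 * the size actually set, or a negative value on failure:
 */
#if 0	/* illustrative only */
static int example_grow(struct ring_buffer *buffer)
{
	int ret;

	/* grow to four pages per CPU; rounded up to whole pages */
	ret = ring_buffer_resize(buffer, 4 * BUF_PAGE_SIZE);
	return ret < 0 ? ret : 0;
}
#endif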

static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type == RINGBUF_TYPE_PADDING;
}

static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
{
	return page->page + index;
}

static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

static inline struct ring_buffer_event *
rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->head_page,
			       cpu_buffer->head_page->read);
}

static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}

static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->head_page);
}

/*
 * When the tail hits the head and the buffer is in overwrite mode,
 * the head jumps to the next page and all content on the previous
 * page is discarded. But before doing so, we update the overrun
 * variable of the buffer.
 */
static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	unsigned long head;

	for (head = 0; head < rb_head_size(cpu_buffer);
	     head += rb_event_length(event)) {

		event = __rb_page_index(cpu_buffer->head_page, head);
		if (RB_WARN_ON(cpu_buffer, rb_null_event(event)))
			return;
		/* Only count data entries */
		if (event->type != RINGBUF_TYPE_DATA)
			continue;
		cpu_buffer->overrun++;
		cpu_buffer->entries--;
	}
}

static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
			       struct buffer_page **page)
{
	struct list_head *p = (*page)->list.next;

	if (p == &cpu_buffer->pages)
		p = p->next;

	*page = list_entry(p, struct buffer_page, list);
}

static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
}

static inline int
rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
	     struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

static inline void
rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
		    struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	while (cpu_buffer->commit_page->page != (void *)addr) {
		if (RB_WARN_ON(cpu_buffer,
			  cpu_buffer->commit_page == cpu_buffer->tail_page))
			return;
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
	}

	/* Now set the commit to the event's index */
	local_set(&cpu_buffer->commit_page->commit, index);
}

static inline void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {
		cpu_buffer->commit_page->commit =
			cpu_buffer->commit_page->write;
		barrier();
	}
}

static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

static inline void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = cpu_buffer->head_page;
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->time_stamp;
	iter->head = 0;
}

/**
 * ring_buffer_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static inline void
rb_update_event(struct ring_buffer_event *event,
			 unsigned type, unsigned length)
{
	event->type = type;

	switch (type) {

	case RINGBUF_TYPE_PADDING:
		break;

	case RINGBUF_TYPE_TIME_EXTEND:
		event->len =
			(RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_TIME_STAMP:
		event->len =
			(RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
			>> RB_ALIGNMENT_SHIFT;
		break;

	case RINGBUF_TYPE_DATA:
		length -= RB_EVNT_HDR_SIZE;
		if (length > RB_MAX_SMALL_DATA) {
			event->len = 0;
			event->array[0] = length;
		} else
			event->len =
				(length + (RB_ALIGNMENT-1))
				>> RB_ALIGNMENT_SHIFT;
		break;
	default:
		BUG();
	}
}

static inline unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ALIGNMENT);

	return length;
}
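
/*
 * Worked example for rb_calculate_event_length(), assuming the usual
 * 4-byte event header: a 5 byte payload becomes 5 + 4 = 9 bytes,
 * aligned up to 12 bytes reserved on the page. A 30 byte payload
 * exceeds RB_MAX_SMALL_DATA (28), so one extra array[0] slot is added
 * for the length word: 30 + 4 + 4 = 38, aligned up to 40 bytes.
 */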

static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned type, unsigned long length, u64 *ts)
{
	struct buffer_page *tail_page, *head_page, *reader_page;
	unsigned long tail, write;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);
	tail = write - length;

	/* See if we shot past the end of this buffer page */
	if (write > BUF_PAGE_SIZE) {
		struct buffer_page *next_page = tail_page;

		local_irq_save(flags);
		__raw_spin_lock(&cpu_buffer->lock);

		rb_inc_page(cpu_buffer, &next_page);

		head_page = cpu_buffer->head_page;
		reader_page = cpu_buffer->reader_page;

		/* we grabbed the lock before incrementing */
		if (RB_WARN_ON(cpu_buffer, next_page == reader_page))
			goto out_unlock;

		/*
		 * If for some reason, we had an interrupt storm that made
		 * it all the way around the buffer, bail, and warn
		 * about it.
		 */
		if (unlikely(next_page == cpu_buffer->commit_page)) {
			WARN_ON_ONCE(1);
			goto out_unlock;
		}

		if (next_page == head_page) {
			if (!(buffer->flags & RB_FL_OVERWRITE)) {
				/* reset write */
				if (tail <= BUF_PAGE_SIZE)
					local_set(&tail_page->write, tail);
				goto out_unlock;
			}

			/* tail_page has not moved yet? */
			if (tail_page == cpu_buffer->tail_page) {
				/* count overflows */
				rb_update_overflow(cpu_buffer);

				rb_inc_page(cpu_buffer, &head_page);
				cpu_buffer->head_page = head_page;
				cpu_buffer->head_page->read = 0;
			}
		}

		/*
		 * If the tail page is still the same as what we think
		 * it is, then it is up to us to update the tail
		 * pointer.
		 */
		if (tail_page == cpu_buffer->tail_page) {
			local_set(&next_page->write, 0);
			local_set(&next_page->commit, 0);
			cpu_buffer->tail_page = next_page;

			/* reread the time stamp */
			*ts = ring_buffer_time_stamp(cpu_buffer->cpu);
			cpu_buffer->tail_page->time_stamp = *ts;
		}

		/*
		 * The actual tail page has moved forward.
		 */
		if (tail < BUF_PAGE_SIZE) {
			/* Mark the rest of the page with padding */
			event = __rb_page_index(tail_page, tail);
			event->type = RINGBUF_TYPE_PADDING;
		}

		if (tail <= BUF_PAGE_SIZE)
			/* Set the write back to the previous setting */
			local_set(&tail_page->write, tail);

		/*
		 * If this was a commit entry that failed,
		 * increment that too
		 */
		if (tail_page == cpu_buffer->commit_page &&
		    tail == rb_commit_index(cpu_buffer)) {
			rb_set_commit_to_write(cpu_buffer);
		}

		__raw_spin_unlock(&cpu_buffer->lock);
		local_irq_restore(flags);

		/* fail and let the caller try again */
		return ERR_PTR(-EAGAIN);
	}

	/* We reserved something on the buffer */

	if (RB_WARN_ON(cpu_buffer, write > BUF_PAGE_SIZE))
		return NULL;

	event = __rb_page_index(tail_page, tail);
	rb_update_event(event, type, length);

	/*
	 * If this is a commit and the tail is zero, then update
	 * this page's time stamp.
	 */
	if (!tail && rb_is_commit(cpu_buffer, event))
		cpu_buffer->commit_page->time_stamp = *ts;

	return event;

 out_unlock:
	__raw_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);
	return NULL;
}

static int
rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		  u64 *ts, u64 *delta)
{
	struct ring_buffer_event *event;
	static int once;
	int ret;

	if (unlikely(*delta > (1ULL << 59) && !once++)) {
		printk(KERN_WARNING "Delta way too big! %llu"
		       " ts=%llu write stamp = %llu\n",
		       (unsigned long long)*delta,
		       (unsigned long long)*ts,
		       (unsigned long long)cpu_buffer->write_stamp);
		WARN_ON(1);
	}

	/*
	 * The delta is too big; we need to add a
	 * new timestamp.
	 */
	event = __rb_reserve_next(cpu_buffer,
				  RINGBUF_TYPE_TIME_EXTEND,
				  RB_LEN_TIME_EXTEND,
				  ts);
	if (!event)
		return -EBUSY;

	if (PTR_ERR(event) == -EAGAIN)
		return -EAGAIN;

	/* Only a committed time event can update the write stamp */
	if (rb_is_commit(cpu_buffer, event)) {
		/*
		 * If this is the first on the page, then we need to
		 * update the page itself, and just put in a zero.
		 */
		if (rb_event_index(event)) {
			event->time_delta = *delta & TS_MASK;
			event->array[0] = *delta >> TS_SHIFT;
		} else {
			cpu_buffer->commit_page->time_stamp = *ts;
			event->time_delta = 0;
			event->array[0] = 0;
		}
		cpu_buffer->write_stamp = *ts;
		/* let the caller know this was the commit */
		ret = 1;
	} else {
		/* Darn, this is just wasted space */
		event->time_delta = 0;
		event->array[0] = 0;
		ret = 0;
	}

	*delta = 0;

	return ret;
}

static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned type, unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta;
	int commit = 0;
	int nr_loops = 0;

 again:
	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
		return NULL;

	ts = ring_buffer_time_stamp(cpu_buffer->cpu);

	/*
	 * Only the first commit can update the timestamp.
	 * Yes there is a race here. If an interrupt comes in
	 * just after the conditional and it traces too, then it
	 * will also check the deltas. More than one timestamp may
	 * also be made. But only the entry that did the actual
	 * commit will be something other than zero.
	 */
	if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
	    rb_page_write(cpu_buffer->tail_page) ==
	    rb_commit_index(cpu_buffer)) {

		delta = ts - cpu_buffer->write_stamp;

		/* make sure this delta is calculated here */
		barrier();

		/* Did the write stamp get updated already? */
		if (unlikely(ts < cpu_buffer->write_stamp))
			delta = 0;

		if (test_time_stamp(delta)) {

			commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);

			if (commit == -EBUSY)
				return NULL;

			if (commit == -EAGAIN)
				goto again;

			RB_WARN_ON(cpu_buffer, commit < 0);
		}
	} else
		/* Non commits have zero deltas */
		delta = 0;

	event = __rb_reserve_next(cpu_buffer, type, length, &ts);
	if (PTR_ERR(event) == -EAGAIN)
		goto again;

	if (!event) {
		if (unlikely(commit))
			/*
			 * Ouch! We needed a timestamp and it was committed. But
			 * we didn't get our event reserved.
			 */
			rb_set_commit_to_write(cpu_buffer);
		return NULL;
	}

	/*
	 * If the timestamp was committed, make the commit our entry
	 * now so that we will update it when needed.
	 */
	if (commit)
		rb_set_commit_event(cpu_buffer, event);
	else if (!rb_is_commit(cpu_buffer, event))
		delta = 0;

	event->time_delta = delta;

	return event;
}

static DEFINE_PER_CPU(int, rb_need_resched);

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 * @flags: a pointer to save the interrupt flags
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer,
			 unsigned long length,
			 unsigned long *flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	if (atomic_read(&buffer->record_disabled))
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	length = rb_calculate_event_length(length);
	if (length > BUF_PAGE_SIZE)
		goto out;

	event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
	if (!event)
		goto out;

	/*
	 * Need to store resched state on this cpu.
	 * Only the first needs to.
	 */

	if (preempt_count() == 1)
		per_cpu(rb_need_resched, cpu) = resched;

	return event;

 out:
	ftrace_preempt_enable(resched);
	return NULL;
}

static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	cpu_buffer->entries++;

	/* Only process further if we own the commit */
	if (!rb_is_commit(cpu_buffer, event))
		return;

	cpu_buffer->write_stamp += event->time_delta;

	rb_set_commit_to_write(cpu_buffer);
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event,
			      unsigned long flags)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	/*
	 * Only the last preempt count needs to restore preemption.
	 */
	if (preempt_count() == 1)
		ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
	else
		preempt_enable_no_resched_notrace();

	return 0;
}
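
/*
 * A minimal producer sketch using the reserve/commit pair above
 * (hypothetical caller, not part of this file):
 */
#if 0	/* illustrative only */
static int example_record(struct ring_buffer *buffer, u32 value)
{
	struct ring_buffer_event *event;
	unsigned long flags;
	u32 *body;

	event = ring_buffer_lock_reserve(buffer, sizeof(value), &flags);
	if (!event)
		return -EBUSY;	/* recording disabled or no room */

	body = ring_buffer_event_data(event);
	*body = value;		/* write the payload in place */

	return ring_buffer_unlock_commit(buffer, event, flags);
}
#endif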

/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
int ring_buffer_write(struct ring_buffer *buffer,
			unsigned long length,
			void *data)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long event_length;
	void *body;
	int ret = -EBUSY;
	int cpu, resched;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return -EBUSY;

	if (atomic_read(&buffer->record_disabled))
		return -EBUSY;

	resched = ftrace_preempt_disable();

	cpu = raw_smp_processor_id();

	if (!cpu_isset(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	event_length = rb_calculate_event_length(length);
	event = rb_reserve_next_event(cpu_buffer,
				      RINGBUF_TYPE_DATA, event_length);
	if (!event)
		goto out;

	body = rb_event_data(event);

	memcpy(body, data, length);

	rb_commit(cpu_buffer, event);

	ret = 0;
 out:
	ftrace_preempt_enable(resched);

	return ret;
}
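
/*
 * The one-call variant in a sketch (hypothetical caller, not part of
 * this file): equivalent to the reserve/commit pair when the payload
 * already exists in memory.
 */
#if 0	/* illustrative only */
static int example_write(struct ring_buffer *buffer)
{
	struct { u32 pid; u32 state; } rec = { 1, 0 };

	return ring_buffer_write(buffer, sizeof(rec), &rec);
}
#endif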

static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = cpu_buffer->reader_page;
	struct buffer_page *head = cpu_buffer->head_page;
	struct buffer_page *commit = cpu_buffer->commit_page;

	return reader->read == rb_page_commit(reader) &&
		(commit == reader ||
		 (commit == head &&
		  head->read == rb_page_commit(commit)));
}

/**
 * ring_buffer_record_disable - stop all writes into the buffer
 * @buffer: The ring buffer to stop writes to.
 *
 * This prevents all writes to the buffer. Any attempt to write
 * to the buffer after this will fail and return NULL.
 *
 * The caller should call synchronize_sched() after this.
 */
void ring_buffer_record_disable(struct ring_buffer *buffer)
{
	atomic_inc(&buffer->record_disabled);
}

/**
 * ring_buffer_record_enable - enable writes to the buffer
 * @buffer: The ring buffer to enable writes
 *
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
void ring_buffer_record_enable(struct ring_buffer *buffer)
{
	atomic_dec(&buffer->record_disabled);
}
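
/*
 * A sketch of the quiescence pattern the kerneldoc above asks for
 * (hypothetical caller, not part of this file): disable recording,
 * wait for in-flight writers to drain, read safely, then re-enable.
 */
#if 0	/* illustrative only */
static void example_snapshot(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	synchronize_sched();	/* writers run with preemption disabled,
				 * so this waits out any write in progress */
	/* ... read the buffer contents ... */
	ring_buffer_record_enable(buffer);
}
#endif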
1423
1424 /**
1425  * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1426  * @buffer: The ring buffer to stop writes to.
1427  * @cpu: The CPU buffer to stop
1428  *
1429  * This prevents all writes to the buffer. Any attempt to write
1430  * to the buffer after this will fail and return NULL.
1431  *
1432  * The caller should call synchronize_sched() after this.
1433  */
1434 void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1435 {
1436         struct ring_buffer_per_cpu *cpu_buffer;
1437
1438         if (!cpu_isset(cpu, buffer->cpumask))
1439                 return;
1440
1441         cpu_buffer = buffer->buffers[cpu];
1442         atomic_inc(&cpu_buffer->record_disabled);
1443 }
1444
1445 /**
1446  * ring_buffer_record_enable_cpu - enable writes to the buffer
1447  * @buffer: The ring buffer to enable writes
1448  * @cpu: The CPU to enable.
1449  *
1450  * Note, multiple disables will need the same number of enables
1451  * to truely enable the writing (much like preempt_disable).
1452  */
1453 void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1454 {
1455         struct ring_buffer_per_cpu *cpu_buffer;
1456
1457         if (!cpu_isset(cpu, buffer->cpumask))
1458                 return;
1459
1460         cpu_buffer = buffer->buffers[cpu];
1461         atomic_dec(&cpu_buffer->record_disabled);
1462 }
1463
1464 /**
1465  * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1466  * @buffer: The ring buffer
1467  * @cpu: The per CPU buffer to get the entries from.
1468  */
1469 unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1470 {
1471         struct ring_buffer_per_cpu *cpu_buffer;
1472
1473         if (!cpu_isset(cpu, buffer->cpumask))
1474                 return 0;
1475
1476         cpu_buffer = buffer->buffers[cpu];
1477         return cpu_buffer->entries;
1478 }
1479
1480 /**
1481  * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1482  * @buffer: The ring buffer
1483  * @cpu: The per CPU buffer to get the number of overruns from
1484  */
1485 unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1486 {
1487         struct ring_buffer_per_cpu *cpu_buffer;
1488
1489         if (!cpu_isset(cpu, buffer->cpumask))
1490                 return 0;
1491
1492         cpu_buffer = buffer->buffers[cpu];
1493         return cpu_buffer->overrun;
1494 }
1495
1496 /**
1497  * ring_buffer_entries - get the number of entries in a buffer
1498  * @buffer: The ring buffer
1499  *
1500  * Returns the total number of entries in the ring buffer
1501  * (all CPU entries)
1502  */
1503 unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1504 {
1505         struct ring_buffer_per_cpu *cpu_buffer;
1506         unsigned long entries = 0;
1507         int cpu;
1508
1509         /* if you care about this being correct, lock the buffer */
1510         for_each_buffer_cpu(buffer, cpu) {
1511                 cpu_buffer = buffer->buffers[cpu];
1512                 entries += cpu_buffer->entries;
1513         }
1514
1515         return entries;
1516 }
1517
1518 /**
1519  * ring_buffer_overrun_cpu - get the number of overruns in buffer
1520  * @buffer: The ring buffer
1521  *
1522  * Returns the total number of overruns in the ring buffer
1523  * (all CPU entries)
1524  */
1525 unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1526 {
1527         struct ring_buffer_per_cpu *cpu_buffer;
1528         unsigned long overruns = 0;
1529         int cpu;
1530
1531         /* if you care about this being correct, lock the buffer */
1532         for_each_buffer_cpu(buffer, cpu) {
1533                 cpu_buffer = buffer->buffers[cpu];
1534                 overruns += cpu_buffer->overrun;
1535         }
1536
1537         return overruns;
1538 }
1539
1540 static void rb_iter_reset(struct ring_buffer_iter *iter)
1541 {
1542         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1543
1544         /* Iterator usage is expected to have record disabled */
1545         if (list_empty(&cpu_buffer->reader_page->list)) {
1546                 iter->head_page = cpu_buffer->head_page;
1547                 iter->head = cpu_buffer->head_page->read;
1548         } else {
1549                 iter->head_page = cpu_buffer->reader_page;
1550                 iter->head = cpu_buffer->reader_page->read;
1551         }
1552         if (iter->head)
1553                 iter->read_stamp = cpu_buffer->read_stamp;
1554         else
1555                 iter->read_stamp = iter->head_page->time_stamp;
1556 }
1557
1558 /**
1559  * ring_buffer_iter_reset - reset an iterator
1560  * @iter: The iterator to reset
1561  *
1562  * Resets the iterator, so that it will start from the beginning
1563  * again.
1564  */
1565 void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1566 {
1567         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1568         unsigned long flags;
1569
1570         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1571         rb_iter_reset(iter);
1572         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1573 }
1574
1575 /**
1576  * ring_buffer_iter_empty - check if an iterator has no more to read
1577  * @iter: The iterator to check
1578  */
1579 int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1580 {
1581         struct ring_buffer_per_cpu *cpu_buffer;
1582
1583         cpu_buffer = iter->cpu_buffer;
1584
1585         return iter->head_page == cpu_buffer->commit_page &&
1586                 iter->head == rb_commit_index(cpu_buffer);
1587 }
1588
1589 static void
1590 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1591                      struct ring_buffer_event *event)
1592 {
1593         u64 delta;
1594
1595         switch (event->type) {
1596         case RINGBUF_TYPE_PADDING:
1597                 return;
1598
1599         case RINGBUF_TYPE_TIME_EXTEND:
1600                 delta = event->array[0];
1601                 delta <<= TS_SHIFT;
1602                 delta += event->time_delta;
1603                 cpu_buffer->read_stamp += delta;
1604                 return;
1605
1606         case RINGBUF_TYPE_TIME_STAMP:
1607                 /* FIXME: not implemented */
1608                 return;
1609
1610         case RINGBUF_TYPE_DATA:
1611                 cpu_buffer->read_stamp += event->time_delta;
1612                 return;
1613
1614         default:
1615                 BUG();
1616         }
1617         return;
1618 }
1619
1620 static void
1621 rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1622                           struct ring_buffer_event *event)
1623 {
1624         u64 delta;
1625
1626         switch (event->type) {
1627         case RINGBUF_TYPE_PADDING:
1628                 return;
1629
1630         case RINGBUF_TYPE_TIME_EXTEND:
1631                 delta = event->array[0];
1632                 delta <<= TS_SHIFT;
1633                 delta += event->time_delta;
1634                 iter->read_stamp += delta;
1635                 return;
1636
1637         case RINGBUF_TYPE_TIME_STAMP:
1638                 /* FIXME: not implemented */
1639                 return;
1640
1641         case RINGBUF_TYPE_DATA:
1642                 iter->read_stamp += event->time_delta;
1643                 return;
1644
1645         default:
1646                 BUG();
1647         }
1649 }
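
/*
 * Illustrative note (not part of the original file): a TIME_EXTEND event
 * carries one large timestamp delta split across two fields, using the
 * TS_SHIFT constant defined earlier in this file.  For a delta D the
 * writer stores the low bits in event->time_delta and the high bits in
 * event->array[0]:
 *
 *	event->time_delta = D & ((1ULL << TS_SHIFT) - 1);
 *	event->array[0]   = D >> TS_SHIFT;
 *
 * so the two update functions above reconstruct D as
 *
 *	D = ((u64)event->array[0] << TS_SHIFT) + event->time_delta;
 */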
1650
1651 static struct buffer_page *
1652 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1653 {
1654         struct buffer_page *reader = NULL;
1655         unsigned long flags;
1656         int nr_loops = 0;
1657
1658         local_irq_save(flags);
1659         __raw_spin_lock(&cpu_buffer->lock);
1660
1661  again:
1662         /*
1663          * This should normally only loop twice. But because the
1664          * start of the reader inserts an empty page, it causes
1665          * a case where we will loop three times. There should be no
1666          * reason to loop four times (that I know of).
1667          */
1668         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
1669                 reader = NULL;
1670                 goto out;
1671         }
1672
1673         reader = cpu_buffer->reader_page;
1674
1675         /* If there's more to read, return this page */
1676         if (cpu_buffer->reader_page->read < rb_page_size(reader))
1677                 goto out;
1678
1679         /* Never should we have an index greater than the size */
1680         if (RB_WARN_ON(cpu_buffer,
1681                        cpu_buffer->reader_page->read > rb_page_size(reader)))
1682                 goto out;
1683
1684         /* check if we caught up to the tail */
1685         reader = NULL;
1686         if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1687                 goto out;
1688
1689         /*
1690          * Splice the empty reader page into the list around the head.
1691          * Reset the reader page to size zero.
1692          */
1693
1694         reader = cpu_buffer->head_page;
1695         cpu_buffer->reader_page->list.next = reader->list.next;
1696         cpu_buffer->reader_page->list.prev = reader->list.prev;
1697
1698         local_set(&cpu_buffer->reader_page->write, 0);
1699         local_set(&cpu_buffer->reader_page->commit, 0);
1700
1701         /* Make the reader page now replace the head */
1702         reader->list.prev->next = &cpu_buffer->reader_page->list;
1703         reader->list.next->prev = &cpu_buffer->reader_page->list;
1704
1705         /*
1706          * If the tail is on the reader, then we must set the head
1707          * to the inserted page, otherwise we set it to the page after it.
1708          */
1709         cpu_buffer->head_page = cpu_buffer->reader_page;
1710
1711         if (cpu_buffer->commit_page != reader)
1712                 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1713
1714         /* Finally update the reader page to the new head */
1715         cpu_buffer->reader_page = reader;
1716         rb_reset_reader_page(cpu_buffer);
1717
1718         goto again;
1719
1720  out:
1721         __raw_spin_unlock(&cpu_buffer->lock);
1722         local_irq_restore(flags);
1723
1724         return reader;
1725 }
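
/*
 * Illustrative note (not part of the original file): the splice above
 * swaps the empty reader page into the ring in place of the old head
 * page.  The old head page becomes the new reader page and is no longer
 * on the writer's path, so its events can be consumed without racing
 * the producer; the writer only ever sees the freshly inserted empty
 * page in its place.
 */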
1726
1727 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1728 {
1729         struct ring_buffer_event *event;
1730         struct buffer_page *reader;
1731         unsigned length;
1732
1733         reader = rb_get_reader_page(cpu_buffer);
1734
1735         /* This function should not be called when buffer is empty */
1736         if (RB_WARN_ON(cpu_buffer, !reader))
1737                 return;
1738
1739         event = rb_reader_event(cpu_buffer);
1740
1741         if (event->type == RINGBUF_TYPE_DATA)
1742                 cpu_buffer->entries--;
1743
1744         rb_update_read_stamp(cpu_buffer, event);
1745
1746         length = rb_event_length(event);
1747         cpu_buffer->reader_page->read += length;
1748 }
1749
1750 static void rb_advance_iter(struct ring_buffer_iter *iter)
1751 {
1752         struct ring_buffer *buffer;
1753         struct ring_buffer_per_cpu *cpu_buffer;
1754         struct ring_buffer_event *event;
1755         unsigned length;
1756
1757         cpu_buffer = iter->cpu_buffer;
1758         buffer = cpu_buffer->buffer;
1759
1760         /*
1761          * Check if we are at the end of the buffer.
1762          */
1763         if (iter->head >= rb_page_size(iter->head_page)) {
1764                 if (RB_WARN_ON(buffer,
1765                                iter->head_page == cpu_buffer->commit_page))
1766                         return;
1767                 rb_inc_iter(iter);
1768                 return;
1769         }
1770
1771         event = rb_iter_head_event(iter);
1772
1773         length = rb_event_length(event);
1774
1775         /*
1776          * This should not be called to advance the iterator head if
1777          * we are at the tail of the buffer.
1778          */
1779         if (RB_WARN_ON(cpu_buffer,
1780                        (iter->head_page == cpu_buffer->commit_page) &&
1781                        (iter->head + length > rb_commit_index(cpu_buffer))))
1782                 return;
1783
1784         rb_update_iter_read_stamp(iter, event);
1785
1786         iter->head += length;
1787
1788         /* check for end of page padding */
1789         if ((iter->head >= rb_page_size(iter->head_page)) &&
1790             (iter->head_page != cpu_buffer->commit_page))
1791                 rb_advance_iter(iter);
1792 }
1793
1794 static struct ring_buffer_event *
1795 rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1796 {
1797         struct ring_buffer_per_cpu *cpu_buffer;
1798         struct ring_buffer_event *event;
1799         struct buffer_page *reader;
1800         int nr_loops = 0;
1801
1802         if (!cpu_isset(cpu, buffer->cpumask))
1803                 return NULL;
1804
1805         cpu_buffer = buffer->buffers[cpu];
1806
1807  again:
1808         /*
1809          * We repeat when a timestamp is encountered. It is possible
1810          * to get multiple timestamps from an interrupt entering just
1811          * as one timestamp is about to be written. The maximum number
1812          * of times this can happen is the number of interrupts that
1813          * can nest; interrupts nesting 10 levels deep is clearly
1814          * an anomaly.
1815          */
1816         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1817                 return NULL;
1818
1819         reader = rb_get_reader_page(cpu_buffer);
1820         if (!reader)
1821                 return NULL;
1822
1823         event = rb_reader_event(cpu_buffer);
1824
1825         switch (event->type) {
1826         case RINGBUF_TYPE_PADDING:
1827                 RB_WARN_ON(cpu_buffer, 1);
1828                 rb_advance_reader(cpu_buffer);
1829                 return NULL;
1830
1831         case RINGBUF_TYPE_TIME_EXTEND:
1832                 /* Internal data, OK to advance */
1833                 rb_advance_reader(cpu_buffer);
1834                 goto again;
1835
1836         case RINGBUF_TYPE_TIME_STAMP:
1837                 /* FIXME: not implemented */
1838                 rb_advance_reader(cpu_buffer);
1839                 goto again;
1840
1841         case RINGBUF_TYPE_DATA:
1842                 if (ts) {
1843                         *ts = cpu_buffer->read_stamp + event->time_delta;
1844                         ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1845                 }
1846                 return event;
1847
1848         default:
1849                 BUG();
1850         }
1851
1852         return NULL;
1853 }
1854
1855 static struct ring_buffer_event *
1856 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1857 {
1858         struct ring_buffer *buffer;
1859         struct ring_buffer_per_cpu *cpu_buffer;
1860         struct ring_buffer_event *event;
1861         int nr_loops = 0;
1862
1863         if (ring_buffer_iter_empty(iter))
1864                 return NULL;
1865
1866         cpu_buffer = iter->cpu_buffer;
1867         buffer = cpu_buffer->buffer;
1868
1869  again:
1870         /*
1871          * We repeat when a timestamp is encountered. It is possible
1872          * to get multiple timestamps from an interrupt entering just
1873          * as one timestamp is about to be written. The maximum number
1874          * of times this can happen is the number of interrupts that
1875          * can nest; interrupts nesting 10 levels deep is clearly
1876          * an anomaly.
1877          */
1878         if (RB_WARN_ON(cpu_buffer, ++nr_loops > 10))
1879                 return NULL;
1880
1881         if (rb_per_cpu_empty(cpu_buffer))
1882                 return NULL;
1883
1884         event = rb_iter_head_event(iter);
1885
1886         switch (event->type) {
1887         case RINGBUF_TYPE_PADDING:
1888                 rb_inc_iter(iter);
1889                 goto again;
1890
1891         case RINGBUF_TYPE_TIME_EXTEND:
1892                 /* Internal data, OK to advance */
1893                 rb_advance_iter(iter);
1894                 goto again;
1895
1896         case RINGBUF_TYPE_TIME_STAMP:
1897                 /* FIXME: not implemented */
1898                 rb_advance_iter(iter);
1899                 goto again;
1900
1901         case RINGBUF_TYPE_DATA:
1902                 if (ts) {
1903                         *ts = iter->read_stamp + event->time_delta;
1904                         ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1905                 }
1906                 return event;
1907
1908         default:
1909                 BUG();
1910         }
1911
1912         return NULL;
1913 }
1914
1915 /**
1916  * ring_buffer_peek - peek at the next event to be read
1917  * @buffer: The ring buffer to read
1918  * @cpu: The cpu to peek at
1919  * @ts: The timestamp counter of this event.
1920  *
1921  * This will return the event that will be read next, but does
1922  * not consume the data.
1923  */
1924 struct ring_buffer_event *
1925 ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1926 {
1927         struct ring_buffer_per_cpu *cpu_buffer;
1928         struct ring_buffer_event *event;
1929         unsigned long flags;
1930
              if (!cpu_isset(cpu, buffer->cpumask))
                      return NULL;

              cpu_buffer = buffer->buffers[cpu];

1931         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1932         event = rb_buffer_peek(buffer, cpu, ts);
1933         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1934
1935         return event;
1936 }
1937
1938 /**
1939  * ring_buffer_iter_peek - peek at the next event to be read
1940  * @iter: The ring buffer iterator
1941  * @ts: The timestamp counter of this event.
1942  *
1943  * This will return the event that will be read next, but does
1944  * not increment the iterator.
1945  */
1946 struct ring_buffer_event *
1947 ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1948 {
1949         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1950         struct ring_buffer_event *event;
1951         unsigned long flags;
1952
1953         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1954         event = rb_iter_peek(iter, ts);
1955         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1956
1957         return event;
1958 }
1959
1960 /**
1961  * ring_buffer_consume - return an event and consume it
1962  * @buffer: The ring buffer to get the next event from
      * @cpu: The per CPU buffer to get the event from
      * @ts: The timestamp counter of this event
1963  *
1964  * Returns the next event in the ring buffer, and consumes it.
1965  * Sequential reads will therefore keep returning a different event,
1966  * and will eventually empty the ring buffer if the producer is slower.
1967  */
1968 struct ring_buffer_event *
1969 ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1970 {
1971         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1972         struct ring_buffer_event *event;
1973         unsigned long flags;
1974
1975         if (!cpu_isset(cpu, buffer->cpumask))
1976                 return NULL;
1977
1978         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
1979
1980         event = rb_buffer_peek(buffer, cpu, ts);
1981         if (!event)
1982                 goto out;
1983
1984         rb_advance_reader(cpu_buffer);
1985
1986  out:
1987         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
1988
1989         return event;
1990 }
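
/*
 * Minimal usage sketch (not part of this file): drain everything
 * currently in one CPU's buffer.  The callback name and signature are
 * hypothetical; ring_buffer_event_data() and ring_buffer_event_length()
 * are the event accessors defined earlier in this file.
 */
static void __maybe_unused
rb_example_drain_cpu(struct ring_buffer *buffer, int cpu,
		     void (*handle)(void *data, unsigned length))
{
	struct ring_buffer_event *event;
	u64 ts;

	/* each call returns the oldest unread event and consumes it */
	while ((event = ring_buffer_consume(buffer, cpu, &ts)))
		handle(ring_buffer_event_data(event),
		       ring_buffer_event_length(event));
}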
1991
1992 /**
1993  * ring_buffer_read_start - start a non consuming read of the buffer
1994  * @buffer: The ring buffer to read from
1995  * @cpu: The cpu buffer to iterate over
1996  *
1997  * This starts up an iteration through the buffer. It also disables
1998  * the recording to the buffer until the reading is finished.
1999  * This prevents the reading from being corrupted. This is not
2000  * a consuming read, so a producer is not expected.
2001  *
2002  * Must be paired with ring_buffer_read_finish.
2003  */
2004 struct ring_buffer_iter *
2005 ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
2006 {
2007         struct ring_buffer_per_cpu *cpu_buffer;
2008         struct ring_buffer_iter *iter;
2009         unsigned long flags;
2010
2011         if (!cpu_isset(cpu, buffer->cpumask))
2012                 return NULL;
2013
2014         iter = kmalloc(sizeof(*iter), GFP_KERNEL);
2015         if (!iter)
2016                 return NULL;
2017
2018         cpu_buffer = buffer->buffers[cpu];
2019
2020         iter->cpu_buffer = cpu_buffer;
2021
2022         atomic_inc(&cpu_buffer->record_disabled);
2023         synchronize_sched();
2024
2025         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2026         __raw_spin_lock(&cpu_buffer->lock);
2027         rb_iter_reset(iter);
2028         __raw_spin_unlock(&cpu_buffer->lock);
2029         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2030
2031         return iter;
2032 }
2033
2034 /**
2035  * ring_buffer_read_finish - finish reading the iterator of the buffer
2036  * @iter: The iterator retrieved by ring_buffer_read_start
2037  *
2038  * This re-enables the recording to the buffer, and frees the
2039  * iterator.
2040  */
2041 void
2042 ring_buffer_read_finish(struct ring_buffer_iter *iter)
2043 {
2044         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2045
2046         atomic_dec(&cpu_buffer->record_disabled);
2047         kfree(iter);
2048 }
2049
2050 /**
2051  * ring_buffer_read - read the next item in the ring buffer by the iterator
2052  * @iter: The ring buffer iterator
2053  * @ts: The time stamp of the event read.
2054  *
2055  * This reads the next event in the ring buffer and increments the iterator.
2056  */
2057 struct ring_buffer_event *
2058 ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
2059 {
2060         struct ring_buffer_event *event;
2061         struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
2062         unsigned long flags;
2063
2064         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2065         event = rb_iter_peek(iter, ts);
2066         if (!event)
2067                 goto out;
2068
2069         rb_advance_iter(iter);
2070  out:
2071         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2072
2073         return event;
2074 }
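
/*
 * Minimal usage sketch (not part of this file) of the non consuming
 * iterator API above: start an iteration, walk every event on one CPU,
 * then finish to re-enable recording.  The function name is
 * hypothetical.
 */
static void __maybe_unused
rb_example_walk_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	/* events come back oldest first; nothing is consumed */
	while ((event = ring_buffer_read(iter, &ts)))
		;

	ring_buffer_read_finish(iter);
}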
2075
2076 /**
2077  * ring_buffer_size - return the size of the ring buffer (in bytes)
2078  * @buffer: The ring buffer.
2079  */
2080 unsigned long ring_buffer_size(struct ring_buffer *buffer)
2081 {
2082         return BUF_PAGE_SIZE * buffer->pages;
2083 }
2084
2085 static void
2086 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
2087 {
2088         cpu_buffer->head_page
2089                 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
2090         local_set(&cpu_buffer->head_page->write, 0);
2091         local_set(&cpu_buffer->head_page->commit, 0);
2092
2093         cpu_buffer->head_page->read = 0;
2094
2095         cpu_buffer->tail_page = cpu_buffer->head_page;
2096         cpu_buffer->commit_page = cpu_buffer->head_page;
2097
2098         INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
2099         local_set(&cpu_buffer->reader_page->write, 0);
2100         local_set(&cpu_buffer->reader_page->commit, 0);
2101         cpu_buffer->reader_page->read = 0;
2102
2103         cpu_buffer->overrun = 0;
2104         cpu_buffer->entries = 0;
2105 }
2106
2107 /**
2108  * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
2109  * @buffer: The ring buffer to reset a per cpu buffer of
2110  * @cpu: The CPU buffer to be reset
2111  */
2112 void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
2113 {
2114         struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
2115         unsigned long flags;
2116
2117         if (!cpu_isset(cpu, buffer->cpumask))
2118                 return;
2119
2120         spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
2121
2122         __raw_spin_lock(&cpu_buffer->lock);
2123
2124         rb_reset_cpu(cpu_buffer);
2125
2126         __raw_spin_unlock(&cpu_buffer->lock);
2127
2128         spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
2129 }
2130
2131 /**
2132  * ring_buffer_reset - reset a ring buffer
2133  * @buffer: The ring buffer to reset all cpu buffers of
2134  */
2135 void ring_buffer_reset(struct ring_buffer *buffer)
2136 {
2137         int cpu;
2138
2139         for_each_buffer_cpu(buffer, cpu)
2140                 ring_buffer_reset_cpu(buffer, cpu);
2141 }
2142
2143 /**
2144  * ring_buffer_empty - is the ring buffer empty?
2145  * @buffer: The ring buffer to test
2146  */
2147 int ring_buffer_empty(struct ring_buffer *buffer)
2148 {
2149         struct ring_buffer_per_cpu *cpu_buffer;
2150         int cpu;
2151
2152         /* yes this is racy, but if you don't like the race, lock the buffer */
2153         for_each_buffer_cpu(buffer, cpu) {
2154                 cpu_buffer = buffer->buffers[cpu];
2155                 if (!rb_per_cpu_empty(cpu_buffer))
2156                         return 0;
2157         }
2158         return 1;
2159 }
2160
2161 /**
2162  * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
2163  * @buffer: The ring buffer
2164  * @cpu: The CPU buffer to test
2165  */
2166 int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
2167 {
2168         struct ring_buffer_per_cpu *cpu_buffer;
2169
2170         if (!cpu_isset(cpu, buffer->cpumask))
2171                 return 1;
2172
2173         cpu_buffer = buffer->buffers[cpu];
2174         return rb_per_cpu_empty(cpu_buffer);
2175 }
2176
2177 /**
2178  * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
2179  * @buffer_a: One buffer to swap with
2180  * @buffer_b: The other buffer to swap with
      * @cpu: The CPU buffer to swap
2181  *
2182  * This function is useful for tracers that want to take a "snapshot"
2183  * of a CPU buffer and have another backup buffer lying around.
2184  * It is expected that the tracer handles the cpu buffer not being
2185  * used at the moment.
2186  */
2187 int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
2188                          struct ring_buffer *buffer_b, int cpu)
2189 {
2190         struct ring_buffer_per_cpu *cpu_buffer_a;
2191         struct ring_buffer_per_cpu *cpu_buffer_b;
2192
2193         if (!cpu_isset(cpu, buffer_a->cpumask) ||
2194             !cpu_isset(cpu, buffer_b->cpumask))
2195                 return -EINVAL;
2196
2197         /* At least make sure the two buffers are somewhat the same */
2198         if (buffer_a->size != buffer_b->size ||
2199             buffer_a->pages != buffer_b->pages)
2200                 return -EINVAL;
2201
2202         cpu_buffer_a = buffer_a->buffers[cpu];
2203         cpu_buffer_b = buffer_b->buffers[cpu];
2204
2205         /*
2206          * We can't do a synchronize_sched here because this
2207          * function can be called in atomic context.
2208          * Normally this will be called from the same CPU as cpu.
2209          * If not it's up to the caller to protect this.
2210          */
2211         atomic_inc(&cpu_buffer_a->record_disabled);
2212         atomic_inc(&cpu_buffer_b->record_disabled);
2213
2214         buffer_a->buffers[cpu] = cpu_buffer_b;
2215         buffer_b->buffers[cpu] = cpu_buffer_a;
2216
2217         cpu_buffer_b->buffer = buffer_a;
2218         cpu_buffer_a->buffer = buffer_b;
2219
2220         atomic_dec(&cpu_buffer_a->record_disabled);
2221         atomic_dec(&cpu_buffer_b->record_disabled);
2222
2223         return 0;
2224 }
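
/*
 * Snapshot sketch (not part of this file): a tracer keeping a spare
 * buffer around can swap a CPU's live buffer out and then read the
 * frozen copy at leisure.  "spare" is assumed to have been created
 * with ring_buffer_alloc() using the same size; the function name is
 * hypothetical.
 */
static int __maybe_unused
rb_example_snapshot_cpu(struct ring_buffer *live, struct ring_buffer *spare,
			int cpu)
{
	int ret;

	ret = ring_buffer_swap_cpu(live, spare, cpu);
	if (ret)
		return ret;

	/* "spare" now holds the frozen events for this cpu */
	return 0;
}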
2225
2226 static ssize_t
2227 rb_simple_read(struct file *filp, char __user *ubuf,
2228                size_t cnt, loff_t *ppos)
2229 {
2230         long *p = filp->private_data;
2231         char buf[64];
2232         int r;
2233
2234         if (test_bit(RB_BUFFERS_DISABLED_BIT, p))
2235                 r = sprintf(buf, "permanently disabled\n");
2236         else
2237                 r = sprintf(buf, "%d\n", test_bit(RB_BUFFERS_ON_BIT, p));
2238
2239         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
2240 }
2241
2242 static ssize_t
2243 rb_simple_write(struct file *filp, const char __user *ubuf,
2244                 size_t cnt, loff_t *ppos)
2245 {
2246         long *p = filp->private_data;
2247         char buf[64];
2248         long val;
2249         int ret;
2250
2251         if (cnt >= sizeof(buf))
2252                 return -EINVAL;
2253
2254         if (copy_from_user(&buf, ubuf, cnt))
2255                 return -EFAULT;
2256
2257         buf[cnt] = 0;
2258
2259         ret = strict_strtoul(buf, 10, &val);
2260         if (ret < 0)
2261                 return ret;
2262
2263         if (val)
2264                 set_bit(RB_BUFFERS_ON_BIT, p);
2265         else
2266                 clear_bit(RB_BUFFERS_ON_BIT, p);
2267
2268         (*ppos)++;
2269
2270         return cnt;
2271 }
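
/*
 * Illustrative userspace usage (not part of this file); the exact path
 * depends on where debugfs is mounted, commonly:
 *
 *	echo 0 > /sys/kernel/debug/tracing/tracing_on	# block all recording
 *	echo 1 > /sys/kernel/debug/tracing/tracing_on	# allow recording again
 *	cat /sys/kernel/debug/tracing/tracing_on	# query current state
 */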
2272
2273 static const struct file_operations rb_simple_fops = {
2274         .open           = tracing_open_generic,
2275         .read           = rb_simple_read,
2276         .write          = rb_simple_write,
2277 };
2278
2279
2280 static __init int rb_init_debugfs(void)
2281 {
2282         struct dentry *d_tracer;
2283         struct dentry *entry;
2284
2285         d_tracer = tracing_init_dentry();
2286
2287         entry = debugfs_create_file("tracing_on", 0644, d_tracer,
2288                                     &ring_buffer_flags, &rb_simple_fops);
2289         if (!entry)
2290                 pr_warning("Could not create debugfs 'tracing_on' entry\n");
2291
2292         return 0;
2293 }
2294
2295 fs_initcall(rb_init_debugfs);