4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2015, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 #define DEBUG_SUBSYSTEM S_LLITE
34 #include "../include/lustre_lite.h"
35 #include "../include/lprocfs_status.h"
36 #include <linux/seq_file.h>
37 #include "../include/obd_support.h"
39 #include "llite_internal.h"
40 #include "vvp_internal.h"
/*
 * Forward declarations for the read/write statistics file_operations that
 * are defined (via LPROC_SEQ_FOPS) later in this file; they are referenced
 * by ldebugfs_register_mountpoint() before their definitions.
 */
42 /* debugfs llite mount point registration */
43 static struct file_operations ll_rw_extents_stats_fops;
44 static struct file_operations ll_rw_extents_stats_pp_fops;
45 static struct file_operations ll_rw_offset_stats_fops;
/*
 * blocksize_show() - sysfs read handler: report the backend filesystem
 * block size (osfs.os_bsize) obtained from a (possibly cached) statfs.
 * NOTE(review): interior lines appear dropped by extraction (declarations,
 * braces, rc check) — do not treat this span as complete code.
 */
47 static ssize_t blocksize_show(struct kobject *kobj, struct attribute *attr,
50 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
52 struct obd_statfs osfs;
/* statfs with a negative-shifted max_age: accept cached data this recent */
55 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
56 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
59 return sprintf(buf, "%u\n", osfs.os_bsize);
63 LUSTRE_RO_ATTR(blocksize);
/*
 * kbytestotal_show() - sysfs read handler: total space in kilobytes,
 * computed as os_blocks scaled by the block size (the visible loop shifts
 * 'result' once per power of two in blk_size; shifted line not visible).
 */
65 static ssize_t kbytestotal_show(struct kobject *kobj, struct attribute *attr,
68 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
70 struct obd_statfs osfs;
73 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
74 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
/* blk_size in KiB units; each remaining doubling scales the block count */
77 __u32 blk_size = osfs.os_bsize >> 10;
78 __u64 result = osfs.os_blocks;
80 while (blk_size >>= 1)
83 rc = sprintf(buf, "%llu\n", result);
88 LUSTRE_RO_ATTR(kbytestotal);
/*
 * kbytesfree_show() - sysfs read handler: free space in kilobytes
 * (os_bfree scaled by block size), same pattern as kbytestotal_show().
 */
90 static ssize_t kbytesfree_show(struct kobject *kobj, struct attribute *attr,
93 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
95 struct obd_statfs osfs;
98 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
99 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
102 __u32 blk_size = osfs.os_bsize >> 10;
103 __u64 result = osfs.os_bfree;
105 while (blk_size >>= 1)
108 rc = sprintf(buf, "%llu\n", result);
113 LUSTRE_RO_ATTR(kbytesfree);
/*
 * kbytesavail_show() - sysfs read handler: space available to unprivileged
 * users in kilobytes (os_bavail scaled by block size).
 */
115 static ssize_t kbytesavail_show(struct kobject *kobj, struct attribute *attr,
118 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
120 struct obd_statfs osfs;
123 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
124 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
127 __u32 blk_size = osfs.os_bsize >> 10;
128 __u64 result = osfs.os_bavail;
130 while (blk_size >>= 1)
133 rc = sprintf(buf, "%llu\n", result);
138 LUSTRE_RO_ATTR(kbytesavail);
/*
 * filestotal_show() - sysfs read handler: total inode count (os_files)
 * from statfs.
 */
140 static ssize_t filestotal_show(struct kobject *kobj, struct attribute *attr,
143 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
145 struct obd_statfs osfs;
148 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
149 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
152 return sprintf(buf, "%llu\n", osfs.os_files);
156 LUSTRE_RO_ATTR(filestotal);
/*
 * filesfree_show() - sysfs read handler: free inode count (os_ffree)
 * from statfs.
 */
158 static ssize_t filesfree_show(struct kobject *kobj, struct attribute *attr,
161 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
163 struct obd_statfs osfs;
166 rc = ll_statfs_internal(sbi->ll_sb, &osfs,
167 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
170 return sprintf(buf, "%llu\n", osfs.os_ffree);
174 LUSTRE_RO_ATTR(filesfree);
/* client_type_show() - sysfs read handler: always reports "local client". */
176 static ssize_t client_type_show(struct kobject *kobj, struct attribute *attr,
179 return sprintf(buf, "local client\n");
181 LUSTRE_RO_ATTR(client_type);
/* fstype_show() - sysfs read handler: the superblock's filesystem type name. */
183 static ssize_t fstype_show(struct kobject *kobj, struct attribute *attr,
186 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
189 return sprintf(buf, "%s\n", sbi->ll_sb->s_type->name);
191 LUSTRE_RO_ATTR(fstype);
/* uuid_show() - sysfs read handler: this client mount's superblock UUID. */
193 static ssize_t uuid_show(struct kobject *kobj, struct attribute *attr,
196 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
199 return sprintf(buf, "%s\n", sbi->ll_sb_uuid.uuid);
201 LUSTRE_RO_ATTR(uuid);
/*
 * ll_site_stats_seq_show() - debugfs "site" file: delegate printing of the
 * cl_site statistical counters to cl_site_stats_print().
 */
203 static int ll_site_stats_seq_show(struct seq_file *m, void *v)
205 struct super_block *sb = m->private;
208 * See description of statistical counters in struct cl_site, and
211 return cl_site_stats_print(lu2cl_site(ll_s2sbi(sb)->ll_site), m);
214 LPROC_SEQ_FOPS_RO(ll_site_stats);
/*
 * max_read_ahead_mb_show()/_store() - sysfs tunable for the per-filesystem
 * readahead budget. Internally stored in pages (ra_max_pages) under
 * sbi->ll_lock; converted to/from MB using 1 << (20 - PAGE_SHIFT).
 * The store path rejects values above half of total RAM.
 */
216 static ssize_t max_read_ahead_mb_show(struct kobject *kobj,
217 struct attribute *attr, char *buf)
219 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
224 spin_lock(&sbi->ll_lock);
225 pages_number = sbi->ll_ra_info.ra_max_pages;
226 spin_unlock(&sbi->ll_lock);
228 mult = 1 << (20 - PAGE_SHIFT);
229 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
232 static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
233 struct attribute *attr,
237 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
240 unsigned long pages_number;
242 rc = kstrtoul(buffer, 10, &pages_number);
246 pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
/* refuse a readahead budget larger than half of system memory */
248 if (pages_number > totalram_pages / 2) {
249 CERROR("can't set file readahead more than %lu MB\n",
250 totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
254 spin_lock(&sbi->ll_lock);
255 sbi->ll_ra_info.ra_max_pages = pages_number;
256 spin_unlock(&sbi->ll_lock);
260 LUSTRE_RW_ATTR(max_read_ahead_mb);
/*
 * max_read_ahead_per_file_mb_show()/_store() - sysfs tunable for the
 * per-file readahead window (ra_max_pages_per_file). The store path caps
 * the value at the filesystem-wide ra_max_pages.
 */
262 static ssize_t max_read_ahead_per_file_mb_show(struct kobject *kobj,
263 struct attribute *attr,
266 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
271 spin_lock(&sbi->ll_lock);
272 pages_number = sbi->ll_ra_info.ra_max_pages_per_file;
273 spin_unlock(&sbi->ll_lock);
275 mult = 1 << (20 - PAGE_SHIFT);
276 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
279 static ssize_t max_read_ahead_per_file_mb_store(struct kobject *kobj,
280 struct attribute *attr,
284 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
287 unsigned long pages_number;
289 rc = kstrtoul(buffer, 10, &pages_number);
/* per-file limit may not exceed the global max_read_ahead_mb budget */
293 if (pages_number > sbi->ll_ra_info.ra_max_pages) {
294 CERROR("can't set file readahead more than max_read_ahead_mb %lu MB\n",
295 sbi->ll_ra_info.ra_max_pages);
299 spin_lock(&sbi->ll_lock);
300 sbi->ll_ra_info.ra_max_pages_per_file = pages_number;
301 spin_unlock(&sbi->ll_lock);
305 LUSTRE_RW_ATTR(max_read_ahead_per_file_mb);
/*
 * max_read_ahead_whole_mb_show()/_store() - sysfs tunable for the maximum
 * file size that will be read in whole on first access
 * (ra_max_read_ahead_whole_pages). Capped at ra_max_pages_per_file because
 * the readahead algorithm never exceeds the per-file window anyway.
 */
307 static ssize_t max_read_ahead_whole_mb_show(struct kobject *kobj,
308 struct attribute *attr,
311 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
316 spin_lock(&sbi->ll_lock);
317 pages_number = sbi->ll_ra_info.ra_max_read_ahead_whole_pages;
318 spin_unlock(&sbi->ll_lock);
320 mult = 1 << (20 - PAGE_SHIFT);
321 return lprocfs_read_frac_helper(buf, PAGE_SIZE, pages_number, mult);
324 static ssize_t max_read_ahead_whole_mb_store(struct kobject *kobj,
325 struct attribute *attr,
329 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
332 unsigned long pages_number;
334 rc = kstrtoul(buffer, 10, &pages_number);
338 /* Cap this at the current max readahead window size, the readahead
339 * algorithm does this anyway so it's pointless to set it larger.
341 if (pages_number > sbi->ll_ra_info.ra_max_pages_per_file) {
342 CERROR("can't set max_read_ahead_whole_mb more than max_read_ahead_per_file_mb: %lu\n",
343 sbi->ll_ra_info.ra_max_pages_per_file >> (20 - PAGE_SHIFT));
347 spin_lock(&sbi->ll_lock);
348 sbi->ll_ra_info.ra_max_read_ahead_whole_pages = pages_number;
349 spin_unlock(&sbi->ll_lock);
353 LUSTRE_RW_ATTR(max_read_ahead_whole_mb);
/*
 * ll_max_cached_mb_seq_show()/_seq_write() - debugfs "max_cached_mb" file
 * controlling the client page-cache LRU budget (cl_client_cache).
 *
 * show: report users, max/used/unused MB and the reclaim count, converting
 * pages to MB with shift = 20 - PAGE_SHIFT.
 *
 * write: parse "max_cached_mb: N" (fractional helper), bound it by total
 * RAM, then either grow the LRU (atomic_add of the positive diff) or
 * shrink it: first take slots from the free pool via an atomic_cmpxchg
 * loop, then — if OSC exports are set up — ask the OSCs to drop LRU slots
 * via obd_set_info_async(KEY_CACHE_LRU_SHRINK).
 * NOTE(review): many interior lines are missing from this extraction
 * (error returns, loop structure, 'nrpages' bookkeeping) — do not rely on
 * this span as complete code.
 */
355 static int ll_max_cached_mb_seq_show(struct seq_file *m, void *v)
357 struct super_block *sb = m->private;
358 struct ll_sb_info *sbi = ll_s2sbi(sb);
359 struct cl_client_cache *cache = sbi->ll_cache;
360 int shift = 20 - PAGE_SHIFT;
364 max_cached_mb = cache->ccc_lru_max >> shift;
365 unused_mb = atomic_read(&cache->ccc_lru_left) >> shift;
368 "max_cached_mb: %d\n"
371 "reclaim_count: %u\n",
372 atomic_read(&cache->ccc_users),
374 max_cached_mb - unused_mb,
376 cache->ccc_lru_shrinkers);
380 static ssize_t ll_max_cached_mb_seq_write(struct file *file,
381 const char __user *buffer,
382 size_t count, loff_t *off)
384 struct super_block *sb = ((struct seq_file *)file->private_data)->private;
385 struct ll_sb_info *sbi = ll_s2sbi(sb);
386 struct cl_client_cache *cache = sbi->ll_cache;
389 int mult, rc, pages_number;
394 if (count >= sizeof(kernbuf))
397 if (copy_from_user(kernbuf, buffer, count))
401 mult = 1 << (20 - PAGE_SHIFT);
/* skip past an optional "max_cached_mb:" prefix in the written string */
402 buffer += lprocfs_find_named_value(kernbuf, "max_cached_mb:", &count) -
404 rc = lprocfs_write_frac_helper(buffer, count, &pages_number, mult);
408 if (pages_number < 0 || pages_number > totalram_pages) {
409 CERROR("%s: can't set max cache more than %lu MB\n",
410 ll_get_fsname(sb, NULL, 0),
411 totalram_pages >> (20 - PAGE_SHIFT));
415 spin_lock(&sbi->ll_lock);
416 diff = pages_number - cache->ccc_lru_max;
417 spin_unlock(&sbi->ll_lock);
419 /* easy - add more LRU slots. */
421 atomic_add(diff, &cache->ccc_lru_left);
426 env = cl_env_get(&refcheck);
434 /* reduce LRU budget from free slots. */
438 ov = atomic_read(&cache->ccc_lru_left);
442 nv = ov > diff ? ov - diff : 0;
443 rc = atomic_cmpxchg(&cache->ccc_lru_left, ov, nv);
444 if (likely(ov == rc)) {
454 if (!sbi->ll_dt_exp) { /* being initialized */
459 /* difficult - have to ask OSCs to drop LRU slots. */
461 rc = obd_set_info_async(env, sbi->ll_dt_exp,
462 sizeof(KEY_CACHE_LRU_SHRINK),
463 KEY_CACHE_LRU_SHRINK,
464 sizeof(tmp), &tmp, NULL);
468 cl_env_put(env, &refcheck);
472 spin_lock(&sbi->ll_lock);
473 cache->ccc_lru_max = pages_number;
474 spin_unlock(&sbi->ll_lock);
477 atomic_add(nrpages, &cache->ccc_lru_left);
482 LPROC_SEQ_FOPS(ll_max_cached_mb);
/*
 * checksum_pages_show()/_store() - sysfs toggle for the LL_SBI_CHECKSUM
 * flag; the store path also pushes the new setting to the OSCs via
 * obd_set_info_async(KEY_CHECKSUM), warning (not failing) on error.
 */
484 static ssize_t checksum_pages_show(struct kobject *kobj, struct attribute *attr,
487 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
490 return sprintf(buf, "%u\n", (sbi->ll_flags & LL_SBI_CHECKSUM) ? 1 : 0);
493 static ssize_t checksum_pages_store(struct kobject *kobj,
494 struct attribute *attr,
498 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
507 rc = kstrtoul(buffer, 10, &val);
511 sbi->ll_flags |= LL_SBI_CHECKSUM;
513 sbi->ll_flags &= ~LL_SBI_CHECKSUM;
515 rc = obd_set_info_async(NULL, sbi->ll_dt_exp, sizeof(KEY_CHECKSUM),
516 KEY_CHECKSUM, sizeof(val), &val, NULL);
518 CWARN("Failed to set OSC checksum flags: %d\n", rc);
522 LUSTRE_RW_ATTR(checksum_pages);
/*
 * ll_rd_track_id()/ll_wr_track_id() - shared helpers behind the
 * stats_track_{pid,ppid,gid} attributes. Read: print the tracked id when
 * the current track type matches, "0 (all)" when tracking everything,
 * otherwise "untracked". Write: parse an id, set track id/type (id 0
 * appears to mean STATS_TRACK_ALL) and clear the accumulated stats.
 */
524 static ssize_t ll_rd_track_id(struct kobject *kobj, char *buf,
525 enum stats_track_type type)
527 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
530 if (sbi->ll_stats_track_type == type)
531 return sprintf(buf, "%d\n", sbi->ll_stats_track_id);
532 else if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
533 return sprintf(buf, "0 (all)\n");
535 return sprintf(buf, "untracked\n");
538 static ssize_t ll_wr_track_id(struct kobject *kobj, const char *buffer,
540 enum stats_track_type type)
542 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
547 rc = kstrtoul(buffer, 10, &pid);
550 sbi->ll_stats_track_id = pid;
552 sbi->ll_stats_track_type = STATS_TRACK_ALL;
554 sbi->ll_stats_track_type = type;
/* restart the counters so they only reflect the newly tracked target */
555 lprocfs_clear_stats(sbi->ll_stats);
/*
 * stats_track_{pid,ppid,gid} sysfs attributes - thin wrappers that forward
 * to ll_rd_track_id()/ll_wr_track_id() with the matching track type.
 */
559 static ssize_t stats_track_pid_show(struct kobject *kobj,
560 struct attribute *attr,
563 return ll_rd_track_id(kobj, buf, STATS_TRACK_PID);
566 static ssize_t stats_track_pid_store(struct kobject *kobj,
567 struct attribute *attr,
571 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PID);
573 LUSTRE_RW_ATTR(stats_track_pid);
575 static ssize_t stats_track_ppid_show(struct kobject *kobj,
576 struct attribute *attr,
579 return ll_rd_track_id(kobj, buf, STATS_TRACK_PPID);
582 static ssize_t stats_track_ppid_store(struct kobject *kobj,
583 struct attribute *attr,
587 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_PPID);
589 LUSTRE_RW_ATTR(stats_track_ppid);
591 static ssize_t stats_track_gid_show(struct kobject *kobj,
592 struct attribute *attr,
595 return ll_rd_track_id(kobj, buf, STATS_TRACK_GID);
598 static ssize_t stats_track_gid_store(struct kobject *kobj,
599 struct attribute *attr,
603 return ll_wr_track_id(kobj, buffer, count, STATS_TRACK_GID);
605 LUSTRE_RW_ATTR(stats_track_gid);
/*
 * statahead_max_show()/_store() - sysfs tunable for the maximum number of
 * in-flight statahead RPCs (ll_sa_max), validated against LL_SA_RPC_MAX.
 * statahead_agl_show()/_store() - sysfs toggle for the LL_SBI_AGL_ENABLED
 * (asynchronous glimpse lock) flag.
 */
607 static ssize_t statahead_max_show(struct kobject *kobj,
608 struct attribute *attr,
611 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
614 return sprintf(buf, "%u\n", sbi->ll_sa_max);
617 static ssize_t statahead_max_store(struct kobject *kobj,
618 struct attribute *attr,
622 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
627 rc = kstrtoul(buffer, 10, &val);
631 if (val <= LL_SA_RPC_MAX)
632 sbi->ll_sa_max = val;
634 CERROR("Bad statahead_max value %lu. Valid values are in the range [0, %d]\n",
639 LUSTRE_RW_ATTR(statahead_max);
641 static ssize_t statahead_agl_show(struct kobject *kobj,
642 struct attribute *attr,
645 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
648 return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_AGL_ENABLED ? 1 : 0);
651 static ssize_t statahead_agl_store(struct kobject *kobj,
652 struct attribute *attr,
656 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
661 rc = kstrtoul(buffer, 10, &val);
666 sbi->ll_flags |= LL_SBI_AGL_ENABLED;
668 sbi->ll_flags &= ~LL_SBI_AGL_ENABLED;
672 LUSTRE_RW_ATTR(statahead_agl);
/*
 * ll_statahead_stats_seq_show() - debugfs "statahead_stats" file: dump the
 * statahead total/wrong and AGL total counters.
 */
674 static int ll_statahead_stats_seq_show(struct seq_file *m, void *v)
676 struct super_block *sb = m->private;
677 struct ll_sb_info *sbi = ll_s2sbi(sb);
680 "statahead total: %u\n"
681 "statahead wrong: %u\n"
683 atomic_read(&sbi->ll_sa_total),
684 atomic_read(&sbi->ll_sa_wrong),
685 atomic_read(&sbi->ll_agl_total));
689 LPROC_SEQ_FOPS_RO(ll_statahead_stats);
/*
 * lazystatfs_show()/_store() - sysfs toggle for the LL_SBI_LAZYSTATFS
 * flag (whether statfs may skip unavailable OSTs).
 */
691 static ssize_t lazystatfs_show(struct kobject *kobj,
692 struct attribute *attr,
695 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
698 return sprintf(buf, "%u\n", sbi->ll_flags & LL_SBI_LAZYSTATFS ? 1 : 0);
701 static ssize_t lazystatfs_store(struct kobject *kobj,
702 struct attribute *attr,
706 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
711 rc = kstrtoul(buffer, 10, &val);
716 sbi->ll_flags |= LL_SBI_LAZYSTATFS;
718 sbi->ll_flags &= ~LL_SBI_LAZYSTATFS;
722 LUSTRE_RW_ATTR(lazystatfs);
/*
 * max_easize_show() / default_easize_show() - read-only sysfs attributes
 * reporting the maximum and default MDS extended-attribute sizes obtained
 * from ll_get_max_mdsize()/ll_get_default_mdsize().
 */
724 static ssize_t max_easize_show(struct kobject *kobj,
725 struct attribute *attr,
728 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
733 rc = ll_get_max_mdsize(sbi, &ealen);
737 return sprintf(buf, "%u\n", ealen);
739 LUSTRE_RO_ATTR(max_easize);
741 static ssize_t default_easize_show(struct kobject *kobj,
742 struct attribute *attr,
745 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
750 rc = ll_get_default_mdsize(sbi, &ealen);
754 return sprintf(buf, "%u\n", ealen);
756 LUSTRE_RO_ATTR(default_easize);
/*
 * ll_sbi_flags_seq_show() - debugfs "sbi_flags" file: print the symbolic
 * name of every set ll_flags bit using the LL_SBI_FLAGS string table,
 * complaining if the table is shorter than the flag word.
 */
758 static int ll_sbi_flags_seq_show(struct seq_file *m, void *v)
760 const char *str[] = LL_SBI_FLAGS;
761 struct super_block *sb = m->private;
762 int flags = ll_s2sbi(sb)->ll_flags;
766 if (ARRAY_SIZE(str) <= i) {
767 CERROR("%s: Revise array LL_SBI_FLAGS to match sbi flags please.\n",
768 ll_get_fsname(sb, NULL, 0));
773 seq_printf(m, "%s ", str[i]);
/* "\b" erases the trailing separator space before the newline */
777 seq_printf(m, "\b\n");
781 LPROC_SEQ_FOPS_RO(ll_sbi_flags);
/*
 * xattr_cache_show()/_store() - sysfs toggle for the client xattr cache.
 * Only 0/1 are accepted, and enabling requires the mount-level
 * LL_SBI_XATTR_CACHE support flag to be set.
 */
783 static ssize_t xattr_cache_show(struct kobject *kobj,
784 struct attribute *attr,
787 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
790 return sprintf(buf, "%u\n", sbi->ll_xattr_cache_enabled);
793 static ssize_t xattr_cache_store(struct kobject *kobj,
794 struct attribute *attr,
798 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
803 rc = kstrtoul(buffer, 10, &val);
807 if (val != 0 && val != 1)
810 if (val == 1 && !(sbi->ll_flags & LL_SBI_XATTR_CACHE))
813 sbi->ll_xattr_cache_enabled = val;
817 LUSTRE_RW_ATTR(xattr_cache);
/*
 * unstable_stats_show() - read-only sysfs attribute: number of unstable
 * (written but not yet committed) pages and the same value in MB.
 */
819 static ssize_t unstable_stats_show(struct kobject *kobj,
820 struct attribute *attr,
823 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
825 struct cl_client_cache *cache = sbi->ll_cache;
828 pages = atomic_read(&cache->ccc_unstable_nr);
829 mb = (pages * PAGE_SIZE) >> 20;
831 return sprintf(buf, "unstable_pages: %8d\n"
832 "unstable_mb: %8d\n", pages, mb);
834 LUSTRE_RO_ATTR(unstable_stats);
/*
 * Table of debugfs files registered per mount by
 * ldebugfs_register_mountpoint(); pairs each file name with its
 * file_operations defined above.
 */
836 static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
837 /* { "mntpt_path", ll_rd_path, 0, 0 }, */
838 { "site", &ll_site_stats_fops, NULL, 0 },
839 /* { "filegroups", lprocfs_rd_filegroups, 0, 0 }, */
840 { "max_cached_mb", &ll_max_cached_mb_fops, NULL },
841 { "statahead_stats", &ll_statahead_stats_fops, NULL, 0 },
842 { "sbi_flags", &ll_sbi_flags_fops, NULL, 0 },
/* Upper bound for the debugfs directory name built in
 * ldebugfs_register_mountpoint(); #undef'd after use. */
846 #define MAX_STRING_SIZE 128
/*
 * Default sysfs attributes attached to each mount's kobject via
 * llite_ktype; one entry per LUSTRE_RO_ATTR/LUSTRE_RW_ATTR above.
 */
848 static struct attribute *llite_attrs[] = {
849 &lustre_attr_blocksize.attr,
850 &lustre_attr_kbytestotal.attr,
851 &lustre_attr_kbytesfree.attr,
852 &lustre_attr_kbytesavail.attr,
853 &lustre_attr_filestotal.attr,
854 &lustre_attr_filesfree.attr,
855 &lustre_attr_client_type.attr,
856 &lustre_attr_fstype.attr,
857 &lustre_attr_uuid.attr,
858 &lustre_attr_max_read_ahead_mb.attr,
859 &lustre_attr_max_read_ahead_per_file_mb.attr,
860 &lustre_attr_max_read_ahead_whole_mb.attr,
861 &lustre_attr_checksum_pages.attr,
862 &lustre_attr_stats_track_pid.attr,
863 &lustre_attr_stats_track_ppid.attr,
864 &lustre_attr_stats_track_gid.attr,
865 &lustre_attr_statahead_max.attr,
866 &lustre_attr_statahead_agl.attr,
867 &lustre_attr_lazystatfs.attr,
868 &lustre_attr_max_easize.attr,
869 &lustre_attr_default_easize.attr,
870 &lustre_attr_xattr_cache.attr,
871 &lustre_attr_unstable_stats.attr,
/*
 * llite_sb_release() - kobject release callback: signal
 * ll_kobj_unregister so ldebugfs_unregister_mountpoint() can finish.
 */
875 static void llite_sb_release(struct kobject *kobj)
877 struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
879 complete(&sbi->ll_kobj_unregister);
/* kobj_type binding the attribute list, sysfs ops and release handler */
882 static struct kobj_type llite_ktype = {
883 .default_attrs = llite_attrs,
884 .sysfs_ops = &lustre_sysfs_ops,
885 .release = llite_sb_release,
/*
 * Table describing every per-mount file-operation counter: opcode, counter
 * type flags (regs / bytes / pages, optionally avg-min-max) and the name
 * shown in the "stats" debugfs file. Indexed up to LPROC_LL_FILE_OPCODES.
 */
888 static const struct llite_file_opcode {
892 } llite_opcode_table[LPROC_LL_FILE_OPCODES] = {
894 { LPROC_LL_DIRTY_HITS, LPROCFS_TYPE_REGS, "dirty_pages_hits" },
895 { LPROC_LL_DIRTY_MISSES, LPROCFS_TYPE_REGS, "dirty_pages_misses" },
896 { LPROC_LL_READ_BYTES, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
898 { LPROC_LL_WRITE_BYTES, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
900 { LPROC_LL_BRW_READ, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
902 { LPROC_LL_BRW_WRITE, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_PAGES,
904 { LPROC_LL_OSC_READ, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
906 { LPROC_LL_OSC_WRITE, LPROCFS_CNTR_AVGMINMAX|LPROCFS_TYPE_BYTES,
908 { LPROC_LL_IOCTL, LPROCFS_TYPE_REGS, "ioctl" },
909 { LPROC_LL_OPEN, LPROCFS_TYPE_REGS, "open" },
910 { LPROC_LL_RELEASE, LPROCFS_TYPE_REGS, "close" },
911 { LPROC_LL_MAP, LPROCFS_TYPE_REGS, "mmap" },
912 { LPROC_LL_LLSEEK, LPROCFS_TYPE_REGS, "seek" },
913 { LPROC_LL_FSYNC, LPROCFS_TYPE_REGS, "fsync" },
914 { LPROC_LL_READDIR, LPROCFS_TYPE_REGS, "readdir" },
915 /* inode operation */
916 { LPROC_LL_SETATTR, LPROCFS_TYPE_REGS, "setattr" },
917 { LPROC_LL_TRUNC, LPROCFS_TYPE_REGS, "truncate" },
918 { LPROC_LL_FLOCK, LPROCFS_TYPE_REGS, "flock" },
919 { LPROC_LL_GETATTR, LPROCFS_TYPE_REGS, "getattr" },
920 /* dir inode operation */
921 { LPROC_LL_CREATE, LPROCFS_TYPE_REGS, "create" },
922 { LPROC_LL_LINK, LPROCFS_TYPE_REGS, "link" },
923 { LPROC_LL_UNLINK, LPROCFS_TYPE_REGS, "unlink" },
924 { LPROC_LL_SYMLINK, LPROCFS_TYPE_REGS, "symlink" },
925 { LPROC_LL_MKDIR, LPROCFS_TYPE_REGS, "mkdir" },
926 { LPROC_LL_RMDIR, LPROCFS_TYPE_REGS, "rmdir" },
927 { LPROC_LL_MKNOD, LPROCFS_TYPE_REGS, "mknod" },
928 { LPROC_LL_RENAME, LPROCFS_TYPE_REGS, "rename" },
929 /* special inode operation */
930 { LPROC_LL_STAFS, LPROCFS_TYPE_REGS, "statfs" },
931 { LPROC_LL_ALLOC_INODE, LPROCFS_TYPE_REGS, "alloc_inode" },
932 { LPROC_LL_SETXATTR, LPROCFS_TYPE_REGS, "setxattr" },
933 { LPROC_LL_GETXATTR, LPROCFS_TYPE_REGS, "getxattr" },
934 { LPROC_LL_GETXATTR_HITS, LPROCFS_TYPE_REGS, "getxattr_hits" },
935 { LPROC_LL_LISTXATTR, LPROCFS_TYPE_REGS, "listxattr" },
936 { LPROC_LL_REMOVEXATTR, LPROCFS_TYPE_REGS, "removexattr" },
937 { LPROC_LL_INODE_PERM, LPROCFS_TYPE_REGS, "inode_permission" },
/*
 * ll_stats_ops_tally() - account one file operation into the per-mount
 * stats, honoring the stats_track_{pid,ppid,gid} filter: count only when
 * tracking everything, or when the current task's pid / parent pid / gid
 * matches the tracked id.
 */
940 void ll_stats_ops_tally(struct ll_sb_info *sbi, int op, int count)
944 if (sbi->ll_stats_track_type == STATS_TRACK_ALL)
945 lprocfs_counter_add(sbi->ll_stats, op, count);
946 else if (sbi->ll_stats_track_type == STATS_TRACK_PID &&
947 sbi->ll_stats_track_id == current->pid)
948 lprocfs_counter_add(sbi->ll_stats, op, count);
949 else if (sbi->ll_stats_track_type == STATS_TRACK_PPID &&
950 sbi->ll_stats_track_id == current->real_parent->pid)
951 lprocfs_counter_add(sbi->ll_stats, op, count);
952 else if (sbi->ll_stats_track_type == STATS_TRACK_GID &&
953 sbi->ll_stats_track_id ==
954 from_kgid(&init_user_ns, current_gid()))
955 lprocfs_counter_add(sbi->ll_stats, op, count);
957 EXPORT_SYMBOL(ll_stats_ops_tally);
/*
 * Human-readable names for the readahead statistic counters, indexed by
 * the RA_STAT_* enum; used to label "read_ahead_stats" entries.
 */
959 static const char *ra_stat_string[] = {
960 [RA_STAT_HIT] = "hits",
961 [RA_STAT_MISS] = "misses",
962 [RA_STAT_DISTANT_READPAGE] = "readpage not consecutive",
963 [RA_STAT_MISS_IN_WINDOW] = "miss inside window",
964 [RA_STAT_FAILED_GRAB_PAGE] = "failed grab_cache_page",
965 [RA_STAT_FAILED_MATCH] = "failed lock match",
966 [RA_STAT_DISCARDED] = "read but discarded",
967 [RA_STAT_ZERO_LEN] = "zero length file",
968 [RA_STAT_ZERO_WINDOW] = "zero size window",
969 [RA_STAT_EOF] = "read-ahead to EOF",
970 [RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
971 [RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
972 [RA_STAT_FAILED_REACH_END] = "failed to reach end"
/*
 * ldebugfs_register_mountpoint() - create this mount's debugfs directory
 * ("<fsname>-<sb ptr>", derived from the lmd profile with any "-client"
 * suffix trimmed), populate it (dump_page_cache, extents_stats,
 * extents_stats_per_process, offset_stats, stats, read_ahead_stats, plus
 * lprocfs_llite_obd_vars), allocate the two lprocfs stats sets, register
 * the sysfs kobject, and link the mdc/osc obd kobjects.
 * On error the visible cleanup removes the debugfs entry and frees both
 * stats sets. NOTE(review): several error-path/label lines are missing
 * from this extraction — the full control flow is not visible here.
 */
975 int ldebugfs_register_mountpoint(struct dentry *parent,
976 struct super_block *sb, char *osc, char *mdc)
978 struct lustre_sb_info *lsi = s2lsi(sb);
979 struct ll_sb_info *sbi = ll_s2sbi(sb);
980 struct obd_device *obd;
982 char name[MAX_STRING_SIZE + 1], *ptr;
983 int err, id, len, rc;
985 name[MAX_STRING_SIZE] = '\0';
/* strip a trailing "-client" from the profile when present */
992 len = strlen(lsi->lsi_lmd->lmd_profile);
993 ptr = strrchr(lsi->lsi_lmd->lmd_profile, '-');
994 if (ptr && (strcmp(ptr, "-client") == 0))
998 snprintf(name, MAX_STRING_SIZE, "%.*s-%p", len,
999 lsi->lsi_lmd->lmd_profile, sb);
1001 dir = ldebugfs_register(name, parent, NULL, NULL);
1002 if (IS_ERR_OR_NULL(dir)) {
1003 err = dir ? PTR_ERR(dir) : -ENOMEM;
1004 sbi->ll_debugfs_entry = NULL;
1007 sbi->ll_debugfs_entry = dir;
1009 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "dump_page_cache", 0444,
1010 &vvp_dump_pgcache_file_ops, sbi);
1012 CWARN("Error adding the dump_page_cache file\n");
1014 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "extents_stats", 0644,
1015 &ll_rw_extents_stats_fops, sbi);
1017 CWARN("Error adding the extent_stats file\n");
1019 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry,
1020 "extents_stats_per_process",
1021 0644, &ll_rw_extents_stats_pp_fops, sbi);
1023 CWARN("Error adding the extents_stats_per_process file\n");
1025 rc = ldebugfs_seq_create(sbi->ll_debugfs_entry, "offset_stats", 0644,
1026 &ll_rw_offset_stats_fops, sbi);
1028 CWARN("Error adding the offset_stats file\n");
1030 /* File operations stats */
1031 sbi->ll_stats = lprocfs_alloc_stats(LPROC_LL_FILE_OPCODES,
1032 LPROCFS_STATS_FLAG_NONE);
1033 if (!sbi->ll_stats) {
1037 /* do counter init */
1038 for (id = 0; id < LPROC_LL_FILE_OPCODES; id++) {
1039 __u32 type = llite_opcode_table[id].type;
/* unit string ("reqs"/"bytes"/"pages") is chosen by the type flags;
 * assignment lines are not visible in this extraction */
1042 if (type & LPROCFS_TYPE_REGS)
1044 else if (type & LPROCFS_TYPE_BYTES)
1046 else if (type & LPROCFS_TYPE_PAGES)
1048 lprocfs_counter_init(sbi->ll_stats,
1049 llite_opcode_table[id].opcode,
1050 (type & LPROCFS_CNTR_AVGMINMAX),
1051 llite_opcode_table[id].opname, ptr);
1053 err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "stats",
1058 sbi->ll_ra_stats = lprocfs_alloc_stats(ARRAY_SIZE(ra_stat_string),
1059 LPROCFS_STATS_FLAG_NONE);
1060 if (!sbi->ll_ra_stats) {
1065 for (id = 0; id < ARRAY_SIZE(ra_stat_string); id++)
1066 lprocfs_counter_init(sbi->ll_ra_stats, id, 0,
1067 ra_stat_string[id], "pages");
1069 err = ldebugfs_register_stats(sbi->ll_debugfs_entry, "read_ahead_stats",
1074 err = ldebugfs_add_vars(sbi->ll_debugfs_entry,
1075 lprocfs_llite_obd_vars, sb);
1079 sbi->ll_kobj.kset = llite_kset;
1080 init_completion(&sbi->ll_kobj_unregister);
1081 err = kobject_init_and_add(&sbi->ll_kobj, &llite_ktype, NULL,
/* expose symlinks to the MDC and OSC obd devices under our kobject */
1087 obd = class_name2obd(mdc);
1089 err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj,
1090 obd->obd_type->typ_name);
1095 obd = class_name2obd(osc);
1097 err = sysfs_create_link(&sbi->ll_kobj, &obd->obd_kobj,
1098 obd->obd_type->typ_name);
/* error path: tear down everything created above */
1101 ldebugfs_remove(&sbi->ll_debugfs_entry);
1102 lprocfs_free_stats(&sbi->ll_ra_stats);
1103 lprocfs_free_stats(&sbi->ll_stats);
/*
 * ldebugfs_unregister_mountpoint() - tear down what
 * ldebugfs_register_mountpoint() created: remove the debugfs entry, drop
 * the kobject and wait for its release callback, then free both stats
 * sets. No-op when registration never happened (ll_debugfs_entry NULL).
 */
1108 void ldebugfs_unregister_mountpoint(struct ll_sb_info *sbi)
1110 if (sbi->ll_debugfs_entry) {
1111 ldebugfs_remove(&sbi->ll_debugfs_entry);
1112 kobject_put(&sbi->ll_kobj);
1113 wait_for_completion(&sbi->ll_kobj_unregister);
1114 lprocfs_free_stats(&sbi->ll_ra_stats);
1115 lprocfs_free_stats(&sbi->ll_stats);
1119 #undef MAX_STRING_SIZE
/*
 * pct() - integer percentage a/b, guarding division by zero.
 * NOTE(review): macro arguments are unparenthesized in the expansion —
 * kept byte-identical here; callers below pass simple expressions only.
 *
 * ll_display_extents_info() - print one process's (or the aggregate's)
 * read/write I/O-size histogram: per power-of-two bucket, the count,
 * percentage and cumulative percentage for reads and writes, with the
 * bucket bounds scaled through the K/M/G/... unit string.
 */
1121 #define pct(a, b) (b ? a * 100 / b : 0)
1123 static void ll_display_extents_info(struct ll_rw_extents_info *io_extents,
1124 struct seq_file *seq, int which)
1126 unsigned long read_tot = 0, write_tot = 0, read_cum, write_cum;
1127 unsigned long start, end, r, w;
1128 char *unitp = "KMGTPEZY";
1130 struct per_process_info *pp_info = &io_extents->pp_extents[which];
/* first pass: totals, needed for the percentage columns */
1136 for (i = 0; i < LL_HIST_MAX; i++) {
1137 read_tot += pp_info->pp_r_hist.oh_buckets[i];
1138 write_tot += pp_info->pp_w_hist.oh_buckets[i];
1141 for (i = 0; i < LL_HIST_MAX; i++) {
1142 r = pp_info->pp_r_hist.oh_buckets[i];
1143 w = pp_info->pp_w_hist.oh_buckets[i];
1146 end = 1 << (i + LL_HIST_START - units);
1147 seq_printf(seq, "%4lu%c - %4lu%c%c: %14lu %4lu %4lu | %14lu %4lu %4lu\n",
1148 start, *unitp, end, *unitp,
1149 (i == LL_HIST_MAX - 1) ? '+' : ' ',
1150 r, pct(r, read_tot), pct(read_cum, read_tot),
1151 w, pct(w, write_tot), pct(write_cum, write_tot));
/* crossed 1024 of the current unit: move to the next unit letter */
1153 if (start == 1<<10) {
1158 if (read_cum == read_tot && write_cum == write_tot)
/*
 * ll_rw_extents_stats_pp_seq_show()/_seq_write() - debugfs
 * "extents_stats_per_process" file.
 * show: when stats are enabled, print a snapshot time, a header, then one
 * histogram per tracked PID (pp_extents slot with a non-zero pid), under
 * ll_pp_extent_lock.
 * write: "0" or "[Dd]isabled" turns collection off, anything else turns
 * it on; either way the per-process histograms are reset.
 */
1163 static int ll_rw_extents_stats_pp_seq_show(struct seq_file *seq, void *v)
1165 struct timespec64 now;
1166 struct ll_sb_info *sbi = seq->private;
1167 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1170 ktime_get_real_ts64(&now);
1172 if (!sbi->ll_rw_stats_on) {
1173 seq_printf(seq, "disabled\n"
1174 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1177 seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n",
1178 (s64)now.tv_sec, (unsigned long)now.tv_nsec);
1179 seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
1180 seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
1181 "extents", "calls", "%", "cum%",
1182 "calls", "%", "cum%");
1183 spin_lock(&sbi->ll_pp_extent_lock);
1184 for (k = 0; k < LL_PROCESS_HIST_MAX; k++) {
1185 if (io_extents->pp_extents[k].pid != 0) {
1186 seq_printf(seq, "\nPID: %d\n",
1187 io_extents->pp_extents[k].pid);
1188 ll_display_extents_info(io_extents, seq, k);
1191 spin_unlock(&sbi->ll_pp_extent_lock);
1195 static ssize_t ll_rw_extents_stats_pp_seq_write(struct file *file,
1196 const char __user *buf,
1200 struct seq_file *seq = file->private_data;
1201 struct ll_sb_info *sbi = seq->private;
1202 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1204 int value = 1, rc = 0;
/* not a plain integer: fall back to parsing a short keyword string */
1209 rc = lprocfs_write_helper(buf, len, &value);
1210 if (rc < 0 && len < 16) {
1213 if (copy_from_user(kernbuf, buf, len))
1217 if (kernbuf[len - 1] == '\n')
1218 kernbuf[len - 1] = 0;
1220 if (strcmp(kernbuf, "disabled") == 0 ||
1221 strcmp(kernbuf, "Disabled") == 0)
1226 sbi->ll_rw_stats_on = 0;
1228 sbi->ll_rw_stats_on = 1;
1230 spin_lock(&sbi->ll_pp_extent_lock);
1231 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1232 io_extents->pp_extents[i].pid = 0;
1233 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1234 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1236 spin_unlock(&sbi->ll_pp_extent_lock);
1240 LPROC_SEQ_FOPS(ll_rw_extents_stats_pp);
/*
 * ll_rw_extents_stats_seq_show()/_seq_write() - debugfs "extents_stats"
 * file: like the per-process variant but shows only the aggregate
 * histogram (slot LL_PROCESS_HIST_MAX) under sbi->ll_lock. The write path
 * mirrors the per-process one, clearing slots 0..LL_PROCESS_HIST_MAX
 * inclusive (the aggregate slot too, hence the "<=" bound here).
 */
1242 static int ll_rw_extents_stats_seq_show(struct seq_file *seq, void *v)
1244 struct timespec64 now;
1245 struct ll_sb_info *sbi = seq->private;
1246 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1248 ktime_get_real_ts64(&now);
1250 if (!sbi->ll_rw_stats_on) {
1251 seq_printf(seq, "disabled\n"
1252 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1255 seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n",
1256 (u64)now.tv_sec, (unsigned long)now.tv_nsec);
1258 seq_printf(seq, "%15s %19s | %20s\n", " ", "read", "write");
1259 seq_printf(seq, "%13s %14s %4s %4s | %14s %4s %4s\n",
1260 "extents", "calls", "%", "cum%",
1261 "calls", "%", "cum%");
1262 spin_lock(&sbi->ll_lock);
1263 ll_display_extents_info(io_extents, seq, LL_PROCESS_HIST_MAX);
1264 spin_unlock(&sbi->ll_lock);
1269 static ssize_t ll_rw_extents_stats_seq_write(struct file *file,
1270 const char __user *buf,
1271 size_t len, loff_t *off)
1273 struct seq_file *seq = file->private_data;
1274 struct ll_sb_info *sbi = seq->private;
1275 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1277 int value = 1, rc = 0;
1282 rc = lprocfs_write_helper(buf, len, &value);
1283 if (rc < 0 && len < 16) {
1286 if (copy_from_user(kernbuf, buf, len))
1290 if (kernbuf[len - 1] == '\n')
1291 kernbuf[len - 1] = 0;
1293 if (strcmp(kernbuf, "disabled") == 0 ||
1294 strcmp(kernbuf, "Disabled") == 0)
1299 sbi->ll_rw_stats_on = 0;
1301 sbi->ll_rw_stats_on = 1;
1303 spin_lock(&sbi->ll_pp_extent_lock);
1304 for (i = 0; i <= LL_PROCESS_HIST_MAX; i++) {
1305 io_extents->pp_extents[i].pid = 0;
1306 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_r_hist);
1307 lprocfs_oh_clear(&io_extents->pp_extents[i].pp_w_hist);
1309 spin_unlock(&sbi->ll_pp_extent_lock);
1314 LPROC_SEQ_FOPS(ll_rw_extents_stats);
/*
 * ll_rw_stats_tally() - record one read/write of 'count' bytes at 'pos'
 * for process 'pid'.
 *
 * Extent part (under ll_pp_extent_lock): find/claim this pid's slot in
 * pp_extents (slots are recycled round-robin via ll_extent_process_count),
 * pick the log2 histogram bucket for 'count', and bump both the per-pid
 * and the aggregate (slot LL_PROCESS_HIST_MAX) read or write bucket.
 *
 * Offset part (under ll_process_lock): track per-process sequential
 * ranges. Same pid + same file: either extend the current range (updating
 * smallest/largest extent and last_file_pos), or — on a seek — archive
 * the finished range into the round-robin ll_rw_offset_info ring and
 * start a new one recording the seek offset. A new file or unknown pid
 * (re)initializes a process slot. No-op when ll_rw_stats_on is off.
 * NOTE(review): some loop/branch lines are missing from this extraction.
 */
1316 void ll_rw_stats_tally(struct ll_sb_info *sbi, pid_t pid,
1317 struct ll_file_data *file, loff_t pos,
1318 size_t count, int rw)
1321 struct ll_rw_process_info *process;
1322 struct ll_rw_process_info *offset;
1323 int *off_count = &sbi->ll_rw_offset_entry_count;
1324 int *process_count = &sbi->ll_offset_process_count;
1325 struct ll_rw_extents_info *io_extents = &sbi->ll_rw_extents_info;
1327 if (!sbi->ll_rw_stats_on)
1329 process = sbi->ll_rw_process_info;
1330 offset = sbi->ll_rw_offset_info;
1332 spin_lock(&sbi->ll_pp_extent_lock);
1333 /* Extent statistics */
1334 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1335 if (io_extents->pp_extents[i].pid == pid) {
/* pid not found: recycle the next slot round-robin and reset it */
1343 sbi->ll_extent_process_count =
1344 (sbi->ll_extent_process_count + 1) % LL_PROCESS_HIST_MAX;
1345 cur = sbi->ll_extent_process_count;
1346 io_extents->pp_extents[cur].pid = pid;
1347 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_r_hist);
1348 lprocfs_oh_clear(&io_extents->pp_extents[cur].pp_w_hist);
/* log2 bucket index: smallest i with count < 2^(LL_HIST_START + i) */
1351 for (i = 0; (count >= (1 << LL_HIST_START << i)) &&
1352 (i < (LL_HIST_MAX - 1)); i++)
1355 io_extents->pp_extents[cur].pp_r_hist.oh_buckets[i]++;
1356 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_r_hist.oh_buckets[i]++;
1358 io_extents->pp_extents[cur].pp_w_hist.oh_buckets[i]++;
1359 io_extents->pp_extents[LL_PROCESS_HIST_MAX].pp_w_hist.oh_buckets[i]++;
1361 spin_unlock(&sbi->ll_pp_extent_lock);
1363 spin_lock(&sbi->ll_process_lock);
1364 /* Offset statistics */
1365 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1366 if (process[i].rw_pid == pid) {
1367 if (process[i].rw_last_file != file) {
/* new file for this pid: start a fresh range */
1368 process[i].rw_range_start = pos;
1369 process[i].rw_last_file_pos = pos + count;
1370 process[i].rw_smallest_extent = count;
1371 process[i].rw_largest_extent = count;
1372 process[i].rw_offset = 0;
1373 process[i].rw_last_file = file;
1374 spin_unlock(&sbi->ll_process_lock);
/* non-sequential access: archive the finished range, begin a new one */
1377 if (process[i].rw_last_file_pos != pos) {
1379 (*off_count + 1) % LL_OFFSET_HIST_MAX;
1380 offset[*off_count].rw_op = process[i].rw_op;
1381 offset[*off_count].rw_pid = pid;
1382 offset[*off_count].rw_range_start =
1383 process[i].rw_range_start;
1384 offset[*off_count].rw_range_end =
1385 process[i].rw_last_file_pos;
1386 offset[*off_count].rw_smallest_extent =
1387 process[i].rw_smallest_extent;
1388 offset[*off_count].rw_largest_extent =
1389 process[i].rw_largest_extent;
1390 offset[*off_count].rw_offset =
1391 process[i].rw_offset;
1392 process[i].rw_op = rw;
1393 process[i].rw_range_start = pos;
1394 process[i].rw_smallest_extent = count;
1395 process[i].rw_largest_extent = count;
1396 process[i].rw_offset = pos -
1397 process[i].rw_last_file_pos;
1399 if (process[i].rw_smallest_extent > count)
1400 process[i].rw_smallest_extent = count;
1401 if (process[i].rw_largest_extent < count)
1402 process[i].rw_largest_extent = count;
1403 process[i].rw_last_file_pos = pos + count;
1404 spin_unlock(&sbi->ll_process_lock);
/* unknown pid: claim the next process slot round-robin */
1408 *process_count = (*process_count + 1) % LL_PROCESS_HIST_MAX;
1409 process[*process_count].rw_pid = pid;
1410 process[*process_count].rw_op = rw;
1411 process[*process_count].rw_range_start = pos;
1412 process[*process_count].rw_last_file_pos = pos + count;
1413 process[*process_count].rw_smallest_extent = count;
1414 process[*process_count].rw_largest_extent = count;
1415 process[*process_count].rw_offset = 0;
1416 process[*process_count].rw_last_file = file;
1417 spin_unlock(&sbi->ll_process_lock);
1420 static int ll_rw_offset_stats_seq_show(struct seq_file *seq, void *v)
1422 struct timespec64 now;
1423 struct ll_sb_info *sbi = seq->private;
1424 struct ll_rw_process_info *offset = sbi->ll_rw_offset_info;
1425 struct ll_rw_process_info *process = sbi->ll_rw_process_info;
1428 ktime_get_real_ts64(&now);
1430 if (!sbi->ll_rw_stats_on) {
1431 seq_printf(seq, "disabled\n"
1432 "write anything in this file to activate, then 0 or \"[D/d]isabled\" to deactivate\n");
1435 spin_lock(&sbi->ll_process_lock);
1437 seq_printf(seq, "snapshot_time: %llu.%09lu (secs.usecs)\n",
1438 (s64)now.tv_sec, (unsigned long)now.tv_nsec);
1439 seq_printf(seq, "%3s %10s %14s %14s %17s %17s %14s\n",
1440 "R/W", "PID", "RANGE START", "RANGE END",
1441 "SMALLEST EXTENT", "LARGEST EXTENT", "OFFSET");
1442 /* We stored the discontiguous offsets here; print them first */
1443 for (i = 0; i < LL_OFFSET_HIST_MAX; i++) {
1444 if (offset[i].rw_pid != 0)
1446 "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
1447 offset[i].rw_op == READ ? 'R' : 'W',
1449 offset[i].rw_range_start,
1450 offset[i].rw_range_end,
1451 (unsigned long)offset[i].rw_smallest_extent,
1452 (unsigned long)offset[i].rw_largest_extent,
1453 offset[i].rw_offset);
1455 /* Then print the current offsets for each process */
1456 for (i = 0; i < LL_PROCESS_HIST_MAX; i++) {
1457 if (process[i].rw_pid != 0)
1459 "%3c %10d %14Lu %14Lu %17lu %17lu %14Lu",
1460 process[i].rw_op == READ ? 'R' : 'W',
1462 process[i].rw_range_start,
1463 process[i].rw_last_file_pos,
1464 (unsigned long)process[i].rw_smallest_extent,
1465 (unsigned long)process[i].rw_largest_extent,
1466 process[i].rw_offset);
1468 spin_unlock(&sbi->ll_process_lock);
1473 static ssize_t ll_rw_offset_stats_seq_write(struct file *file,
1474 const char __user *buf,
1475 size_t len, loff_t *off)
1477 struct seq_file *seq = file->private_data;
1478 struct ll_sb_info *sbi = seq->private;
1479 struct ll_rw_process_info *process_info = sbi->ll_rw_process_info;
1480 struct ll_rw_process_info *offset_info = sbi->ll_rw_offset_info;
1481 int value = 1, rc = 0;
1486 rc = lprocfs_write_helper(buf, len, &value);
1488 if (rc < 0 && len < 16) {
1491 if (copy_from_user(kernbuf, buf, len))
1495 if (kernbuf[len - 1] == '\n')
1496 kernbuf[len - 1] = 0;
1498 if (strcmp(kernbuf, "disabled") == 0 ||
1499 strcmp(kernbuf, "Disabled") == 0)
1504 sbi->ll_rw_stats_on = 0;
1506 sbi->ll_rw_stats_on = 1;
1508 spin_lock(&sbi->ll_process_lock);
1509 sbi->ll_offset_process_count = 0;
1510 sbi->ll_rw_offset_entry_count = 0;
1511 memset(process_info, 0, sizeof(struct ll_rw_process_info) *
1512 LL_PROCESS_HIST_MAX);
1513 memset(offset_info, 0, sizeof(struct ll_rw_process_info) *
1514 LL_OFFSET_HIST_MAX);
1515 spin_unlock(&sbi->ll_process_lock);
1520 LPROC_SEQ_FOPS(ll_rw_offset_stats);
1522 void lprocfs_llite_init_vars(struct lprocfs_static_vars *lvars)
1524 lvars->obd_vars = lprocfs_llite_obd_vars;