4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or have any questions.
27 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2015, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/include/lustre/lustre_idl.h
38 * Lustre wire protocol definitions.
41 /** \defgroup lustreidl lustreidl
43 * Lustre wire protocol definitions.
45 * ALL structs passing over the wire should be declared here. Structs
46 * that are used in interfaces with userspace should go in lustre_user.h.
48 * All structs being declared here should be built from simple fixed-size
49 * types (__u8, __u16, __u32, __u64) or be built from other types or
50 * structs also declared in this file. Similarly, all flags and magic
51 * values in those structs should also be declared here. This ensures
52 * that the Lustre wire protocol is not influenced by external dependencies.
54 * The only other acceptable items in this file are VERY SIMPLE accessor
55 * functions to avoid callers grubbing inside the structures, and the
56 * prototypes of the swabber functions for each struct. Nothing that
57 * depends on external functions or definitions should be in here.
59 * Structs must be properly aligned to put 64-bit values on an 8-byte
60 * boundary. Any structs being added here must also be added to
61 * utils/wirecheck.c and "make newwiretest" run to regenerate the
62 * utils/wiretest.c sources. This allows us to verify that wire structs
63 * have the proper alignment/size on all architectures.
65 * DO NOT CHANGE any of the structs, flags, values declared here and used
66 * in released Lustre versions. Some structs may have padding fields that
67 * can be used. Some structs might allow addition at the end (verify this
68 * in the code to ensure that new/old clients that see this larger struct
69 * do not fail, otherwise you need to implement protocol compatibility).
71 * We assume all nodes are either little-endian or big-endian, and we
72 * always send messages in the sender's native format. The receiver
73 * detects the message format by checking the 'magic' field of the message
74 * (see lustre_msg_swabbed() below).
76 * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
77 * implemented either here, inline (trivial implementations) or in
78 * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other"
79 * endian to the native endianness, in place in the message buffer.
81 * A swabber takes a single pointer argument. The caller must already have
82 * verified that the length of the message buffer >= sizeof (type).
84 * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
85 * may be defined that swabs just the variable part, after the caller has
86 * verified that the message buffer is large enough.
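/*
 * Illustrative sketch only (not part of this header): a receiver can decide
 * whether an incoming message needs swabbing by comparing its magic against
 * the swabbed constant defined below, e.g.:
 *
 *	if (msg->lm_magic == LUSTRE_MSG_MAGIC_V2_SWABBED)
 *		... apply the lustre_swab_xxx() routines in place to each buffer ...
 *
 * The lm_magic field belongs to struct lustre_msg_v2 declared later in this
 * file; the surrounding control flow here is hypothetical.
 */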
91 #ifndef _LUSTRE_IDL_H_
92 #define _LUSTRE_IDL_H_
94 #include "../../../include/linux/libcfs/libcfs.h"
95 #include "../../../include/linux/lnet/types.h"
97 /* Defn's shared with user-space. */
98 #include "lustre_user.h"
99 #include "lustre_errno.h"
104 /* FOO_REQUEST_PORTAL is for incoming requests on the FOO
105 * FOO_REPLY_PORTAL is for incoming replies on the FOO
106 * FOO_BULK_PORTAL is for incoming bulk on the FOO
109 /* Lustre service names follow the format:
110 * service name + MDT + seq name
112 #define LUSTRE_MDT_MAXNAMELEN 80
114 #define CONNMGR_REQUEST_PORTAL 1
115 #define CONNMGR_REPLY_PORTAL 2
116 /*#define OSC_REQUEST_PORTAL 3 */
117 #define OSC_REPLY_PORTAL 4
118 /*#define OSC_BULK_PORTAL 5 */
119 #define OST_IO_PORTAL 6
120 #define OST_CREATE_PORTAL 7
121 #define OST_BULK_PORTAL 8
122 /*#define MDC_REQUEST_PORTAL 9 */
123 #define MDC_REPLY_PORTAL 10
124 /*#define MDC_BULK_PORTAL 11 */
125 #define MDS_REQUEST_PORTAL 12
126 /*#define MDS_REPLY_PORTAL 13 */
127 #define MDS_BULK_PORTAL 14
128 #define LDLM_CB_REQUEST_PORTAL 15
129 #define LDLM_CB_REPLY_PORTAL 16
130 #define LDLM_CANCEL_REQUEST_PORTAL 17
131 #define LDLM_CANCEL_REPLY_PORTAL 18
132 /*#define PTLBD_REQUEST_PORTAL 19 */
133 /*#define PTLBD_REPLY_PORTAL 20 */
134 /*#define PTLBD_BULK_PORTAL 21 */
135 #define MDS_SETATTR_PORTAL 22
136 #define MDS_READPAGE_PORTAL 23
137 #define OUT_PORTAL 24
139 #define MGC_REPLY_PORTAL 25
140 #define MGS_REQUEST_PORTAL 26
141 #define MGS_REPLY_PORTAL 27
142 #define OST_REQUEST_PORTAL 28
143 #define FLD_REQUEST_PORTAL 29
144 #define SEQ_METADATA_PORTAL 30
145 #define SEQ_DATA_PORTAL 31
146 #define SEQ_CONTROLLER_PORTAL 32
147 #define MGS_BULK_PORTAL 33
149 /* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com,
154 #define PTL_RPC_MSG_REQUEST 4711
155 #define PTL_RPC_MSG_ERR 4712
156 #define PTL_RPC_MSG_REPLY 4713
158 /* DON'T use swabbed values of MAGIC as magic! */
159 #define LUSTRE_MSG_MAGIC_V2 0x0BD00BD3
160 #define LUSTRE_MSG_MAGIC_V2_SWABBED 0xD30BD00B
162 #define LUSTRE_MSG_MAGIC LUSTRE_MSG_MAGIC_V2
164 #define PTLRPC_MSG_VERSION 0x00000003
165 #define LUSTRE_VERSION_MASK 0xffff0000
166 #define LUSTRE_OBD_VERSION 0x00010000
167 #define LUSTRE_MDS_VERSION 0x00020000
168 #define LUSTRE_OST_VERSION 0x00030000
169 #define LUSTRE_DLM_VERSION 0x00040000
170 #define LUSTRE_LOG_VERSION 0x00050000
171 #define LUSTRE_MGS_VERSION 0x00060000
174 * Describes a range of sequence numbers; lsr_start is included but lsr_end is not in the range.
176 * The same structure is used in the fld module, where the lsr_index field holds the mdt id.
179 struct lu_seq_range {
186 struct lu_seq_range_array {
189 struct lu_seq_range lsra_lsr[0];
192 #define LU_SEQ_RANGE_MDT 0x0
193 #define LU_SEQ_RANGE_OST 0x1
194 #define LU_SEQ_RANGE_ANY 0x3
196 #define LU_SEQ_RANGE_MASK 0x3
198 static inline unsigned fld_range_type(const struct lu_seq_range *range)
200 return range->lsr_flags & LU_SEQ_RANGE_MASK;
203 static inline int fld_range_is_ost(const struct lu_seq_range *range)
205 return fld_range_type(range) == LU_SEQ_RANGE_OST;
208 static inline int fld_range_is_mdt(const struct lu_seq_range *range)
210 return fld_range_type(range) == LU_SEQ_RANGE_MDT;
214 * The ANY range type is only used when the fld client sends an fld query request
215 * but does not know whether the seq belongs to an MDT or an OST; it sends the request
216 * with the ANY type, meaning either seq type returned from the lookup is acceptable.
219 static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
221 return fld_range_type(range) == LU_SEQ_RANGE_ANY;
224 static inline void fld_range_set_type(struct lu_seq_range *range,
227 range->lsr_flags |= flags;
230 static inline void fld_range_set_mdt(struct lu_seq_range *range)
232 fld_range_set_type(range, LU_SEQ_RANGE_MDT);
235 static inline void fld_range_set_ost(struct lu_seq_range *range)
237 fld_range_set_type(range, LU_SEQ_RANGE_OST);
240 static inline void fld_range_set_any(struct lu_seq_range *range)
242 fld_range_set_type(range, LU_SEQ_RANGE_ANY);
246 * returns width of given range \a r
249 static inline __u64 range_space(const struct lu_seq_range *range)
251 return range->lsr_end - range->lsr_start;
255 * initialize range to zero
258 static inline void range_init(struct lu_seq_range *range)
260 memset(range, 0, sizeof(*range));
264 * check if given seq id \a s is within given range \a r
267 static inline int range_within(const struct lu_seq_range *range,
270 return s >= range->lsr_start && s < range->lsr_end;
273 static inline int range_is_sane(const struct lu_seq_range *range)
275 return (range->lsr_end >= range->lsr_start);
278 static inline int range_is_zero(const struct lu_seq_range *range)
280 return (range->lsr_start == 0 && range->lsr_end == 0);
283 static inline int range_is_exhausted(const struct lu_seq_range *range)
286 return range_space(range) == 0;
289 /* return 0 if the two ranges have the same location */
290 static inline int range_compare_loc(const struct lu_seq_range *r1,
291 const struct lu_seq_range *r2)
293 return r1->lsr_index != r2->lsr_index ||
294 r1->lsr_flags != r2->lsr_flags;
297 #define DRANGE "[%#16.16Lx-%#16.16Lx):%x:%s"
299 #define PRANGE(range) \
300 (range)->lsr_start, \
302 (range)->lsr_index, \
303 fld_range_is_mdt(range) ? "mdt" : "ost"
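/*
 * Usage sketch (illustrative only, not part of the wire protocol): DRANGE and
 * PRANGE are meant to be used together as a printf format/argument pair, e.g.:
 *
 *	CDEBUG(D_INFO, "range "DRANGE"\n", PRANGE(range));
 *
 * CDEBUG and D_INFO come from the libcfs headers included above; the variable
 * name 'range' is hypothetical.
 */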
305 /** \defgroup lu_fid lu_fid
310 * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
311 * Deprecated since HSM and SOM attributes are now stored in separate on-disk xattrs.
315 LMAC_HSM = 0x00000001,
316 LMAC_SOM = 0x00000002,
317 LMAC_NOT_IN_OI = 0x00000004, /* the object does NOT need OI mapping */
318 LMAC_FID_ON_OST = 0x00000008, /* For OST-object, its OI mapping is
319 * under /O/<seq>/d<x>.
324 * Masks for all features that should be supported by a Lustre version to
325 * access a specific file.
326 * This information is stored in lustre_mdt_attrs::lma_incompat.
329 LMAI_RELEASED = 0x00000001, /* file is released */
330 LMAI_AGENT = 0x00000002, /* agent inode */
331 LMAI_REMOTE_PARENT = 0x00000004, /* the parent of the object
332 * is on the remote MDT
336 #define LMA_INCOMPAT_SUPP (LMAI_AGENT | LMAI_REMOTE_PARENT)
342 /** LASTID file has zero OID */
343 LUSTRE_FID_LASTID_OID = 0UL,
344 /** initial fid id value */
345 LUSTRE_FID_INIT_OID = 1UL
348 /** returns fid object sequence */
349 static inline __u64 fid_seq(const struct lu_fid *fid)
354 /** returns fid object id */
355 static inline __u32 fid_oid(const struct lu_fid *fid)
360 /** returns fid object version */
361 static inline __u32 fid_ver(const struct lu_fid *fid)
366 static inline void fid_zero(struct lu_fid *fid)
368 memset(fid, 0, sizeof(*fid));
371 static inline __u64 fid_ver_oid(const struct lu_fid *fid)
373 return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
376 /* copytool uses a 32b bitmask field to encode archive-Ids during register
378 * archive num = 0 => all
379 * archive num from 1 to 32
381 #define LL_HSM_MAX_ARCHIVE (sizeof(__u32) * 8)
384 * Note that reserved SEQ numbers below 12 will conflict with ldiskfs
385 * inodes in the IGIF namespace, so these reserved SEQ numbers can be
386 * used for other purposes and not risk collisions with existing inodes.
388 * Different FID Format
389 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
392 FID_SEQ_OST_MDT0 = 0,
393 FID_SEQ_LLOG = 1, /* unnamed llogs */
395 FID_SEQ_OST_MDT1 = 3,
396 FID_SEQ_OST_MAX = 9, /* Max MDT count before OST_on_FID */
397 FID_SEQ_LLOG_NAME = 10, /* named llogs */
400 FID_SEQ_IGIF_MAX = 0x0ffffffffULL,
401 FID_SEQ_IDIF = 0x100000000ULL,
402 FID_SEQ_IDIF_MAX = 0x1ffffffffULL,
403 /* Normal FID sequence starts from this value, i.e. 1<<33 */
404 FID_SEQ_START = 0x200000000ULL,
405 /* sequence for local pre-defined FIDs listed in local_oid */
406 FID_SEQ_LOCAL_FILE = 0x200000001ULL,
407 FID_SEQ_DOT_LUSTRE = 0x200000002ULL,
408 /* sequence is used for local named objects FIDs generated
409 * by local_object_storage library
411 FID_SEQ_LOCAL_NAME = 0x200000003ULL,
412 /* Because the current FLD only caches the fid sequence, instead
413 * of the oid, on the client side, if the FID needs to be exposed to
414 * clients it must be ensured that all fids under one
415 * sequence are located on one MDT.
417 FID_SEQ_SPECIAL = 0x200000004ULL,
418 FID_SEQ_QUOTA = 0x200000005ULL,
419 FID_SEQ_QUOTA_GLB = 0x200000006ULL,
420 FID_SEQ_ROOT = 0x200000007ULL, /* Located on MDT0 */
421 FID_SEQ_NORMAL = 0x200000400ULL,
422 FID_SEQ_LOV_DEFAULT = 0xffffffffffffffffULL
425 #define OBIF_OID_MAX_BITS 32
426 #define OBIF_MAX_OID (1ULL << OBIF_OID_MAX_BITS)
427 #define OBIF_OID_MASK ((1ULL << OBIF_OID_MAX_BITS) - 1)
428 #define IDIF_OID_MAX_BITS 48
429 #define IDIF_MAX_OID (1ULL << IDIF_OID_MAX_BITS)
430 #define IDIF_OID_MASK ((1ULL << IDIF_OID_MAX_BITS) - 1)
432 /** OID for FID_SEQ_SPECIAL */
434 /* Big Filesystem Lock to serialize rename operations */
435 FID_OID_SPECIAL_BFL = 1UL,
438 /** OID for FID_SEQ_DOT_LUSTRE */
439 enum dot_lustre_oid {
440 FID_OID_DOT_LUSTRE = 1UL,
441 FID_OID_DOT_LUSTRE_OBF = 2UL,
444 static inline int fid_seq_is_mdt0(__u64 seq)
446 return (seq == FID_SEQ_OST_MDT0);
449 static inline int fid_seq_is_mdt(const __u64 seq)
451 return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
454 static inline int fid_seq_is_echo(__u64 seq)
456 return (seq == FID_SEQ_ECHO);
459 static inline int fid_is_echo(const struct lu_fid *fid)
461 return fid_seq_is_echo(fid_seq(fid));
464 static inline int fid_seq_is_llog(__u64 seq)
466 return (seq == FID_SEQ_LLOG);
469 static inline int fid_is_llog(const struct lu_fid *fid)
471 /* file with OID == 0 is not llog but contains last oid */
472 return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
475 static inline int fid_seq_is_rsvd(const __u64 seq)
477 return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);
480 static inline int fid_seq_is_special(const __u64 seq)
482 return seq == FID_SEQ_SPECIAL;
485 static inline int fid_seq_is_local_file(const __u64 seq)
487 return seq == FID_SEQ_LOCAL_FILE ||
488 seq == FID_SEQ_LOCAL_NAME;
491 static inline int fid_seq_is_root(const __u64 seq)
493 return seq == FID_SEQ_ROOT;
496 static inline int fid_seq_is_dot(const __u64 seq)
498 return seq == FID_SEQ_DOT_LUSTRE;
501 static inline int fid_seq_is_default(const __u64 seq)
503 return seq == FID_SEQ_LOV_DEFAULT;
506 static inline int fid_is_mdt0(const struct lu_fid *fid)
508 return fid_seq_is_mdt0(fid_seq(fid));
511 static inline void lu_root_fid(struct lu_fid *fid)
513 fid->f_seq = FID_SEQ_ROOT;
519 * Check if a fid is igif or not.
520 * \param fid the fid to be tested.
521 * \return true if the fid is an igif; otherwise false.
523 static inline int fid_seq_is_igif(const __u64 seq)
525 return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
528 static inline int fid_is_igif(const struct lu_fid *fid)
530 return fid_seq_is_igif(fid_seq(fid));
534 * Check if a fid is idif or not.
535 * \param fid the fid to be tested.
536 * \return true if the fid is an idif; otherwise false.
538 static inline int fid_seq_is_idif(const __u64 seq)
540 return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
543 static inline int fid_is_idif(const struct lu_fid *fid)
545 return fid_seq_is_idif(fid_seq(fid));
548 static inline int fid_is_local_file(const struct lu_fid *fid)
550 return fid_seq_is_local_file(fid_seq(fid));
553 static inline int fid_seq_is_norm(const __u64 seq)
555 return (seq >= FID_SEQ_NORMAL);
558 static inline int fid_is_norm(const struct lu_fid *fid)
560 return fid_seq_is_norm(fid_seq(fid));
563 /* convert an OST objid into an IDIF FID SEQ number */
564 static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx)
566 return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
569 /* convert a packed IDIF FID into an OST objid */
570 static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver)
572 return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
575 /* extract ost index from IDIF FID */
576 static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
578 return (fid_seq(fid) >> 16) & 0xffff;
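/*
 * Worked example (illustrative only): for OST index 2 and legacy object id
 * 0x100000005 (i.e. 2^32 + 5), fid_idif_seq() yields
 *
 *	FID_SEQ_IDIF | (2 << 16) | ((0x100000005ULL >> 32) & 0xffff)
 *	    = 0x100000000 | 0x20000 | 0x1 = 0x100020001
 *
 * With f_oid set to the low 32 bits (0x5) and f_ver to 0, fid_idif_id()
 * reconstructs the original id: ((0x100020001 & 0xffff) << 32) | 0x5
 * = 0x100000005, and fid_idif_ost_idx() recovers the OST index 2.
 */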
581 /* extract OST sequence (group) from a wire ost_id (id/seq) pair */
582 static inline __u64 ostid_seq(const struct ost_id *ostid)
584 if (fid_seq_is_mdt0(ostid->oi.oi_seq))
585 return FID_SEQ_OST_MDT0;
587 if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
588 return FID_SEQ_LOV_DEFAULT;
590 if (fid_is_idif(&ostid->oi_fid))
591 return FID_SEQ_OST_MDT0;
593 return fid_seq(&ostid->oi_fid);
596 /* extract OST objid from a wire ost_id (id/seq) pair */
597 static inline __u64 ostid_id(const struct ost_id *ostid)
599 if (fid_seq_is_mdt0(ostid->oi.oi_seq))
600 return ostid->oi.oi_id & IDIF_OID_MASK;
602 if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
603 return ostid->oi.oi_id;
605 if (fid_is_idif(&ostid->oi_fid))
606 return fid_idif_id(fid_seq(&ostid->oi_fid),
607 fid_oid(&ostid->oi_fid), 0);
609 return fid_oid(&ostid->oi_fid);
612 static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
614 if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
617 oi->oi_fid.f_seq = seq;
618 /* Note: if f_oid + f_ver is zero, we need to init it
619 * to 1; otherwise ostid_seq will treat this
620 * as an old ostid (oi_seq == 0)
622 if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
623 oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
627 static inline void ostid_set_seq_mdt0(struct ost_id *oi)
629 ostid_set_seq(oi, FID_SEQ_OST_MDT0);
632 static inline void ostid_set_seq_echo(struct ost_id *oi)
634 ostid_set_seq(oi, FID_SEQ_ECHO);
637 static inline void ostid_set_seq_llog(struct ost_id *oi)
639 ostid_set_seq(oi, FID_SEQ_LLOG);
643 * Note: we need to check oi_seq to decide where to set oi_id,
644 * so oi_seq should always be set ahead of oi_id.
646 static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
648 if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
649 if (oid >= IDIF_MAX_OID) {
650 CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
654 } else if (fid_is_idif(&oi->oi_fid)) {
655 if (oid >= IDIF_MAX_OID) {
656 CERROR("Bad %llu to set "DOSTID"\n",
660 oi->oi_fid.f_seq = fid_idif_seq(oid,
661 fid_idif_ost_idx(&oi->oi_fid));
662 oi->oi_fid.f_oid = oid;
663 oi->oi_fid.f_ver = oid >> 48;
665 if (oid > OBIF_MAX_OID) {
666 CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
669 oi->oi_fid.f_oid = oid;
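/*
 * Usage sketch (illustrative only): because ostid_set_id() inspects the
 * sequence to decide how the id is stored, the sequence must be set first:
 *
 *	struct ost_id oi;
 *
 *	ostid_set_seq_mdt0(&oi);
 *	ostid_set_id(&oi, objid);
 *
 * The variable names oi and objid are hypothetical.
 */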
673 static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
675 if (unlikely(fid_seq_is_igif(fid->f_seq))) {
676 CERROR("bad IGIF, "DFID"\n", PFID(fid));
680 if (fid_is_idif(fid)) {
681 if (oid >= IDIF_MAX_OID) {
682 CERROR("Too large OID %#llx to set IDIF "DFID"\n",
683 (unsigned long long)oid, PFID(fid));
686 fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
688 fid->f_ver = oid >> 48;
690 if (oid > OBIF_MAX_OID) {
691 CERROR("Too large OID %#llx to set REG "DFID"\n",
692 (unsigned long long)oid, PFID(fid));
701 * Unpack an OST object id/seq (group) into a FID. This is needed for
702 * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
703 * FIDs. Note that if an id/seq is already in FID/IDIF format it will
704 * be passed through unchanged. Only legacy OST objects in "group 0"
705 * will be mapped into the IDIF namespace so that they can fit into the
706 * struct lu_fid fields without loss. For reference see:
707 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
709 static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
712 __u64 seq = ostid_seq(ostid);
714 if (ost_idx > 0xffff) {
715 CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
720 if (fid_seq_is_mdt0(seq)) {
721 __u64 oid = ostid_id(ostid);
723 /* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
724 * that we map into the IDIF namespace. It allows up to 2^48
725 * objects per OST, as this is the object namespace that has
726 * been in production for years. This can handle create rates
727 * of 1M objects/s/OST for 9 years, or combinations thereof.
729 if (oid >= IDIF_MAX_OID) {
730 CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
731 POSTID(ostid), ost_idx);
734 fid->f_seq = fid_idif_seq(oid, ost_idx);
735 /* truncate to 32 bits by assignment */
737 /* in theory, not currently used */
738 fid->f_ver = oid >> 48;
739 } else if (likely(!fid_seq_is_default(seq))) {
740 /* This is either an IDIF object, which identifies objects across
741 * all OSTs, or a regular FID. The IDIF namespace maps legacy
742 * OST objects into the FID namespace. In both cases, we just
743 * pass the FID through, no conversion needed.
745 if (ostid->oi_fid.f_ver != 0) {
746 CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
747 POSTID(ostid), ost_idx);
750 *fid = ostid->oi_fid;
756 /* pack any OST FID into an ostid (id/seq) for the wire/disk */
757 static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
759 if (unlikely(fid_seq_is_igif(fid->f_seq))) {
760 CERROR("bad IGIF, "DFID"\n", PFID(fid));
764 if (fid_is_idif(fid)) {
765 ostid_set_seq_mdt0(ostid);
766 ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
769 ostid->oi_fid = *fid;
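/*
 * Usage sketch (illustrative only): converting a wire ost_id to a FID and
 * back, e.g. when unpacking an OST object reference:
 *
 *	struct lu_fid fid;
 *
 *	if (ostid_to_fid(&fid, &ostid, ost_idx) == 0)
 *		rc = fid_to_ostid(&fid, &ostid);
 *
 * Legacy "group 0" ids map into the IDIF namespace and back without loss;
 * the variable names ostid, ost_idx and rc are hypothetical.
 */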
775 /* Check whether the fid is for LAST_ID */
776 static inline int fid_is_last_id(const struct lu_fid *fid)
778 return (fid_oid(fid) == 0);
782 * Get inode number from an igif.
783 * \param fid an igif to get the inode number from.
784 * \return inode number for the igif.
786 static inline ino_t lu_igif_ino(const struct lu_fid *fid)
791 void lustre_swab_ost_id(struct ost_id *oid);
794 * Get inode generation from an igif.
795 * \param fid an igif to get the inode generation from.
796 * \return inode generation for the igif.
798 static inline __u32 lu_igif_gen(const struct lu_fid *fid)
804 * Build igif from the inode number/generation.
806 static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
814 * Fids are transmitted across the network (in the sender's byte ordering)
815 * and stored on disk in big-endian order.
817 static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
819 dst->f_seq = cpu_to_le64(fid_seq(src));
820 dst->f_oid = cpu_to_le32(fid_oid(src));
821 dst->f_ver = cpu_to_le32(fid_ver(src));
824 static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
826 dst->f_seq = le64_to_cpu(fid_seq(src));
827 dst->f_oid = le32_to_cpu(fid_oid(src));
828 dst->f_ver = le32_to_cpu(fid_ver(src));
831 static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
833 dst->f_seq = cpu_to_be64(fid_seq(src));
834 dst->f_oid = cpu_to_be32(fid_oid(src));
835 dst->f_ver = cpu_to_be32(fid_ver(src));
838 static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
840 dst->f_seq = be64_to_cpu(fid_seq(src));
841 dst->f_oid = be32_to_cpu(fid_oid(src));
842 dst->f_ver = be32_to_cpu(fid_ver(src));
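/*
 * Usage sketch (illustrative only): since FIDs are stored on disk in
 * big-endian order, a FID is typically converted with fid_cpu_to_be()
 * before being written and with fid_be_to_cpu() after being read, e.g.:
 *
 *	fid_cpu_to_be(&disk_fid, &fid);
 *
 * The variable names disk_fid and fid are hypothetical.
 */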
845 static inline int fid_is_sane(const struct lu_fid *fid)
848 ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
849 fid_is_igif(fid) || fid_is_idif(fid) ||
850 fid_seq_is_rsvd(fid_seq(fid)));
853 static inline int fid_is_zero(const struct lu_fid *fid)
855 return fid_seq(fid) == 0 && fid_oid(fid) == 0;
858 void lustre_swab_lu_fid(struct lu_fid *fid);
859 void lustre_swab_lu_seq_range(struct lu_seq_range *range);
861 static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
863 return memcmp(f0, f1, sizeof(*f0)) == 0;
866 #define __diff_normalize(val0, val1) \
868 typeof(val0) __val0 = (val0); \
869 typeof(val1) __val1 = (val1); \
871 (__val0 == __val1 ? 0 : __val0 > __val1 ? 1 : -1); \
874 static inline int lu_fid_cmp(const struct lu_fid *f0,
875 const struct lu_fid *f1)
878 __diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
879 __diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
880 __diff_normalize(fid_ver(f0), fid_ver(f1));
883 static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
884 struct ost_id *dst_oi)
886 if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
887 dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
888 dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
890 fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
894 static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
895 struct ost_id *dst_oi)
897 if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
898 dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
899 dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
901 fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
907 /** \defgroup lu_dir lu_dir
912 * Enumeration of possible directory entry attributes.
914 * Attributes follow the directory entry header in the order they appear in this enum.
917 enum lu_dirent_attrs {
920 LUDA_64BITHASH = 0x0004,
924 * Layout of readdir pages, as transmitted on wire.
927 /** valid if LUDA_FID is set. */
928 struct lu_fid lde_fid;
929 /** a unique entry identifier: a hash or an offset. */
931 /** total record length, including all attributes. */
935 /** optional variable size attributes following this entry.
936 * taken from enum lu_dirent_attrs.
939 /** name is followed by the attributes indicated in ->lde_attrs, in
940 * their natural order. After the last attribute, padding bytes are
941 * added to make ->lde_reclen a multiple of 8.
947 * Definitions of optional directory entry attributes formats.
949 * Individual attributes do not have their length encoded in a generic way. It
950 * is assumed that the consumer of an attribute knows its format. This means that
951 * it is impossible to skip over an unknown attribute, except by skipping over all
952 * remaining attributes (by using ->lde_reclen), which is not too
953 * constraining, because new server versions will append new attributes at
954 * the end of an entry.
958 * Fid directory attribute: a fid of an object referenced by the entry. This
959 * will be almost always requested by the client and supplied by the server.
961 * Aligned to 8 bytes.
963 /* To have compatibility with 1.8, let's keep the fid in the lu_dirent struct. */
968 * Aligned to 2 bytes.
979 #define IFTODT(type) (((type) & S_IFMT) >> IFSHIFT)
982 #define DTTOIF(dirtype) ((dirtype) << IFSHIFT)
986 __u64 ldp_hash_start;
990 struct lu_dirent ldp_entries[0];
993 enum lu_dirpage_flags {
995 * dirpage contains no entry.
999 * last entry's lde_hash equals ldp_hash_end.
1001 LDF_COLLIDE = 1 << 1
1004 static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
1006 if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
1009 return dp->ldp_entries;
1012 static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
1014 struct lu_dirent *next;
1016 if (le16_to_cpu(ent->lde_reclen) != 0)
1017 next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
1024 static inline int lu_dirent_calc_size(int namelen, __u16 attr)
1028 if (attr & LUDA_TYPE) {
1029 const unsigned align = sizeof(struct luda_type) - 1;
1031 size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
1032 size += sizeof(struct luda_type);
1034 size = sizeof(struct lu_dirent) + namelen;
1037 return (size + 7) & ~7;
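/*
 * Worked example (illustrative only, assuming sizeof(struct lu_dirent) == 32
 * and sizeof(struct luda_type) == 2): for a 5-character name with LUDA_TYPE
 * requested, the entry body is rounded up to 2 bytes (32 + 5 + 1 = 38),
 * 2 bytes of luda_type are appended (40), and the result is rounded up to a
 * multiple of 8, so lu_dirent_calc_size(5, LUDA_TYPE) == 40.
 */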
1040 static inline int lu_dirent_size(struct lu_dirent *ent)
1042 if (le16_to_cpu(ent->lde_reclen) == 0) {
1043 return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen),
1044 le32_to_cpu(ent->lde_attrs));
1046 return le16_to_cpu(ent->lde_reclen);
1049 #define MDS_DIR_END_OFF 0xfffffffffffffffeULL
1052 * MDS_READPAGE page size
1054 * This is the directory page size packed in MDS_READPAGE RPC.
1055 * It's different from PAGE_SIZE because the client needs to
1056 * access the struct lu_dirpage header packed at the beginning of
1057 * the "page", and without this there isn't any way to know where the
1058 * lu_dirpage header is if the client and server PAGE_SIZE differ.
1060 #define LU_PAGE_SHIFT 12
1061 #define LU_PAGE_SIZE (1UL << LU_PAGE_SHIFT)
1062 #define LU_PAGE_MASK (~(LU_PAGE_SIZE - 1))
1064 #define LU_PAGE_COUNT (1 << (PAGE_SHIFT - LU_PAGE_SHIFT))
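/*
 * Example (illustrative only): readdir pages are always carved into 4KiB
 * lu_pages regardless of the CPU page size, so on a node with 64KiB pages
 * (PAGE_SHIFT == 16) each VM page holds
 * LU_PAGE_COUNT == 1 << (16 - 12) == 16 lu_dirpage containers.
 */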
1068 struct lustre_handle {
1072 #define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL
1074 static inline int lustre_handle_is_used(struct lustre_handle *lh)
1076 return lh->cookie != 0ull;
1079 static inline int lustre_handle_equal(const struct lustre_handle *lh1,
1080 const struct lustre_handle *lh2)
1082 return lh1->cookie == lh2->cookie;
1085 static inline void lustre_handle_copy(struct lustre_handle *tgt,
1086 struct lustre_handle *src)
1088 tgt->cookie = src->cookie;
1091 /* flags for lm_flags */
1092 #define MSGHDR_AT_SUPPORT 0x1
1093 #define MSGHDR_CKSUM_INCOMPAT18 0x2
1095 #define lustre_msg lustre_msg_v2
1096 /* we depend on this structure to be 8-byte aligned */
1097 /* this type is only endian-adjusted in lustre_unpack_msg() */
1098 struct lustre_msg_v2 {
1107 __u32 lm_buflens[0];
1110 /* without gss, ptlrpc_body is put at the first buffer. */
1111 #define PTLRPC_NUM_VERSIONS 4
1112 #define JOBSTATS_JOBID_SIZE 32 /* 32 bytes string */
1113 struct ptlrpc_body_v3 {
1114 struct lustre_handle pb_handle;
1121 __u64 pb_last_committed;
1126 __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
1127 __u32 pb_service_time; /* for rep, actual service time */
1130 /* VBR: pre-versions */
1131 __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
1132 /* padding for future needs */
1133 __u64 pb_padding[4];
1134 char pb_jobid[JOBSTATS_JOBID_SIZE];
1137 #define ptlrpc_body ptlrpc_body_v3
1139 struct ptlrpc_body_v2 {
1140 struct lustre_handle pb_handle;
1147 __u64 pb_last_committed;
1152 __u32 pb_timeout; /* for req, the deadline, for rep, the service est */
1153 __u32 pb_service_time; /* for rep, actual service time, also used for
1154 * net_latency of req
1158 /* VBR: pre-versions */
1159 __u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
1160 /* padding for future needs */
1161 __u64 pb_padding[4];
1164 void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
1166 /* message body offset for lustre_msg_v2 */
1167 /* ptlrpc body offset in all request/reply messages */
1168 #define MSG_PTLRPC_BODY_OFF 0
1170 /* normal request/reply message record offset */
1171 #define REQ_REC_OFF 1
1172 #define REPLY_REC_OFF 1
1174 /* ldlm request message body offset */
1175 #define DLM_LOCKREQ_OFF 1 /* lockreq offset */
1176 #define DLM_REQ_REC_OFF 2 /* normal dlm request record offset */
1178 /* ldlm intent lock message body offset */
1179 #define DLM_INTENT_IT_OFF 2 /* intent lock it offset */
1180 #define DLM_INTENT_REC_OFF 3 /* intent lock record offset */
1182 /* ldlm reply message body offset */
1183 #define DLM_LOCKREPLY_OFF 1 /* lockrep offset */
1184 #define DLM_REPLY_REC_OFF 2 /* reply record offset */
1186 /** only use in req->rq_{req,rep}_swab_mask */
1187 #define MSG_PTLRPC_HEADER_OFF 31
1189 /* Flags that are operation-specific go in the top 16 bits. */
1190 #define MSG_OP_FLAG_MASK 0xffff0000
1191 #define MSG_OP_FLAG_SHIFT 16
1193 /* Flags that apply to all requests are in the bottom 16 bits */
1194 #define MSG_GEN_FLAG_MASK 0x0000ffff
1195 #define MSG_LAST_REPLAY 0x0001
1196 #define MSG_RESENT 0x0002
1197 #define MSG_REPLAY 0x0004
1198 /* #define MSG_AT_SUPPORT 0x0008
1199 * This was used in early prototypes of adaptive timeouts, and while there
1200 * shouldn't be any users of that code, there also isn't a need for using this
1201 * bit. Defer usage until at least 1.10 to avoid potential conflict.
1203 #define MSG_DELAY_REPLAY 0x0010
1204 #define MSG_VERSION_REPLAY 0x0020
1205 #define MSG_REQ_REPLAY_DONE 0x0040
1206 #define MSG_LOCK_REPLAY_DONE 0x0080
1209 * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
1212 #define MSG_CONNECT_RECOVERING 0x00000001
1213 #define MSG_CONNECT_RECONNECT 0x00000002
1214 #define MSG_CONNECT_REPLAYABLE 0x00000004
1215 /*#define MSG_CONNECT_PEER 0x8 */
1216 #define MSG_CONNECT_LIBCLIENT 0x00000010
1217 #define MSG_CONNECT_INITIAL 0x00000020
1218 #define MSG_CONNECT_ASYNC 0x00000040
1219 #define MSG_CONNECT_NEXT_VER 0x00000080 /* use next version of lustre_msg */
1220 #define MSG_CONNECT_TRANSNO 0x00000100 /* report transno */
1223 #define OBD_CONNECT_RDONLY 0x1ULL /*client has read-only access*/
1224 #define OBD_CONNECT_INDEX 0x2ULL /*connect specific LOV idx */
1225 #define OBD_CONNECT_MDS 0x4ULL /*connect from MDT to OST */
1226 #define OBD_CONNECT_GRANT 0x8ULL /*OSC gets grant at connect */
1227 #define OBD_CONNECT_SRVLOCK 0x10ULL /*server takes locks for cli */
1228 #define OBD_CONNECT_VERSION 0x20ULL /*Lustre versions in ocd */
1229 #define OBD_CONNECT_REQPORTAL 0x40ULL /*Separate non-IO req portal */
1230 #define OBD_CONNECT_ACL 0x80ULL /*access control lists */
1231 #define OBD_CONNECT_XATTR 0x100ULL /*client use extended attr */
1232 #define OBD_CONNECT_CROW 0x200ULL /*MDS+OST create obj on write*/
1233 #define OBD_CONNECT_TRUNCLOCK 0x400ULL /*locks on server for punch */
1234 #define OBD_CONNECT_TRANSNO 0x800ULL /*replay sends init transno */
1235 #define OBD_CONNECT_IBITS 0x1000ULL /*support for inodebits locks*/
1236 #define OBD_CONNECT_JOIN 0x2000ULL /*files can be concatenated.
1237 *We do not support JOIN FILE
1238 *anymore; reserve this flag
1239 *just to prevent the bit from being reused.
1242 #define OBD_CONNECT_ATTRFID 0x4000ULL /*Server can GetAttr By Fid*/
1243 #define OBD_CONNECT_NODEVOH 0x8000ULL /*No open hndl on specl nodes*/
1244 #define OBD_CONNECT_RMT_CLIENT 0x10000ULL /*Remote client */
1245 #define OBD_CONNECT_RMT_CLIENT_FORCE 0x20000ULL /*Remote client by force */
1246 #define OBD_CONNECT_BRW_SIZE 0x40000ULL /*Max bytes per rpc */
1247 #define OBD_CONNECT_QUOTA64 0x80000ULL /*Not used since 2.4 */
1248 #define OBD_CONNECT_MDS_CAPA 0x100000ULL /*MDS capability */
1249 #define OBD_CONNECT_OSS_CAPA 0x200000ULL /*OSS capability */
1250 #define OBD_CONNECT_CANCELSET 0x400000ULL /*Early batched cancels. */
1251 #define OBD_CONNECT_SOM 0x800000ULL /*Size on MDS */
1252 #define OBD_CONNECT_AT 0x1000000ULL /*client uses AT */
1253 #define OBD_CONNECT_LRU_RESIZE 0x2000000ULL /*LRU resize feature. */
1254 #define OBD_CONNECT_MDS_MDS 0x4000000ULL /*MDS-MDS connection */
1255 #define OBD_CONNECT_REAL 0x8000000ULL /*real connection */
1256 #define OBD_CONNECT_CHANGE_QS 0x10000000ULL /*Not used since 2.4 */
1257 #define OBD_CONNECT_CKSUM 0x20000000ULL /*support several cksum algos*/
1258 #define OBD_CONNECT_FID 0x40000000ULL /*FID is supported by server */
1259 #define OBD_CONNECT_VBR 0x80000000ULL /*version based recovery */
1260 #define OBD_CONNECT_LOV_V3 0x100000000ULL /*client supports LOV v3 EA */
1261 #define OBD_CONNECT_GRANT_SHRINK 0x200000000ULL /* support grant shrink */
1262 #define OBD_CONNECT_SKIP_ORPHAN 0x400000000ULL /* don't reuse orphan objids */
1263 #define OBD_CONNECT_MAX_EASIZE 0x800000000ULL /* preserved for large EA */
1264 #define OBD_CONNECT_FULL20 0x1000000000ULL /* it is 2.0 client */
1265 #define OBD_CONNECT_LAYOUTLOCK 0x2000000000ULL /* client uses layout lock */
1266 #define OBD_CONNECT_64BITHASH 0x4000000000ULL /* client supports 64-bits
1269 #define OBD_CONNECT_MAXBYTES 0x8000000000ULL /* max stripe size */
1270 #define OBD_CONNECT_IMP_RECOV 0x10000000000ULL /* imp recovery support */
1271 #define OBD_CONNECT_JOBSTATS 0x20000000000ULL /* jobid in ptlrpc_body */
1272 #define OBD_CONNECT_UMASK 0x40000000000ULL /* create uses client umask */
1273 #define OBD_CONNECT_EINPROGRESS 0x80000000000ULL /* client handles -EINPROGRESS
1274 * RPC error properly
1276 #define OBD_CONNECT_GRANT_PARAM 0x100000000000ULL/* extra grant params used for
1277 * finer space reservation
1279 #define OBD_CONNECT_FLOCK_OWNER 0x200000000000ULL /* for the fixed 1.8
1280 * policy and 2.x server
1282 #define OBD_CONNECT_LVB_TYPE 0x400000000000ULL /* variable type of LVB */
1283 #define OBD_CONNECT_NANOSEC_TIME 0x800000000000ULL /* nanosecond timestamps */
1284 #define OBD_CONNECT_LIGHTWEIGHT 0x1000000000000ULL/* lightweight connection */
1285 #define OBD_CONNECT_SHORTIO 0x2000000000000ULL/* short io */
1286 #define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */
1287 #define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* flock deadlock detection */
1288 #define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/
1289 #define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by fid won't pack name in request */
1294 * Please DO NOT add flag values here before first ensuring that this same
1295 * flag value is not in use on some other branch. Please clear any such
1296 * changes with senior engineers before starting to use a new flag. Then,
1297 * submit a small patch against EVERY branch that ONLY adds the new flag,
1298 * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
1299 * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
1300 * can be approved and landed easily to reserve the flag for future use.
1303 /* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
1304 * connection. It is a temporary bug fix for Imperative Recovery interop
1305 * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
1306 * 2.2 clients/servers is no longer needed. LU-1252/LU-1644.
1308 #define OBD_CONNECT_MNE_SWAB OBD_CONNECT_MDS_MDS
1310 #define OCD_HAS_FLAG(ocd, flg) \
1311 (!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
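/*
 * Usage sketch (illustrative only): OCD_HAS_FLAG pastes the OBD_CONNECT_
 * prefix onto its second argument, so a negotiated feature is typically
 * tested as:
 *
 *	if (OCD_HAS_FLAG(ocd, GRANT_SHRINK))
 *		... peer supports grant shrinking ...
 *
 * where 'ocd' is any pointer to a struct obd_connect_data; the variable
 * name is hypothetical.
 */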
1313 /* Features required for this version of the client to work with server */
1314 #define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
1317 #define OBD_OCD_VERSION(major, minor, patch, fix) (((major)<<24) + \
1319 ((patch)<<8) + (fix))
1320 #define OBD_OCD_VERSION_MAJOR(version) ((int)((version)>>24)&255)
1321 #define OBD_OCD_VERSION_MINOR(version) ((int)((version)>>16)&255)
1322 #define OBD_OCD_VERSION_PATCH(version) ((int)((version)>>8)&255)
1323 #define OBD_OCD_VERSION_FIX(version) ((int)(version)&255)
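/*
 * Worked example (illustrative only): OBD_OCD_VERSION(2, 7, 58, 0) encodes
 * as (2 << 24) + (7 << 16) + (58 << 8) + 0 = 0x02073a00, and
 * OBD_OCD_VERSION_MAJOR()/MINOR()/PATCH()/FIX() recover 2, 7, 58 and 0
 * from that value.
 */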
1325 /* This structure is used for both request and reply.
1327 * If we eventually have separate connect data for different types, which we
1328 * almost certainly will, then perhaps we stick a union in here.
1330 struct obd_connect_data_v1 {
1331 __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
1332 __u32 ocd_version; /* lustre release version number */
1333 __u32 ocd_grant; /* initial cache grant amount (bytes) */
1334 __u32 ocd_index; /* LOV index to connect to */
1335 __u32 ocd_brw_size; /* Maximum BRW size in bytes, must be 2^n */
1336 __u64 ocd_ibits_known; /* inode bits this client understands */
1337 __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
1338 __u8 ocd_inodespace; /* log2 of the per-inode space consumption */
1339 __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */
1340 __u32 ocd_unused; /* also fix lustre_swab_connect */
1341 __u64 ocd_transno; /* first transno from client to be replayed */
1342 __u32 ocd_group; /* MDS group on OST */
1343 __u32 ocd_cksum_types; /* supported checksum algorithms */
1344 __u32 ocd_max_easize; /* How big LOV EA can be on MDS */
1345 __u32 ocd_instance; /* also fix lustre_swab_connect */
1346 __u64 ocd_maxbytes; /* Maximum stripe size in bytes */
1349 struct obd_connect_data {
1350 __u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
1351 __u32 ocd_version; /* lustre release version number */
1352 __u32 ocd_grant; /* initial cache grant amount (bytes) */
1353 __u32 ocd_index; /* LOV index to connect to */
1354 __u32 ocd_brw_size; /* Maximum BRW size in bytes */
1355 __u64 ocd_ibits_known; /* inode bits this client understands */
1356 __u8 ocd_blocksize; /* log2 of the backend filesystem blocksize */
1357 __u8 ocd_inodespace; /* log2 of the per-inode space consumption */
1358 __u16 ocd_grant_extent; /* per-extent grant overhead, in 1K blocks */
1359 __u32 ocd_unused; /* also fix lustre_swab_connect */
1360 __u64 ocd_transno; /* first transno from client to be replayed */
1361 __u32 ocd_group; /* MDS group on OST */
1362 __u32 ocd_cksum_types; /* supported checksum algorithms */
1363 __u32 ocd_max_easize; /* How big LOV EA can be on MDS */
1364 __u32 ocd_instance; /* instance # of this target */
1365 __u64 ocd_maxbytes; /* Maximum stripe size in bytes */
1366 /* Fields after ocd_maxbytes are only accessible by the receiver
1367 * if the corresponding flag in ocd_connect_flags is set. Accessing
1368 * any field after ocd_maxbytes on the receiver without a valid flag
1369 * may result in out-of-bound memory access and kernel oops.
1371 __u64 padding1; /* added 2.1.0. also fix lustre_swab_connect */
1372 __u64 padding2; /* added 2.1.0. also fix lustre_swab_connect */
1373 __u64 padding3; /* added 2.1.0. also fix lustre_swab_connect */
1374 __u64 padding4; /* added 2.1.0. also fix lustre_swab_connect */
1375 __u64 padding5; /* added 2.1.0. also fix lustre_swab_connect */
1376 __u64 padding6; /* added 2.1.0. also fix lustre_swab_connect */
1377 __u64 padding7; /* added 2.1.0. also fix lustre_swab_connect */
1378 __u64 padding8; /* added 2.1.0. also fix lustre_swab_connect */
1379 __u64 padding9; /* added 2.1.0. also fix lustre_swab_connect */
1380 __u64 paddingA; /* added 2.1.0. also fix lustre_swab_connect */
1381 __u64 paddingB; /* added 2.1.0. also fix lustre_swab_connect */
1382 __u64 paddingC; /* added 2.1.0. also fix lustre_swab_connect */
1383 __u64 paddingD; /* added 2.1.0. also fix lustre_swab_connect */
1384 __u64 paddingE; /* added 2.1.0. also fix lustre_swab_connect */
1385 __u64 paddingF; /* added 2.1.0. also fix lustre_swab_connect */
1389 * Please DO NOT use any fields here before first ensuring that this same
1390 * field is not in use on some other branch. Please clear any such changes
1391 * with senior engineers before starting to use a new field. Then, submit
1392 * a small patch against EVERY branch that ONLY adds the new field along with
1393 * the matching OBD_CONNECT flag, so that can be approved and landed easily to
1394 * reserve the flag for future use.
1397 void lustre_swab_connect(struct obd_connect_data *ocd);
1400 * Supported checksum algorithms. Up to 32 checksum types are supported.
1401 * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
1402 * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
1403 * algorithm and also the OBD_FL_CKSUM* flags.
1406 OBD_CKSUM_CRC32 = 0x00000001,
1407 OBD_CKSUM_ADLER = 0x00000002,
1408 OBD_CKSUM_CRC32C = 0x00000004,
1412 * OST requests: OBDO & OBD request records
1417 OST_REPLY = 0, /* reply ? */
1433 OST_QUOTACHECK = 18,
1435 OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
1438 #define OST_FIRST_OPC OST_REPLY
1441 OBD_FL_INLINEDATA = 0x00000001,
1442 OBD_FL_OBDMDEXISTS = 0x00000002,
1443 OBD_FL_DELORPHAN = 0x00000004, /* if set in o_flags delete orphans */
1444 OBD_FL_NORPC = 0x00000008, /* set in o_flags do in OSC not OST */
1445 OBD_FL_IDONLY = 0x00000010, /* set in o_flags only adjust obj id*/
1446 OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
1447 OBD_FL_DEBUG_CHECK = 0x00000040, /* echo client/server debug check */
1448 OBD_FL_NO_USRQUOTA = 0x00000100, /* the object's owner is over quota */
1449 OBD_FL_NO_GRPQUOTA = 0x00000200, /* the object's group is over quota */
1450 OBD_FL_CREATE_CROW = 0x00000400, /* object should be create on write */
1451 OBD_FL_SRVLOCK = 0x00000800, /* delegate DLM locking to server */
1452 OBD_FL_CKSUM_CRC32 = 0x00001000, /* CRC32 checksum type */
1453 OBD_FL_CKSUM_ADLER = 0x00002000, /* ADLER checksum type */
1454 OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
1455 OBD_FL_CKSUM_RSVD2 = 0x00008000, /* for future cksum types */
1456 OBD_FL_CKSUM_RSVD3 = 0x00010000, /* for future cksum types */
1457 OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrink the grant */
1458 OBD_FL_MMAP = 0x00040000, /* object is mmapped on the client.
1459 * XXX: obsoleted - reserved for old
1460 * clients prior than 2.2
1462 OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
1463 OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
1464 OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */
1465 OBD_FL_SHORT_IO = 0x00400000, /* short io request */
1467 /* Note that while these checksum values are currently separate bits,
1468 * in 2.x we can actually allow all values from 1-31 if we wanted.
1470 OBD_FL_CKSUM_ALL = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
1471 OBD_FL_CKSUM_CRC32C,
1473 /* mask for local-only flag, which won't be sent over network */
1474 OBD_FL_LOCAL_MASK = 0xF0000000,
1477 #define LOV_MAGIC_V1 0x0BD10BD0
1478 #define LOV_MAGIC LOV_MAGIC_V1
1479 #define LOV_MAGIC_JOIN_V1 0x0BD20BD0
1480 #define LOV_MAGIC_V3 0x0BD30BD0
1483 * magic for fully defined striping
1484 * the idea is that we should have different magics for striping "hints"
1485 * (struct lov_user_md_v[13]) and defined ready-to-use striping (struct
1486 * lov_mds_md_v[13]). At the moment the magics are used in the wire protocol;
1487 * we can't just change them without lengthy preparation, but we still need a
1488 * mechanism to allow LOD to differentiate hint versus ready striping.
1489 * so, at the moment we do a trick: the MDT knows what to expect from a request
1490 * depending on the case (replay uses ready striping, a non-replay req uses
1491 * hints), so the MDT replaces the magic with the appropriate one and LOD can
1492 * easily understand what's inside -bzzz
1494 #define LOV_MAGIC_V1_DEF 0x0CD10BD0
1495 #define LOV_MAGIC_V3_DEF 0x0CD30BD0
1497 #define LOV_PATTERN_RAID0 0x001 /* stripes are used round-robin */
1498 #define LOV_PATTERN_RAID1 0x002 /* stripes are mirrors of each other */
1499 #define LOV_PATTERN_FIRST 0x100 /* first stripe is not in round-robin */
1500 #define LOV_PATTERN_CMOBD 0x200
1502 #define LOV_PATTERN_F_MASK 0xffff0000
1503 #define LOV_PATTERN_F_RELEASED 0x80000000 /* HSM released file */
1505 #define lov_pattern(pattern) (pattern & ~LOV_PATTERN_F_MASK)
1506 #define lov_pattern_flags(pattern) (pattern & LOV_PATTERN_F_MASK)
1508 #define lov_ost_data lov_ost_data_v1
1509 struct lov_ost_data_v1 { /* per-stripe data structure (little-endian)*/
1510 struct ost_id l_ost_oi; /* OST object ID */
1511 __u32 l_ost_gen; /* generation of this l_ost_idx */
1512 __u32 l_ost_idx; /* OST index in LOV (lov_tgt_desc->tgts) */
1515 #define lov_mds_md lov_mds_md_v1
1516 struct lov_mds_md_v1 { /* LOV EA mds/wire data (little-endian) */
1517 __u32 lmm_magic; /* magic number = LOV_MAGIC_V1 */
1518 __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
1519 struct ost_id lmm_oi; /* LOV object ID */
1520 __u32 lmm_stripe_size; /* size of stripe in bytes */
1521 /* lmm_stripe_count used to be __u32 */
1522 __u16 lmm_stripe_count; /* num stripes in use for this object */
1523 __u16 lmm_layout_gen; /* layout generation number */
1524 struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
1528 * Sigh, because pre-2.4 uses
1529 * struct lov_mds_md_v1 {
1531 * __u64 lmm_object_id;
1532 * __u64 lmm_object_seq;
1535 * to identify the LOV(MDT) object, and lmm_object_seq will
1536 * be normal_fid, which makes it hard to combine these conversions
1537 * into ostid_to_fid(), so we will do the lmm_oi/fid conversion separately.
1539 * We can tell the lmm_oi format this way:
1540 * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
1541 * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
1542 * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
1545 * But currently lmm_oi/lsm_oi does not have any "real" usages,
1546 * except for printing some information, and the user can always
1547 * get the real FID from LMA; besides, this multiple-case check might
1548 * make swabbing more complicated. So we will keep using id/seq for lmm_oi.
1551 static inline void fid_to_lmm_oi(const struct lu_fid *fid,
1554 oi->oi.oi_id = fid_oid(fid);
1555 oi->oi.oi_seq = fid_seq(fid);
1558 static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
1560 oi->oi.oi_seq = seq;
1563 static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid)
1568 static inline __u64 lmm_oi_id(struct ost_id *oi)
1570 return oi->oi.oi_id;
1573 static inline __u64 lmm_oi_seq(struct ost_id *oi)
1575 return oi->oi.oi_seq;
1578 static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
1579 struct ost_id *src_oi)
1581 dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
1582 dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
1585 static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
1586 struct ost_id *src_oi)
1588 dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
1589 dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
1592 /* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
1594 #define MAX_MD_SIZE \
1595 (sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
1596 #define MIN_MD_SIZE \
1597 (sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))
1599 #define XATTR_NAME_ACL_ACCESS "system.posix_acl_access"
1600 #define XATTR_NAME_ACL_DEFAULT "system.posix_acl_default"
1601 #define XATTR_USER_PREFIX "user."
1602 #define XATTR_TRUSTED_PREFIX "trusted."
1603 #define XATTR_SECURITY_PREFIX "security."
1604 #define XATTR_LUSTRE_PREFIX "lustre."
1606 #define XATTR_NAME_LOV "trusted.lov"
1607 #define XATTR_NAME_LMA "trusted.lma"
1608 #define XATTR_NAME_LMV "trusted.lmv"
1609 #define XATTR_NAME_LINK "trusted.link"
1610 #define XATTR_NAME_FID "trusted.fid"
1611 #define XATTR_NAME_VERSION "trusted.version"
1612 #define XATTR_NAME_SOM "trusted.som"
1613 #define XATTR_NAME_HSM "trusted.hsm"
1614 #define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"
1616 struct lov_mds_md_v3 { /* LOV EA mds/wire data (little-endian) */
1617 __u32 lmm_magic; /* magic number = LOV_MAGIC_V3 */
1618 __u32 lmm_pattern; /* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
1619 struct ost_id lmm_oi; /* LOV object ID */
1620 __u32 lmm_stripe_size; /* size of stripe in bytes */
1621 /* lmm_stripe_count used to be __u32 */
1622 __u16 lmm_stripe_count; /* num stripes in use for this object */
1623 __u16 lmm_layout_gen; /* layout generation number */
1624 char lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */
1625 struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
1628 static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
1630 if (lmm_magic == LOV_MAGIC_V3)
1631 return sizeof(struct lov_mds_md_v3) +
1632 stripes * sizeof(struct lov_ost_data_v1);
1634 return sizeof(struct lov_mds_md_v1) +
1635 stripes * sizeof(struct lov_ost_data_v1);
1639 lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
1641 switch (lmm_magic) {
1642 case LOV_MAGIC_V1: {
1643 struct lov_mds_md_v1 lmm;
1645 if (buf_size < sizeof(lmm))
1648 return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
1650 case LOV_MAGIC_V3: {
1651 struct lov_mds_md_v3 lmm;
1653 if (buf_size < sizeof(lmm))
1656 return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
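/*
 * Worked example (illustrative only, assuming sizeof(struct lov_mds_md_v1)
 * == 32 and sizeof(struct lov_ost_data_v1) == 24): a 4-stripe V1 layout
 * needs lov_mds_md_size(4, LOV_MAGIC_V1) == 32 + 4 * 24 == 128 bytes, and
 * lov_mds_md_max_stripe_count(128, LOV_MAGIC_V1) correspondingly returns 4.
 */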
1663 #define OBD_MD_FLID (0x00000001ULL) /* object ID */
1664 #define OBD_MD_FLATIME (0x00000002ULL) /* access time */
1665 #define OBD_MD_FLMTIME (0x00000004ULL) /* data modification time */
1666 #define OBD_MD_FLCTIME (0x00000008ULL) /* change time */
1667 #define OBD_MD_FLSIZE (0x00000010ULL) /* size */
1668 #define OBD_MD_FLBLOCKS (0x00000020ULL) /* allocated blocks count */
1669 #define OBD_MD_FLBLKSZ (0x00000040ULL) /* block size */
1670 #define OBD_MD_FLMODE (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
1671 #define OBD_MD_FLTYPE (0x00000100ULL) /* object type (mode & S_IFMT) */
1672 #define OBD_MD_FLUID (0x00000200ULL) /* user ID */
1673 #define OBD_MD_FLGID (0x00000400ULL) /* group ID */
1674 #define OBD_MD_FLFLAGS (0x00000800ULL) /* flags word */
1675 #define OBD_MD_FLNLINK (0x00002000ULL) /* link count */
1676 #define OBD_MD_FLGENER (0x00004000ULL) /* generation number */
1677 /*#define OBD_MD_FLINLINE (0x00008000ULL) inline data. used until 1.6.5 */
1678 #define OBD_MD_FLRDEV (0x00010000ULL) /* device number */
1679 #define OBD_MD_FLEASIZE (0x00020000ULL) /* extended attribute data */
1680 #define OBD_MD_LINKNAME (0x00040000ULL) /* symbolic link target */
1681 #define OBD_MD_FLHANDLE (0x00080000ULL) /* file/lock handle */
1682 #define OBD_MD_FLCKSUM (0x00100000ULL) /* bulk data checksum */
1683 #define OBD_MD_FLQOS (0x00200000ULL) /* quality of service stats */
1684 /*#define OBD_MD_FLOSCOPQ (0x00400000ULL) osc opaque data, never used */
1685 #define OBD_MD_FLCOOKIE (0x00800000ULL) /* log cancellation cookie */
1686 #define OBD_MD_FLGROUP (0x01000000ULL) /* group */
1687 #define OBD_MD_FLFID (0x02000000ULL) /* ->ost write inline fid */
1688 #define OBD_MD_FLEPOCH (0x04000000ULL) /* ->ost write with ioepoch */
1689 /* ->mds if epoch opens or closes
1691 #define OBD_MD_FLGRANT (0x08000000ULL) /* ost preallocation space grant */
1692 #define OBD_MD_FLDIREA (0x10000000ULL) /* dir's extended attribute data */
1693 #define OBD_MD_FLUSRQUOTA (0x20000000ULL) /* over quota flags sent from ost */
1694 #define OBD_MD_FLGRPQUOTA (0x40000000ULL) /* over quota flags sent from ost */
1695 #define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */
1697 #define OBD_MD_MDS (0x0000000100000000ULL) /* where an inode lives on */
1698 #define OBD_MD_REINT (0x0000000200000000ULL) /* reintegrate oa */
1699 #define OBD_MD_MEA (0x0000000400000000ULL) /* CMD split EA */
1700 #define OBD_MD_TSTATE (0x0000000800000000ULL) /* transient state field */
1702 #define OBD_MD_FLXATTR (0x0000001000000000ULL) /* xattr */
1703 #define OBD_MD_FLXATTRLS (0x0000002000000000ULL) /* xattr list */
1704 #define OBD_MD_FLXATTRRM (0x0000004000000000ULL) /* xattr remove */
1705 #define OBD_MD_FLACL (0x0000008000000000ULL) /* ACL */
1706 #define OBD_MD_FLRMTPERM (0x0000010000000000ULL) /* remote permission */
1707 #define OBD_MD_FLMDSCAPA (0x0000020000000000ULL) /* MDS capability */
1708 #define OBD_MD_FLOSSCAPA (0x0000040000000000ULL) /* OSS capability */
1709 #define OBD_MD_FLCKSPLIT (0x0000080000000000ULL) /* Check split on server */
1710 #define OBD_MD_FLCROSSREF (0x0000100000000000ULL) /* Cross-ref case */
1711 #define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
1712 * under lock; for xattr
1713 * requests means the
1714 * client holds the lock
1716 #define OBD_MD_FLOBJCOUNT (0x0000400000000000ULL) /* for multiple destroy */
1718 #define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
1719 #define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */
1720 #define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */
1721 #define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */
1723 #define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
1724 #define OBD_MD_FLRELEASED (0x0020000000000000ULL) /* file released */
1726 #define OBD_MD_FLGETATTR (OBD_MD_FLID | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
1727 OBD_MD_FLCTIME | OBD_MD_FLSIZE | OBD_MD_FLBLKSZ | \
1728 OBD_MD_FLMODE | OBD_MD_FLTYPE | OBD_MD_FLUID | \
1729 OBD_MD_FLGID | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
1730 OBD_MD_FLGENER | OBD_MD_FLRDEV | OBD_MD_FLGROUP)
1732 #define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
1734 /* don't forget obdo_fid which is way down at the bottom so it can
1735 * come after the definition of llog_cookie
1740 HSS_CLEARMASK = 0x02,
1741 HSS_ARCHIVE_ID = 0x04,
1744 struct hsm_state_set {
1746 __u32 hss_archive_id;
1748 __u64 hss_clearmask;
1751 void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
1752 void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
1754 void lustre_swab_obd_statfs(struct obd_statfs *os);
1756 /* ost_body.data values for OST_BRW */
1758 #define OBD_BRW_READ 0x01
1759 #define OBD_BRW_WRITE 0x02
1760 #define OBD_BRW_RWMASK (OBD_BRW_READ | OBD_BRW_WRITE)
1761 #define OBD_BRW_SYNC 0x08 /* this page is a part of synchronous
1762 * transfer and is not accounted in bytes sent/received.
1765 #define OBD_BRW_CHECK 0x10
1766 #define OBD_BRW_FROM_GRANT 0x20 /* the osc manages this under llite */
1767 #define OBD_BRW_GRANTED 0x40 /* the ost manages this */
1768 #define OBD_BRW_NOCACHE 0x80 /* this page is a part of non-cached IO */
1769 #define OBD_BRW_NOQUOTA 0x100
1770 #define OBD_BRW_SRVLOCK 0x200 /* Client holds no lock over this page */
1771 #define OBD_BRW_ASYNC 0x400 /* Server may delay commit to disk */
1772 #define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
1773 #define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
1774 #define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
1775 #define OBD_BRW_SOFT_SYNC 0x4000 /* This flag notifies the server
1776 * that the client is running low on
1777 * space for unstable pages; asking
1778 * it to sync quickly
1781 #define OBD_OBJECT_EOF 0xffffffffffffffffULL
1783 #define OST_MIN_PRECREATE 32
1784 #define OST_MAX_PRECREATE 20000
1787 struct ost_id ioo_oid; /* object ID, if multi-obj BRW */
1788 __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4,
1789 * now (PTLRPC_BULK_OPS_COUNT - 1) in
1790 * high 16 bits in 2.4 and later
1792 __u32 ioo_bufcnt; /* number of niobufs for this object */
1795 #define IOOBJ_MAX_BRW_BITS 16
1796 #define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1)
1797 #define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
1798 #define ioobj_max_brw_set(ioo, num) \
1799 do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
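/*
 * Worked example (illustrative only): ioobj_max_brw_set(ioo, 8) stores
 * (8 - 1) << 16 == 0x70000 in ioo_max_brw, and ioobj_max_brw_get(ioo)
 * recovers (0x70000 >> 16) + 1 == 8. An ioo_max_brw whose high 16 bits are
 * zero (as filled by pre-2.4 peers, where the low 16 bits held o_mode)
 * reads back as 1.
 */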
1801 void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
1803 /* multiple of 8 bytes => can array */
1804 struct niobuf_remote {
1810 void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
1812 /* lock value block communicated between the filter and llite */
1814 /* OST_LVB_ERR_INIT is needed because the return code in rc is
1815 * negative, i.e. because ((MASK + rc) & MASK) != MASK.
1817 #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
1818 #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
1819 #define OST_LVB_IS_ERR(blocks) \
1820 ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
1821 #define OST_LVB_SET_ERR(blocks, rc) \
1822 do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
1823 #define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
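/* Illustrative sketch (hypothetical helper, documentation only): a negative
 * return code is folded into the 64-bit "blocks" LVB field with the macros
 * above and recovered unchanged on the other side. */
static inline int ost_lvb_err_example(void)
{
	__u64 blocks;

	OST_LVB_SET_ERR(blocks, -2);		/* encode rc = -2 */
	if (OST_LVB_IS_ERR(blocks))
		return OST_LVB_GET_ERR(blocks);	/* decodes back to -2 */
	return 0;
}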
1833 void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
1847 void lustre_swab_ost_lvb(struct ost_lvb *lvb);
1850 * lquota data structures
1853 /* The lquota_id structure is a union of all the possible identifier types that
1854 * can be used with quota; this includes:
1857 * - a FID which can be used for per-directory quota in the future
1860 struct lu_fid qid_fid; /* FID for per-directory quota */
1861 __u64 qid_uid; /* user identifier */
1862 __u64 qid_gid; /* group identifier */
1865 /* quotactl management */
1866 struct obd_quotactl {
1868 __u32 qc_type; /* see Q_* flag below */
1871 struct obd_dqinfo qc_dqinfo;
1872 struct obd_dqblk qc_dqblk;
1875 void lustre_swab_obd_quotactl(struct obd_quotactl *q);
1877 #define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */
1878 #define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */
1879 #define Q_GETOINFO 0x800102 /* get obd quota info */
1880 #define Q_GETOQUOTA 0x800103 /* get obd quotas */
1881 #define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */
1883 #define Q_COPY(out, in, member) (out)->member = (in)->member
1885 #define QCTL_COPY(out, in) \
1887 Q_COPY(out, in, qc_cmd); \
1888 Q_COPY(out, in, qc_type); \
1889 Q_COPY(out, in, qc_id); \
1890 Q_COPY(out, in, qc_stat); \
1891 Q_COPY(out, in, qc_dqinfo); \
1892 Q_COPY(out, in, qc_dqblk); \
1895 /* Data structures associated with the quota locks */
1897 /* Glimpse descriptor used for the index & per-ID quota locks */
1898 struct ldlm_gl_lquota_desc {
1899 union lquota_id gl_id; /* quota ID subject to the glimpse */
1900 __u64 gl_flags; /* see LQUOTA_FL* below */
1901 __u64 gl_ver; /* new index version */
1902 __u64 gl_hardlimit; /* new hardlimit or qunit value */
1903 __u64 gl_softlimit; /* new softlimit */
1908 /* quota glimpse flags */
1909 #define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
1911 /* LVB used with quota (global and per-ID) locks */
1913 __u64 lvb_flags; /* see LQUOTA_FL* above */
1914 __u64 lvb_id_may_rel; /* space that might be released later */
1915 __u64 lvb_id_rel; /* space released by the slave for this ID */
1916 __u64 lvb_id_qunit; /* current qunit value */
1920 void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
1928 #define QUOTA_FIRST_OPC QUOTA_DQACQ
1937 MDS_GETATTR_NAME = 34,
1942 MDS_DISCONNECT = 39,
1948 MDS_DONE_WRITING = 45,
1950 MDS_QUOTACHECK = 47,
1953 MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
1957 MDS_HSM_STATE_GET = 54,
1958 MDS_HSM_STATE_SET = 55,
1959 MDS_HSM_ACTION = 56,
1960 MDS_HSM_PROGRESS = 57,
1961 MDS_HSM_REQUEST = 58,
1962 MDS_HSM_CT_REGISTER = 59,
1963 MDS_HSM_CT_UNREGISTER = 60,
1964 MDS_SWAP_LAYOUTS = 61,
1968 #define MDS_FIRST_OPC MDS_GETATTR
1974 enum mdt_reint_cmd {
1983 /* REINT_WRITE = 9, */
1987 void lustre_swab_generic_32s(__u32 *val);
1989 /* the disposition of the intent outlines what was executed */
1990 #define DISP_IT_EXECD 0x00000001
1991 #define DISP_LOOKUP_EXECD 0x00000002
1992 #define DISP_LOOKUP_NEG 0x00000004
1993 #define DISP_LOOKUP_POS 0x00000008
1994 #define DISP_OPEN_CREATE 0x00000010
1995 #define DISP_OPEN_OPEN 0x00000020
1996 #define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */
1997 #define DISP_ENQ_OPEN_REF 0x00800000
1998 #define DISP_ENQ_CREATE_REF 0x01000000
1999 #define DISP_OPEN_LOCK 0x02000000
2000 #define DISP_OPEN_LEASE 0x04000000
2001 #define DISP_OPEN_STRIPE 0x08000000
2003 /* INODE LOCK PARTS */
2004 #define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry, etc.; also
2005 * used to protect permissions (mode,
2006 * owner, group, etc.) before 2.4.
2008 #define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
2009 #define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
2010 #define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
2012 /* The PERM bit was added in 2.4 and is used to protect permissions (mode,
2013 * owner, group, ACL, etc.), separating them from the LOOKUP lock.
2014 * This is needed because for remote directories (in DNE) these locks are
2015 * granted by different MDTs (different LDLM namespaces).
2017 * For a local directory, the MDT always grants UPDATE_LOCK|PERM_LOCK together.
2018 * For a remote directory, the master MDT, where the remote directory lives,
2019 * grants UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is,
2020 * grants LOOKUP_LOCK.
2022 #define MDS_INODELOCK_PERM 0x000010
2023 #define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
2025 #define MDS_INODELOCK_MAXSHIFT 5
2026 /* This FULL lock is useful to take on unlink-type operations */
2027 #define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
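/* Illustrative sketch, documentation only: with MDS_INODELOCK_MAXSHIFT == 5
 * the FULL mask is (1 << 6) - 1 == 0x3f, i.e. the OR of all six inode lock
 * bits. The hypothetical helper below just verifies that identity. */
static inline int mds_inodelock_full_example(void)
{
	return MDS_INODELOCK_FULL ==
	       (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE |
		MDS_INODELOCK_OPEN | MDS_INODELOCK_LAYOUT |
		MDS_INODELOCK_PERM | MDS_INODELOCK_XATTR);	/* 0x3f */
}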
2029 /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
2030 * but was moved into name[1] along with the OID to avoid consuming the
2031 * name[2,3] fields that need to be used for the quota id (also a FID).
2034 LUSTRE_RES_ID_SEQ_OFF = 0,
2035 LUSTRE_RES_ID_VER_OID_OFF = 1,
2036 LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
2037 LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
2038 LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
2039 LUSTRE_RES_ID_HSH_OFF = 3
2042 #define MDS_STATUS_CONN 1
2043 #define MDS_STATUS_LOV 2
2045 /* mdt_thread_info.mti_flags. */
2047 /* The flag indicates Size-on-MDS attributes are changed. */
2048 MF_SOM_CHANGE = (1 << 0),
2049 /* Flags indicating that an epoch opens or closes. */
2050 MF_EPOCH_OPEN = (1 << 1),
2051 MF_EPOCH_CLOSE = (1 << 2),
2052 MF_MDC_CANCEL_FID1 = (1 << 3),
2053 MF_MDC_CANCEL_FID2 = (1 << 4),
2054 MF_MDC_CANCEL_FID3 = (1 << 5),
2055 MF_MDC_CANCEL_FID4 = (1 << 6),
2056 /* There is a pending attribute update. */
2057 MF_SOM_AU = (1 << 7),
2058 /* Cancel OST locks while getting OST attributes. */
2059 MF_GETATTR_LOCK = (1 << 8),
2060 MF_GET_MDT_IDX = (1 << 9),
2063 #define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
2065 #define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1
2067 /* These should be identical to their EXT4_*_FL counterparts; they are
2068 * redefined here only to avoid dragging in fs/ext4/ext4.h
2070 #define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
2071 #define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
2072 #define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
2073 #define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
2074 #define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
2076 /* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
2077 * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
2078 * protocol equivalents of LDISKFS_*_FL values stored on disk, while
2079 * the S_* flags are kernel-internal values that change between kernel
2080 * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
2081 * See b=16526 for a full history.
2083 static inline int ll_ext_to_inode_flags(int flags)
2085 return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) |
2086 ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) |
2087 ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) |
2088 ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) |
2089 ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
2092 static inline int ll_inode_to_ext_flags(int iflags)
2094 return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) |
2095 ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) |
2096 ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) |
2097 ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) |
2098 ((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
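/* Illustrative sketch (hypothetical helper): the two conversions above are
 * inverses for the five supported flags, so a wire LUSTRE_*_FL value
 * survives a round trip through the kernel-internal S_* representation. */
static inline int ll_flags_roundtrip_example(void)
{
	int wire = LUSTRE_APPEND_FL | LUSTRE_NOATIME_FL;

	return ll_inode_to_ext_flags(ll_ext_to_inode_flags(wire)) == wire;
}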
2101 /* 64 possible states */
2102 enum md_transient_state {
2103 MS_RESTORE = (1 << 0), /* restore is running */
2109 struct lustre_handle handle;
2111 __u64 size; /* Offset, in the case of MDS_READPAGE */
2115 __u64 blocks; /* XID, in the case of MDS_READPAGE */
2117 __u64 t_state; /* transient file state defined in
2118 * enum md_transient_state
2119 * was "ino" until 2.4.0
2127 __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */
2129 __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */
2130 __u32 unused2; /* was "generation" until 2.4.0 */
2135 __u32 max_cookiesize;
2136 __u32 uid_h; /* high 32-bits of uid, for FUID */
2137 __u32 gid_h; /* high 32-bits of gid, for FUID */
2138 __u32 padding_5; /* also fix lustre_swab_mdt_body */
2146 void lustre_swab_mdt_body(struct mdt_body *b);
2148 struct mdt_ioepoch {
2149 struct lustre_handle handle;
2155 void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
2157 /* permissions for md_perm.mp_perm */
2159 CFS_SETUID_PERM = 0x01,
2160 CFS_SETGID_PERM = 0x02,
2161 CFS_SETGRP_PERM = 0x04,
2162 CFS_RMTACL_PERM = 0x08,
2163 CFS_RMTOWN_PERM = 0x10
2166 /* inode access permission for a remote user; the inode info is omitted
2167 * because the client already knows it.
2169 struct mdt_remote_perm {
2176 __u32 rp_access_perm; /* MAY_READ/WRITE/EXEC */
2180 void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
2182 struct mdt_rec_setattr {
2192 __u32 sa_padding_1_h;
2193 struct lu_fid sa_fid;
2202 __u32 sa_attr_flags;
2204 __u32 sa_bias; /* some operation flags */
2210 void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
2213 * Attribute flags used in mdt_rec_setattr::sa_valid.
2214 * The kernel's #defines for ATTR_* should not be used over the network
2215 * since the client and MDS may run different kernels (see bug 13828)
2216 * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
2218 #define MDS_ATTR_MODE 0x1ULL /* = 1 */
2219 #define MDS_ATTR_UID 0x2ULL /* = 2 */
2220 #define MDS_ATTR_GID 0x4ULL /* = 4 */
2221 #define MDS_ATTR_SIZE 0x8ULL /* = 8 */
2222 #define MDS_ATTR_ATIME 0x10ULL /* = 16 */
2223 #define MDS_ATTR_MTIME 0x20ULL /* = 32 */
2224 #define MDS_ATTR_CTIME 0x40ULL /* = 64 */
2225 #define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */
2226 #define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */
2227 #define MDS_ATTR_FORCE 0x200ULL /* = 512, not a change, but force the change */
2228 #define MDS_ATTR_ATTR_FLAG 0x400ULL /* = 1024 */
2229 #define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */
2230 #define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */
2231 #define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */
2232 #define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path,
2235 #define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */
2237 #define MDS_FMODE_CLOSED 00000000
2238 #define MDS_FMODE_EXEC 00000004
2239 /* IO Epoch is opened on a closed file. */
2240 #define MDS_FMODE_EPOCH 01000000
2241 /* IO Epoch is opened on a file truncate. */
2242 #define MDS_FMODE_TRUNC 02000000
2243 /* Size-on-MDS Attribute Update is pending. */
2244 #define MDS_FMODE_SOM 04000000
2246 #define MDS_OPEN_CREATED 00000010
2247 #define MDS_OPEN_CROSS 00000020
2249 #define MDS_OPEN_CREAT 00000100
2250 #define MDS_OPEN_EXCL 00000200
2251 #define MDS_OPEN_TRUNC 00001000
2252 #define MDS_OPEN_APPEND 00002000
2253 #define MDS_OPEN_SYNC 00010000
2254 #define MDS_OPEN_DIRECTORY 00200000
2256 #define MDS_OPEN_BY_FID 040000000 /* open_by_fid for known object */
2257 #define MDS_OPEN_DELAY_CREATE 0100000000 /* delay initial object create */
2258 #define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
2259 #define MDS_OPEN_JOIN_FILE 0400000000 /* open for join file.
2260 * We do not support JOIN FILE
2261 * anymore; reserve this flag
2262 * just to prevent such a bit
2266 #define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
2267 #define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
2268 #define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA, the objects exist */
2269 #define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
2270 #define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or
2272 #define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created
2274 #define MDS_OPEN_LEASE 01000000000000ULL /* Open the file and grant lease
2275 * delegation, succeed if it's not
2276 * being opened with conflict mode.
2278 #define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */
2281 MDS_CHECK_SPLIT = 1 << 0,
2282 MDS_CROSS_REF = 1 << 1,
2283 MDS_VTX_BYPASS = 1 << 2,
2284 MDS_PERM_BYPASS = 1 << 3,
2286 MDS_QUOTA_IGNORE = 1 << 5,
2287 MDS_CLOSE_CLEANUP = 1 << 6,
2288 MDS_KEEP_ORPHAN = 1 << 7,
2289 MDS_RECOV_OPEN = 1 << 8,
2290 MDS_DATA_MODIFIED = 1 << 9,
2291 MDS_CREATE_VOLATILE = 1 << 10,
2292 MDS_OWNEROVERRIDE = 1 << 11,
2293 MDS_HSM_RELEASE = 1 << 12,
2296 /* instance of mdt_reint_rec */
2297 struct mdt_rec_create {
2305 __u32 cr_suppgid1_h;
2307 __u32 cr_suppgid2_h;
2308 struct lu_fid cr_fid1;
2309 struct lu_fid cr_fid2;
2310 struct lustre_handle cr_old_handle; /* handle in case of open replay */
2314 __u64 cr_padding_1; /* rr_blocks */
2317 /* The helpers set/get_mrc_cr_flags() are needed to access the 64-bit
2318 * cr_flags [cr_flags_l, cr_flags_h]; this was done to extend the
2319 * cr_flags size without breaking 1.8 compatibility.
2321 __u32 cr_flags_l; /* for use with open, low 32 bits */
2322 __u32 cr_flags_h; /* for use with open, high 32 bits */
2323 __u32 cr_umask; /* umask for create */
2324 __u32 cr_padding_4; /* rr_padding_4 */
2327 static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
2329 mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFUll);
2330 mrc->cr_flags_h = (__u32)(flags >> 32);
2333 static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
2335 return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
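/* Illustrative sketch (hypothetical helper): an open flag above 32 bits,
 * e.g. MDS_OPEN_RELEASE, is split across cr_flags_l/cr_flags_h by
 * set_mrc_cr_flags() and reassembled losslessly by get_mrc_cr_flags(). */
static inline int mrc_cr_flags_example(struct mdt_rec_create *mrc)
{
	__u64 flags = MDS_OPEN_CREAT | MDS_OPEN_RELEASE;

	set_mrc_cr_flags(mrc, flags);
	return get_mrc_cr_flags(mrc) == flags;	/* always true */
}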
2338 /* instance of mdt_reint_rec */
2339 struct mdt_rec_link {
2347 __u32 lk_suppgid1_h;
2349 __u32 lk_suppgid2_h;
2350 struct lu_fid lk_fid1;
2351 struct lu_fid lk_fid2;
2353 __u64 lk_padding_1; /* rr_atime */
2354 __u64 lk_padding_2; /* rr_ctime */
2355 __u64 lk_padding_3; /* rr_size */
2356 __u64 lk_padding_4; /* rr_blocks */
2358 __u32 lk_padding_5; /* rr_mode */
2359 __u32 lk_padding_6; /* rr_flags */
2360 __u32 lk_padding_7; /* rr_padding_2 */
2361 __u32 lk_padding_8; /* rr_padding_3 */
2362 __u32 lk_padding_9; /* rr_padding_4 */
2365 /* instance of mdt_reint_rec */
2366 struct mdt_rec_unlink {
2374 __u32 ul_suppgid1_h;
2376 __u32 ul_suppgid2_h;
2377 struct lu_fid ul_fid1;
2378 struct lu_fid ul_fid2;
2380 __u64 ul_padding_2; /* rr_atime */
2381 __u64 ul_padding_3; /* rr_ctime */
2382 __u64 ul_padding_4; /* rr_size */
2383 __u64 ul_padding_5; /* rr_blocks */
2386 __u32 ul_padding_6; /* rr_flags */
2387 __u32 ul_padding_7; /* rr_padding_2 */
2388 __u32 ul_padding_8; /* rr_padding_3 */
2389 __u32 ul_padding_9; /* rr_padding_4 */
2392 /* instance of mdt_reint_rec */
2393 struct mdt_rec_rename {
2401 __u32 rn_suppgid1_h;
2403 __u32 rn_suppgid2_h;
2404 struct lu_fid rn_fid1;
2405 struct lu_fid rn_fid2;
2407 __u64 rn_padding_1; /* rr_atime */
2408 __u64 rn_padding_2; /* rr_ctime */
2409 __u64 rn_padding_3; /* rr_size */
2410 __u64 rn_padding_4; /* rr_blocks */
2411 __u32 rn_bias; /* some operation flags */
2412 __u32 rn_mode; /* cross-ref rename has mode */
2413 __u32 rn_padding_5; /* rr_flags */
2414 __u32 rn_padding_6; /* rr_padding_2 */
2415 __u32 rn_padding_7; /* rr_padding_3 */
2416 __u32 rn_padding_8; /* rr_padding_4 */
2419 /* instance of mdt_reint_rec */
2420 struct mdt_rec_setxattr {
2428 __u32 sx_suppgid1_h;
2430 __u32 sx_suppgid2_h;
2431 struct lu_fid sx_fid;
2432 __u64 sx_padding_1; /* These three are rr_fid2 */
2437 __u64 sx_padding_5; /* rr_ctime */
2438 __u64 sx_padding_6; /* rr_size */
2439 __u64 sx_padding_7; /* rr_blocks */
2442 __u32 sx_padding_8; /* rr_flags */
2443 __u32 sx_padding_9; /* rr_padding_2 */
2444 __u32 sx_padding_10; /* rr_padding_3 */
2445 __u32 sx_padding_11; /* rr_padding_4 */
2449 * mdt_rec_reint is the template for all mdt_reint_xxx structures.
2450 * Do NOT change the size of various members, otherwise the value
2451 * will be broken in lustre_swab_mdt_rec_reint().
2453 * If you add new members in other mdt_reint_xxx structures and need to use the
2454 * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
2456 struct mdt_rec_reint {
2464 __u32 rr_suppgid1_h;
2466 __u32 rr_suppgid2_h;
2467 struct lu_fid rr_fid1;
2468 struct lu_fid rr_fid2;
2479 __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
2482 void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
2484 /* lmv structures */
2486 __u32 ld_tgt_count; /* how many MDS's */
2487 __u32 ld_active_tgt_count; /* how many active */
2488 __u32 ld_default_stripe_count; /* how many objects are used */
2489 __u32 ld_pattern; /* default MEA_MAGIC_* */
2490 __u64 ld_default_hash_size;
2491 __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */
2492 __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */
2493 __u32 ld_qos_maxage; /* in seconds */
2494 __u32 ld_padding_3; /* also fix lustre_swab_lmv_desc */
2495 __u32 ld_padding_4; /* also fix lustre_swab_lmv_desc */
2496 struct obd_uuid ld_uuid;
2499 /* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
2500 struct lmv_stripe_md {
2505 char mea_pool_name[LOV_MAXPOOLNAME];
2506 struct lu_fid mea_ids[0];
2509 #define MEA_MAGIC_LAST_CHAR 0xb2221ca1
2510 #define MEA_MAGIC_ALL_CHARS 0xb222a11c
2511 #define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
2513 #define MAX_HASH_SIZE_32 0x7fffffffUL
2514 #define MAX_HASH_SIZE 0x7fffffffffffffffULL
2515 #define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
2521 FLD_FIRST_OPC = FLD_QUERY
2527 SEQ_FIRST_OPC = SEQ_QUERY
2531 SEQ_ALLOC_SUPER = 0,
2542 * LOV data structures
2545 #define LOV_MAX_UUID_BUFFER_SIZE 8192
2546 /* The size of the buffer the lov/mdc reserves for the
2547 * array of UUIDs returned by the MDS. With the current
2548 * protocol, this will limit the max number of OSTs per LOV
2551 #define LOV_DESC_MAGIC 0xB0CCDE5C
2552 #define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */
2553 #define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS)
2555 /* LOV settings descriptor (should only contain static info) */
2557 __u32 ld_tgt_count; /* how many OBD's */
2558 __u32 ld_active_tgt_count; /* how many active */
2559 __u32 ld_default_stripe_count; /* how many objects are used */
2560 __u32 ld_pattern; /* default PATTERN_RAID0 */
2561 __u64 ld_default_stripe_size; /* in bytes */
2562 __u64 ld_default_stripe_offset; /* in bytes */
2563 __u32 ld_padding_0; /* unused */
2564 __u32 ld_qos_maxage; /* in seconds */
2565 __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */
2566 __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */
2567 struct obd_uuid ld_uuid;
2570 #define ld_magic ld_active_tgt_count /* for swabbing from llogs */
2572 void lustre_swab_lov_desc(struct lov_desc *ld);
2577 /* opcodes -- MUST be distinct from OST/MDS opcodes */
2582 LDLM_BL_CALLBACK = 104,
2583 LDLM_CP_CALLBACK = 105,
2584 LDLM_GL_CALLBACK = 106,
2585 LDLM_SET_INFO = 107,
2588 #define LDLM_FIRST_OPC LDLM_ENQUEUE
2590 #define RES_NAME_SIZE 4
2591 struct ldlm_res_id {
2592 __u64 name[RES_NAME_SIZE];
2595 #define DLDLMRES "[%#llx:%#llx:%#llx].%llx"
2596 #define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
2597 (res)->lr_name.name[2], (res)->lr_name.name[3]
2599 static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
2600 const struct ldlm_res_id *res1)
2602 return !memcmp(res0, res1, sizeof(*res0));
2619 #define LCK_MODE_NUM 8
2629 #define LDLM_MIN_TYPE LDLM_PLAIN
2631 struct ldlm_extent {
2637 #define LDLM_GID_ANY ((__u64)-1)
2639 static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
2640 struct ldlm_extent *ex2)
2642 return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
2645 /* check if @ex1 contains @ex2 */
2646 static inline int ldlm_extent_contain(struct ldlm_extent *ex1,
2647 struct ldlm_extent *ex2)
2649 return (ex1->start <= ex2->start) && (ex1->end >= ex2->end);
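/* Illustrative sketch (hypothetical helper): the extent [0, 4095] overlaps
 * with and is contained in the whole-object extent [0, OBD_OBJECT_EOF],
 * demonstrating the two predicates above. */
static inline int ldlm_extent_example(void)
{
	struct ldlm_extent whole = { .start = 0, .end = OBD_OBJECT_EOF };
	struct ldlm_extent page = { .start = 0, .end = 4095 };

	return ldlm_extent_overlap(&whole, &page) &&
	       ldlm_extent_contain(&whole, &page);
}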
2652 struct ldlm_inodebits {
2656 struct ldlm_flock_wire {
2664 /* It is important that the fields of the ldlm_extent structure match
2665 * the first fields of the ldlm_flock structure because there is only
2666 * one ldlm_swab routine to process the ldlm_policy_data_t union. If
2667 * this ever changes we will need to swab the union differently based
2668 * on the resource type.
2672 struct ldlm_extent l_extent;
2673 struct ldlm_flock_wire l_flock;
2674 struct ldlm_inodebits l_inodebits;
2675 } ldlm_wire_policy_data_t;
2677 union ldlm_gl_desc {
2678 struct ldlm_gl_lquota_desc lquota_desc;
2681 void lustre_swab_gl_desc(union ldlm_gl_desc *);
2683 struct ldlm_intent {
2687 void lustre_swab_ldlm_intent(struct ldlm_intent *i);
2689 struct ldlm_resource_desc {
2690 enum ldlm_type lr_type;
2691 __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
2692 struct ldlm_res_id lr_name;
2695 struct ldlm_lock_desc {
2696 struct ldlm_resource_desc l_resource;
2697 enum ldlm_mode l_req_mode;
2698 enum ldlm_mode l_granted_mode;
2699 ldlm_wire_policy_data_t l_policy_data;
2702 #define LDLM_LOCKREQ_HANDLES 2
2703 #define LDLM_ENQUEUE_CANCEL_OFF 1
2705 struct ldlm_request {
2708 struct ldlm_lock_desc lock_desc;
2709 struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
2712 void lustre_swab_ldlm_request(struct ldlm_request *rq);
2714 /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
2715 * Otherwise, 2 are available.
2717 #define ldlm_request_bufsize(count, type) \
2719 int _avail = LDLM_LOCKREQ_HANDLES; \
2720 _avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0); \
2721 sizeof(struct ldlm_request) + \
2722 (count > _avail ? count - _avail : 0) * \
2723 sizeof(struct lustre_handle); \
2728 __u32 lock_padding; /* also fix lustre_swab_ldlm_reply */
2729 struct ldlm_lock_desc lock_desc;
2730 struct lustre_handle lock_handle;
2731 __u64 lock_policy_res1;
2732 __u64 lock_policy_res2;
2735 void lustre_swab_ldlm_reply(struct ldlm_reply *r);
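/* Illustrative sketch, documentation only: per the comment above
 * ldlm_request_bufsize(), an LDLM_ENQUEUE request already uses one of the two
 * embedded lock handles, so a request carrying three cancel handles needs
 * room for two extra struct lustre_handle beyond the base request size.
 * The helper below is hypothetical. */
static inline int ldlm_request_bufsize_example(void)
{
	return ldlm_request_bufsize(3, LDLM_ENQUEUE) ==
	       sizeof(struct ldlm_request) + 2 * sizeof(struct lustre_handle);
}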
2737 #define ldlm_flags_to_wire(flags) ((__u32)(flags))
2738 #define ldlm_flags_from_wire(flags) ((__u64)(flags))
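/* Illustrative sketch (hypothetical helper): only the low 32 bits of the LDLM
 * flags travel over the wire with the two macros above, so the round trip is
 * lossy for any purely local flag kept in the upper 32 bits. */
static inline __u64 ldlm_flags_wire_roundtrip_example(__u64 flags)
{
	return ldlm_flags_from_wire(ldlm_flags_to_wire(flags));
}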
2741 * Opcodes for mountconf (mgs and mgc)
2746 MGS_EXCEPTION, /* node died, etc. */
2747 MGS_TARGET_REG, /* whenever target starts up */
2753 #define MGS_FIRST_OPC MGS_CONNECT
2755 #define MGS_PARAM_MAXLEN 1024
2756 #define KEY_SET_INFO "set_info"
2758 struct mgs_send_param {
2759 char mgs_param[MGS_PARAM_MAXLEN];
2762 /* We pass this info to the MGS so it can write config logs */
2763 #define MTI_NAME_MAXLEN 64
2764 #define MTI_PARAM_MAXLEN 4096
2765 #define MTI_NIDS_MAX 32
2766 struct mgs_target_info {
2767 __u32 mti_lustre_ver;
2768 __u32 mti_stripe_index;
2769 __u32 mti_config_ver;
2771 __u32 mti_nid_count;
2772 __u32 mti_instance; /* Running instance of target */
2773 char mti_fsname[MTI_NAME_MAXLEN];
2774 char mti_svname[MTI_NAME_MAXLEN];
2775 char mti_uuid[sizeof(struct obd_uuid)];
2776 __u64 mti_nids[MTI_NIDS_MAX]; /* host nids (lnet_nid_t)*/
2777 char mti_params[MTI_PARAM_MAXLEN];
2780 void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
2782 struct mgs_nidtbl_entry {
2783 __u64 mne_version; /* table version of this entry */
2784 __u32 mne_instance; /* target instance # */
2785 __u32 mne_index; /* target index */
2786 __u32 mne_length; /* length of this entry, in bytes */
2787 __u8 mne_type; /* target type LDD_F_SV_TYPE_OST/MDT */
2788 __u8 mne_nid_type; /* type of NID (must be zero); reserved for IPv6 */
2789 __u8 mne_nid_size; /* size of each NID, in bytes */
2790 __u8 mne_nid_count; /* # of NIDs in buffer */
2792 lnet_nid_t nids[0]; /* variable size buffer for NIDs. */
2796 void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
2798 struct mgs_config_body {
2799 char mcb_name[MTI_NAME_MAXLEN]; /* logname */
2800 __u64 mcb_offset; /* next index of config log to request */
2801 __u16 mcb_type; /* type of log: CONFIG_T_[CONFIG|RECOVER] */
2803 __u8 mcb_bits; /* bits unit size of config log */
2804 __u32 mcb_units; /* # of units for bulk transfer */
2807 void lustre_swab_mgs_config_body(struct mgs_config_body *body);
2809 struct mgs_config_res {
2810 __u64 mcr_offset; /* index of last config log */
2811 __u64 mcr_size; /* size of the log */
2814 void lustre_swab_mgs_config_res(struct mgs_config_res *body);
2816 /* Config marker flags (in config log) */
2817 #define CM_START 0x01
2819 #define CM_SKIP 0x04
2820 #define CM_UPGRADE146 0x08
2821 #define CM_EXCLUDE 0x10
2822 #define CM_START_SKIP (CM_START | CM_SKIP)
2825 __u32 cm_step; /* aka config version */
2827 __u32 cm_vers; /* lustre release version number */
2828 __u32 cm_padding; /* 64 bit align */
2829 __s64 cm_createtime; /* when this record was first created */
2830 __s64 cm_canceltime; /* when this record is no longer valid */
2831 char cm_tgtname[MTI_NAME_MAXLEN];
2832 char cm_comment[MTI_NAME_MAXLEN];
2835 void lustre_swab_cfg_marker(struct cfg_marker *marker, int swab, int size);
2838 * Opcodes for multiple servers.
2848 #define OBD_FIRST_OPC OBD_PING
2850 /* catalog of log objects */
2852 /** Identifier for a single log object */
2854 struct ost_id lgl_oi;
2858 /** Records written to the CATALOGS list */
2859 #define CATLIST "CATALOGS"
2861 struct llog_logid lci_logid;
2867 /* Log data record types - there is no specific reason that these need to
2868 * be related to the RPC opcodes, but no reason not to (may be handy later?)
2870 #define LLOG_OP_MAGIC 0x10600000
2871 #define LLOG_OP_MASK 0xfff00000
2874 LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000,
2875 OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00,
2876 /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */
2877 MDS_UNLINK_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
2878 REINT_UNLINK, /* obsolete after 2.5.0 */
2879 MDS_UNLINK64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2881 /* MDS_SETATTR_REC = LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
2882 MDS_SETATTR64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2884 OBD_CFG_REC = LLOG_OP_MAGIC | 0x20000,
2885 /* PTL_CFG_REC = LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
2886 LLOG_GEN_REC = LLOG_OP_MAGIC | 0x40000,
2887 /* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
2888 CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000,
2889 CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
2890 HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000,
2891 LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
2892 LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
2895 #define LLOG_REC_HDR_NEEDS_SWABBING(r) \
2896 (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
2898 /** Log record header - stored in little endian order.
2899 * Each record must start with this struct, end with a llog_rec_tail,
2900 * and be a multiple of 256 bits in size.
2902 struct llog_rec_hdr {
2909 struct llog_rec_tail {
2914 /* Where the data follows, just after the header */
2915 #define REC_DATA(ptr) \
2916 ((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))
2918 #define REC_DATA_LEN(rec) \
2919 (rec->lrh_len - sizeof(struct llog_rec_hdr) - \
2920 sizeof(struct llog_rec_tail))
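/* Illustrative sketch (hypothetical helper): log records are stored in
 * little-endian order, so a big-endian reader first checks
 * LLOG_REC_HDR_NEEDS_SWABBING() and swabs the record; the payload then sits
 * between the record header and the record tail, as computed by the two
 * macros above. */
static inline void *llog_rec_payload_example(struct llog_rec_hdr *rec,
					     int *len)
{
	/* assumes rec has already been swabbed to host order if needed */
	*len = REC_DATA_LEN(rec);	/* lrh_len minus header and tail */
	return REC_DATA(rec);		/* first payload byte */
}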
2922 struct llog_logid_rec {
2923 struct llog_rec_hdr lid_hdr;
2924 struct llog_logid lid_id;
2928 struct llog_rec_tail lid_tail;
2931 struct llog_unlink_rec {
2932 struct llog_rec_hdr lur_hdr;
2936 struct llog_rec_tail lur_tail;
2939 struct llog_unlink64_rec {
2940 struct llog_rec_hdr lur_hdr;
2941 struct lu_fid lur_fid;
2942 __u32 lur_count; /* to destroy the lost precreated objects */
2946 struct llog_rec_tail lur_tail;
2949 struct llog_setattr64_rec {
2950 struct llog_rec_hdr lsr_hdr;
2951 struct ost_id lsr_oi;
2957 struct llog_rec_tail lsr_tail;
2960 struct llog_size_change_rec {
2961 struct llog_rec_hdr lsc_hdr;
2962 struct ll_fid lsc_fid;
2967 struct llog_rec_tail lsc_tail;
2970 /* changelog llog name, needed by client replicators */
2971 #define CHANGELOG_CATALOG "changelog_catalog"
2973 struct changelog_setinfo {
2978 /** changelog record */
2979 struct llog_changelog_rec {
2980 struct llog_rec_hdr cr_hdr;
2981 struct changelog_rec cr;
2982 struct llog_rec_tail cr_tail; /**< for_sizeof_only */
2985 struct llog_changelog_ext_rec {
2986 struct llog_rec_hdr cr_hdr;
2987 struct changelog_ext_rec cr;
2988 struct llog_rec_tail cr_tail; /**< for_sizeof_only */
2991 struct llog_changelog_user_rec {
2992 struct llog_rec_hdr cur_hdr;
2996 struct llog_rec_tail cur_tail;
2999 enum agent_req_status {
3007 static inline char *agent_req_status2name(enum agent_req_status ars)
3025 static inline bool agent_req_in_final_state(enum agent_req_status ars)
3027 return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
3028 (ars == ARS_CANCELED));
3031 struct llog_agent_req_rec {
3032 struct llog_rec_hdr arr_hdr; /**< record header */
3033 __u32 arr_status; /**< status of the request */
3037 __u32 arr_archive_id; /**< backend archive number */
3038 __u64 arr_flags; /**< req flags */
3039 __u64 arr_compound_id;/**< compound cookie */
3040 __u64 arr_req_create; /**< req. creation time */
3041 __u64 arr_req_change; /**< req. status change time */
3042 struct hsm_action_item arr_hai; /**< req. to the agent */
3043 struct llog_rec_tail arr_tail; /**< record tail for_sizeof_only */
3046 /* Old llog gen for compatibility */
3052 struct llog_gen_rec {
3053 struct llog_rec_hdr lgr_hdr;
3054 struct llog_gen lgr_gen;
3058 struct llog_rec_tail lgr_tail;
3061 /* On-disk header structure of each log object, stored in little endian order */
3062 #define LLOG_CHUNK_SIZE 8192
3063 #define LLOG_HEADER_SIZE (96)
3064 #define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
3066 #define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
3068 /* flags for the logs */
3070 LLOG_F_ZAP_WHEN_EMPTY = 0x1,
3071 LLOG_F_IS_CAT = 0x2,
3072 LLOG_F_IS_PLAIN = 0x4,
3075 struct llog_log_hdr {
3076 struct llog_rec_hdr llh_hdr;
3077 __s64 llh_timestamp;
3079 __u32 llh_bitmap_offset;
3083 /* for a catalog the first plain slot is next to it */
3084 struct obd_uuid llh_tgtuuid;
3085 __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
3086 __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
3087 struct llog_rec_tail llh_tail;
3090 #define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
3091 llh->llh_bitmap_offset - \
3092 sizeof(llh->llh_tail)) * 8)
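/* Illustrative sketch (hypothetical helper): the record bitmap occupies
 * whatever space of the llog header remains between llh_bitmap_offset and
 * llh_tail, so its size in bits follows directly from lrh_len as computed
 * by the macro above. */
static inline __u32 llog_bitmap_size_example(struct llog_log_hdr *llh)
{
	return LLOG_BITMAP_SIZE(llh);	/* number of usable record slots */
}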
3094 /** log cookies are used to reference a specific log file and a record
3097 struct llog_cookie {
3098 struct llog_logid lgc_lgl;
3104 /** llog protocol */
3105 enum llogd_rpc_ops {
3106 LLOG_ORIGIN_HANDLE_CREATE = 501,
3107 LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502,
3108 LLOG_ORIGIN_HANDLE_READ_HEADER = 503,
3109 LLOG_ORIGIN_HANDLE_WRITE_REC = 504,
3110 LLOG_ORIGIN_HANDLE_CLOSE = 505,
3111 LLOG_ORIGIN_CONNECT = 506,
3112 LLOG_CATINFO = 507, /* deprecated */
3113 LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508,
3114 LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/
3116 LLOG_FIRST_OPC = LLOG_ORIGIN_HANDLE_CREATE
3120 struct llog_logid lgd_logid;
3122 __u32 lgd_llh_flags;
3124 __u32 lgd_saved_index;
3126 __u64 lgd_cur_offset;
3129 struct llogd_conn_body {
3130 struct llog_gen lgdc_gen;
3131 struct llog_logid lgdc_logid;
3132 __u32 lgdc_ctxt_idx;
3135 /* Note: 64-bit types are 64-bit aligned in structure */
3137 __u64 o_valid; /* hot fields in this obdo */
3140 __u64 o_size; /* o_size-o_blocks == ost_lvb */
3144 __u64 o_blocks; /* brw: cli sent cached bytes */
3147 /* 32-bit fields start here: keep an even number of them via padding */
3148 __u32 o_blksize; /* optimal IO blocksize */
3149 __u32 o_mode; /* brw: cli sent cache remain */
3153 __u32 o_nlink; /* brw: checksum */
3155 __u32 o_misc; /* brw: o_dropped */
3157 __u64 o_ioepoch; /* epoch in ost writes */
3158 __u32 o_stripe_idx; /* holds stripe idx */
3160 struct lustre_handle o_handle; /* brw: lock handle to prolong locks
3162 struct llog_cookie o_lcookie; /* destroy: unlink cookie from MDS
3167 __u64 o_data_version; /* getattr: sum of iversion for
3169 * brw: grant space consumed on
3170 * the client for the write
3177 #define o_dirty o_blocks
3178 #define o_undirty o_mode
3179 #define o_dropped o_misc
3180 #define o_cksum o_nlink
3181 #define o_grant_used o_data_version
3183 static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
3185 const struct obdo *lobdo)
3188 wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3192 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3193 fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
3194 /* Currently OBD_FL_OSTID will only be used when a 2.4 echo
3195 * client communicates with a pre-2.4 server
3197 wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
3198 wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
3202 static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
3204 const struct obdo *wobdo)
3206 __u32 local_flags = 0;
3208 if (lobdo->o_valid & OBD_MD_FLFLAGS)
3209 local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
3212 if (local_flags != 0) {
3213 lobdo->o_valid |= OBD_MD_FLFLAGS;
3214 lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3215 lobdo->o_flags |= local_flags;
3220 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3221 fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
3223 lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
3224 lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
3225 lobdo->o_oi.oi_fid.f_ver = 0;
3229 /* request structure for OST's */
3234 /* Key for FIEMAP to be used in get_info calls */
3235 struct ll_fiemap_info_key {
3238 struct ll_user_fiemap fiemap;
3241 void lustre_swab_ost_body(struct ost_body *b);
3242 void lustre_swab_ost_last_id(__u64 *id);
3243 void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
3245 void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
3246 void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
3247 void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
3249 void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
3252 void lustre_swab_llogd_body(struct llogd_body *d);
3253 void lustre_swab_llog_hdr(struct llog_log_hdr *h);
3254 void lustre_swab_llogd_conn_body(struct llogd_conn_body *d);
3255 void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
3258 void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
3260 /* Functions for dumping PTLRPC fields */
3261 void dump_rniobuf(struct niobuf_remote *rnb);
3262 void dump_ioo(struct obd_ioobj *nb);
3263 void dump_ost_body(struct ost_body *ob);
3264 void dump_rcs(__u32 *rc);
3266 /* security opcodes */
3269 SEC_CTX_INIT_CONT = 802,
3272 SEC_FIRST_OPC = SEC_CTX_INIT
3276 * capa related definitions
3278 #define CAPA_HMAC_MAX_LEN 64
3279 #define CAPA_HMAC_KEY_MAX_LEN 56
3281 /* NB: take care when changing the sequence of elements in this struct,
3282 * because the offset info is used in find_capa()
3284 struct lustre_capa {
3285 struct lu_fid lc_fid; /** fid */
3286 __u64 lc_opc; /** operations allowed */
3287 __u64 lc_uid; /** file owner */
3288 __u64 lc_gid; /** file group */
3289 __u32 lc_flags; /** HMAC algorithm & flags */
3290 __u32 lc_keyid; /** key# used for the capability */
3291 __u32 lc_timeout; /** capa timeout value (sec) */
3292 /* FIXME: y2038 time_t overflow: */
3293 __u32 lc_expiry; /** expiry time (sec) */
3294 __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
3297 void lustre_swab_lustre_capa(struct lustre_capa *c);
3299 /** lustre_capa::lc_opc */
3301 CAPA_OPC_BODY_WRITE = 1<<0, /**< write object data */
3302 CAPA_OPC_BODY_READ = 1<<1, /**< read object data */
3303 CAPA_OPC_INDEX_LOOKUP = 1<<2, /**< lookup object fid */
3304 CAPA_OPC_INDEX_INSERT = 1<<3, /**< insert object fid */
3305 CAPA_OPC_INDEX_DELETE = 1<<4, /**< delete object fid */
3306 CAPA_OPC_OSS_WRITE = 1<<5, /**< write oss object data */
3307 CAPA_OPC_OSS_READ = 1<<6, /**< read oss object data */
3308 CAPA_OPC_OSS_TRUNC = 1<<7, /**< truncate oss object */
3309 CAPA_OPC_OSS_DESTROY = 1<<8, /**< destroy oss object */
3310 CAPA_OPC_META_WRITE = 1<<9, /**< write object meta data */
3311 CAPA_OPC_META_READ = 1<<10, /**< read object meta data */
3314 #define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
3315 #define CAPA_OPC_MDS_ONLY \
3316 (CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
3317 CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
3318 #define CAPA_OPC_OSS_ONLY \
3319 (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \
3320 CAPA_OPC_OSS_DESTROY)
3321 #define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
3322 #define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)
3324 struct lustre_capa_key {
3325 __u64 lk_seq; /**< mds# */
3326 __u32 lk_keyid; /**< key# */
3328 __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
3331 /** The link ea holds 1 \a link_ea_entry for each hardlink */
3332 #define LINK_EA_MAGIC 0x11EAF1DFUL
3333 struct link_ea_header {
3336 __u64 leh_len; /* total size */
3342 /** Hardlink data is name and parent fid.
3343 * Stored in this crazy struct for maximum packing and endian-neutrality
3345 struct link_ea_entry {
3346 /** __u16 stored big-endian, unaligned */
3347 unsigned char lee_reclen[2];
3348 unsigned char lee_parent_fid[sizeof(struct lu_fid)];
3352 /** fid2path request/reply structure */
3353 struct getinfo_fid2path {
3354 struct lu_fid gf_fid;
3361 void lustre_swab_fid2path(struct getinfo_fid2path *gf);
3364 LAYOUT_INTENT_ACCESS = 0,
3365 LAYOUT_INTENT_READ = 1,
3366 LAYOUT_INTENT_WRITE = 2,
3367 LAYOUT_INTENT_GLIMPSE = 3,
3368 LAYOUT_INTENT_TRUNC = 4,
3369 LAYOUT_INTENT_RELEASE = 5,
3370 LAYOUT_INTENT_RESTORE = 6
3373 /* enqueue layout lock with intent */
3374 struct layout_intent {
3375 __u32 li_opc; /* intent operation for enqueue, read, write etc */
3381 void lustre_swab_layout_intent(struct layout_intent *li);
3384 * On-the-wire version of the hsm_progress structure.
3386 * Contains the userspace hsm_progress and some internal fields.
3388 struct hsm_progress_kernel {
3389 /* Field taken from struct hsm_progress */
3390 struct lu_fid hpk_fid;
3392 struct hsm_extent hpk_extent;
3394 __u16 hpk_errval; /* positive val */
3396 /* Additional fields */
3397 __u64 hpk_data_version;
3401 void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
3402 void lustre_swab_hsm_current_action(struct hsm_current_action *action);
3403 void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
3405 void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
3406 void lustre_swab_hsm_request(struct hsm_request *hr);
3408 /** layout swap request structure
3409 * fid1 and fid2 are in mdt_body
3411 struct mdc_swap_layouts {
3415 void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
3418 struct lustre_handle cd_handle;
3419 struct lu_fid cd_fid;
3420 __u64 cd_data_version;
3421 __u64 cd_reserved[8];
3424 void lustre_swab_close_data(struct close_data *data);