 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 * Copyright (c) 2011, 2012, Intel Corporation.
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 * lustre/include/lustre/lustre_idl.h
 * Lustre wire protocol definitions.
/** \defgroup lustreidl lustreidl
 * Lustre wire protocol definitions.
 * ALL structs passing over the wire should be declared here. Structs
 * that are used in interfaces with userspace should go in lustre_user.h.
 * All structs being declared here should be built from simple fixed-size
 * types (__u8, __u16, __u32, __u64) or be built from other types or
 * structs also declared in this file. Similarly, all flags and magic
 * values in those structs should also be declared here. This ensures
 * that the Lustre wire protocol is not influenced by external dependencies.
 * The only other acceptable items in this file are VERY SIMPLE accessor
 * functions to avoid callers grubbing inside the structures, and the
 * prototypes of the swabber functions for each struct. Nothing that
 * depends on external functions or definitions should be in here.
 * Structs must be properly aligned to put 64-bit values on an 8-byte
 * boundary. Any structs being added here must also be added to
 * utils/wirecheck.c and "make newwiretest" run to regenerate the
 * utils/wiretest.c sources. This allows us to verify that wire structs
 * have the proper alignment/size on all architectures.
 * DO NOT CHANGE any of the structs, flags, values declared here and used
 * in released Lustre versions. Some structs may have padding fields that
 * can be used. Some structs might allow addition at the end (verify this
 * in the code to ensure that new/old clients that see this larger struct
 * do not fail, otherwise you need to implement protocol compatibility).
 * We assume all nodes are either little-endian or big-endian, and we
 * always send messages in the sender's native format. The receiver
 * detects the message format by checking the 'magic' field of the message
 * (see lustre_msg_swabbed() below).
 * Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
 * implemented either here, inline (trivial implementations) or in
 * ptlrpc/pack_generic.c. These 'swabbers' convert the type from "other"
 * endian, in-place in the message buffer.
 * A swabber takes a single pointer argument. The caller must already have
 * verified that the length of the message buffer >= sizeof (type).
 * For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
 * may be defined that swabs just the variable part, after the caller has
 * verified that the message buffer is large enough.
#ifndef _LUSTRE_IDL_H_
#define _LUSTRE_IDL_H_
#include "../../../include/linux/libcfs/libcfs.h"
#include "../../../include/linux/lnet/types.h"
/* Defn's shared with user-space. */
#include "lustre_user.h"
#include "lustre_errno.h"
/* FOO_REQUEST_PORTAL is for incoming requests on the FOO
 * FOO_REPLY_PORTAL   is for incoming replies on the FOO
 * FOO_BULK_PORTAL    is for incoming bulk on the FOO
/* Lustre service names follow the format:
 *   service name + MDT + seq name
#define LUSTRE_MDT_MAXNAMELEN	80
#define CONNMGR_REQUEST_PORTAL		1
#define CONNMGR_REPLY_PORTAL		2
//#define OSC_REQUEST_PORTAL		3
#define OSC_REPLY_PORTAL		4
//#define OSC_BULK_PORTAL		5
#define OST_IO_PORTAL			6
#define OST_CREATE_PORTAL		7
#define OST_BULK_PORTAL			8
//#define MDC_REQUEST_PORTAL		9
#define MDC_REPLY_PORTAL		10
//#define MDC_BULK_PORTAL		11
#define MDS_REQUEST_PORTAL		12
//#define MDS_REPLY_PORTAL		13
#define MDS_BULK_PORTAL			14
#define LDLM_CB_REQUEST_PORTAL		15
#define LDLM_CB_REPLY_PORTAL		16
#define LDLM_CANCEL_REQUEST_PORTAL	17
#define LDLM_CANCEL_REPLY_PORTAL	18
//#define PTLBD_REQUEST_PORTAL		19
//#define PTLBD_REPLY_PORTAL		20
//#define PTLBD_BULK_PORTAL		21
#define MDS_SETATTR_PORTAL		22
#define MDS_READPAGE_PORTAL		23
#define OUT_PORTAL			24
#define MGC_REPLY_PORTAL		25
#define MGS_REQUEST_PORTAL		26
#define MGS_REPLY_PORTAL		27
#define OST_REQUEST_PORTAL		28
#define FLD_REQUEST_PORTAL		29
#define SEQ_METADATA_PORTAL		30
#define SEQ_DATA_PORTAL			31
#define SEQ_CONTROLLER_PORTAL		32
#define MGS_BULK_PORTAL			33
/* Portal 63 is reserved for the Cray Inc DVS - nic@cray.com, roe@cray.com, n8851@cray.com */
#define PTL_RPC_MSG_REQUEST	4711
#define PTL_RPC_MSG_ERR		4712
#define PTL_RPC_MSG_REPLY	4713
/* DON'T use swabbed values of MAGIC as magic! */
#define LUSTRE_MSG_MAGIC_V1	0x0BD00BD0
#define LUSTRE_MSG_MAGIC_V2	0x0BD00BD3
#define LUSTRE_MSG_MAGIC_V1_SWABBED	0xD00BD00B
#define LUSTRE_MSG_MAGIC_V2_SWABBED	0xD30BD00B
#define LUSTRE_MSG_MAGIC	LUSTRE_MSG_MAGIC_V2
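/*
 * Illustrative sketch only (not part of the wire protocol, and not used
 * anywhere in this file): a receiver can tell that the sender had the
 * opposite endianness by comparing the received magic against the
 * pre-swabbed constants above; the real check happens while unpacking in
 * ptlrpc (see lustre_unpack_msg()).
 */
static inline int lustre_msg_magic_swabbed(__u32 magic)
{
	return magic == LUSTRE_MSG_MAGIC_V1_SWABBED ||
	       magic == LUSTRE_MSG_MAGIC_V2_SWABBED;
}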
#define PTLRPC_MSG_VERSION	0x00000003
#define LUSTRE_VERSION_MASK	0xffff0000
#define LUSTRE_OBD_VERSION	0x00010000
#define LUSTRE_MDS_VERSION	0x00020000
#define LUSTRE_OST_VERSION	0x00030000
#define LUSTRE_DLM_VERSION	0x00040000
#define LUSTRE_LOG_VERSION	0x00050000
#define LUSTRE_MGS_VERSION	0x00060000
 * Describes a range of sequences: lsr_start is included but lsr_end is not
 * part of the range.
 * The same structure is used in the fld module, where the lsr_index field
 * holds the mdt id.
struct lu_seq_range {
#define LU_SEQ_RANGE_MDT	0x0
#define LU_SEQ_RANGE_OST	0x1
#define LU_SEQ_RANGE_ANY	0x3
#define LU_SEQ_RANGE_MASK	0x3
static inline unsigned fld_range_type(const struct lu_seq_range *range)
	return range->lsr_flags & LU_SEQ_RANGE_MASK;
static inline int fld_range_is_ost(const struct lu_seq_range *range)
	return fld_range_type(range) == LU_SEQ_RANGE_OST;
static inline int fld_range_is_mdt(const struct lu_seq_range *range)
	return fld_range_type(range) == LU_SEQ_RANGE_MDT;
 * The ANY range type is only used when the fld client sends an fld query
 * request but does not know whether the seq is MDT or OST, so it sends the
 * request with the ANY type, which means either seq type obtained from the
 * lookup can be expected.
static inline unsigned fld_range_is_any(const struct lu_seq_range *range)
	return fld_range_type(range) == LU_SEQ_RANGE_ANY;
static inline void fld_range_set_type(struct lu_seq_range *range,
	range->lsr_flags |= flags;
static inline void fld_range_set_mdt(struct lu_seq_range *range)
	fld_range_set_type(range, LU_SEQ_RANGE_MDT);
static inline void fld_range_set_ost(struct lu_seq_range *range)
	fld_range_set_type(range, LU_SEQ_RANGE_OST);
static inline void fld_range_set_any(struct lu_seq_range *range)
	fld_range_set_type(range, LU_SEQ_RANGE_ANY);
 * returns width of given range \a r
static inline __u64 range_space(const struct lu_seq_range *range)
	return range->lsr_end - range->lsr_start;
 * initialize range to zero
static inline void range_init(struct lu_seq_range *range)
	memset(range, 0, sizeof(*range));
 * check if given seq id \a s is within given range \a r
static inline int range_within(const struct lu_seq_range *range,
	return s >= range->lsr_start && s < range->lsr_end;
static inline int range_is_sane(const struct lu_seq_range *range)
	return (range->lsr_end >= range->lsr_start);
static inline int range_is_zero(const struct lu_seq_range *range)
	return (range->lsr_start == 0 && range->lsr_end == 0);
static inline int range_is_exhausted(const struct lu_seq_range *range)
	return range_space(range) == 0;
/* return 0 if two ranges have the same location */
static inline int range_compare_loc(const struct lu_seq_range *r1,
				    const struct lu_seq_range *r2)
	return r1->lsr_index != r2->lsr_index ||
	       r1->lsr_flags != r2->lsr_flags;
#define DRANGE "[%#16.16Lx-%#16.16Lx):%x:%s"
#define PRANGE(range) \
	(range)->lsr_start, \
	(range)->lsr_index, \
	fld_range_is_mdt(range) ? "mdt" : "ost"
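/*
 * Usage sketch (illustrative only, not used anywhere): carve out a sequence
 * range of width 0x80 on MDT index 0 and test membership with the helpers
 * above.  Only the lsr_* fields already referenced by those helpers are
 * assumed here, plus the usual LASSERT() from libcfs.
 */
static inline void lu_seq_range_example(void)
{
	struct lu_seq_range range;

	range_init(&range);
	range.lsr_start = 0x1000;	/* first sequence in the range */
	range.lsr_end   = 0x1080;	/* lsr_end itself is excluded */
	range.lsr_index = 0;		/* home MDT index */
	fld_range_set_mdt(&range);

	/* range_space() == 0x80; 0x1000 is inside, 0x1080 is not */
	LASSERT(range_space(&range) == 0x80);
	LASSERT(range_within(&range, 0x1000) && !range_within(&range, 0x1080));
}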
/** \defgroup lu_fid lu_fid
 * Flags for lustre_mdt_attrs::lma_compat and lustre_mdt_attrs::lma_incompat.
 * Deprecated since HSM and SOM attributes are now stored in separate on-disk
	LMAC_HSM	= 0x00000001,
	LMAC_SOM	= 0x00000002,
	LMAC_NOT_IN_OI	= 0x00000004, /* the object does NOT need OI mapping */
	LMAC_FID_ON_OST	= 0x00000008, /* For OST-object, its OI mapping is
				       * under /O/<seq>/d<x>. */
 * Masks for all features that should be supported by a Lustre version to
 * access a specific file.
 * This information is stored in lustre_mdt_attrs::lma_incompat.
	LMAI_RELEASED		= 0x00000001, /* file is released */
	LMAI_AGENT		= 0x00000002, /* agent inode */
	LMAI_REMOTE_PARENT	= 0x00000004, /* the parent of the object
					       * is on the remote MDT */
#define LMA_INCOMPAT_SUPP	(LMAI_AGENT | LMAI_REMOTE_PARENT)
	/** LASTID file has zero OID */
	LUSTRE_FID_LASTID_OID = 0UL,
	/** initial fid id value */
	LUSTRE_FID_INIT_OID = 1UL
/** returns fid object sequence */
static inline __u64 fid_seq(const struct lu_fid *fid)
/** returns fid object id */
static inline __u32 fid_oid(const struct lu_fid *fid)
/** returns fid object version */
static inline __u32 fid_ver(const struct lu_fid *fid)
static inline void fid_zero(struct lu_fid *fid)
	memset(fid, 0, sizeof(*fid));
static inline __u64 fid_ver_oid(const struct lu_fid *fid)
	return ((__u64)fid_ver(fid) << 32 | fid_oid(fid));
 * Note that reserved SEQ numbers below 12 will conflict with ldiskfs
 * inodes in the IGIF namespace, so these reserved SEQ numbers can be
 * used for other purposes and not risk collisions with existing inodes.
 * Different FID Format
 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs#NEW.0
	FID_SEQ_OST_MDT0	= 0,
	FID_SEQ_LLOG		= 1, /* unnamed llogs */
	FID_SEQ_OST_MDT1	= 3,
	FID_SEQ_OST_MAX		= 9, /* Max MDT count before OST_on_FID */
	FID_SEQ_LLOG_NAME	= 10, /* named llogs */
	FID_SEQ_IGIF_MAX	= 0x0ffffffffULL,
	FID_SEQ_IDIF		= 0x100000000ULL,
	FID_SEQ_IDIF_MAX	= 0x1ffffffffULL,
	/* Normal FID sequence starts from this value, i.e. 1<<33 */
	FID_SEQ_START		= 0x200000000ULL,
	/* sequence for local pre-defined FIDs listed in local_oid */
	FID_SEQ_LOCAL_FILE	= 0x200000001ULL,
	FID_SEQ_DOT_LUSTRE	= 0x200000002ULL,
	/* sequence is used for local named objects FIDs generated
	 * by the local_object_storage library */
	FID_SEQ_LOCAL_NAME	= 0x200000003ULL,
	/* Because the current FLD will only cache the fid sequence, instead
	 * of the oid on the client side, if a FID needs to be exposed to
	 * the client side, we need to make sure all fids under one
	 * sequence will be located in one MDT. */
	FID_SEQ_SPECIAL		= 0x200000004ULL,
	FID_SEQ_QUOTA		= 0x200000005ULL,
	FID_SEQ_QUOTA_GLB	= 0x200000006ULL,
	FID_SEQ_ROOT		= 0x200000007ULL, /* Located on MDT0 */
	FID_SEQ_NORMAL		= 0x200000400ULL,
	FID_SEQ_LOV_DEFAULT	= 0xffffffffffffffffULL
#define OBIF_OID_MAX_BITS	32
#define OBIF_MAX_OID		(1ULL << OBIF_OID_MAX_BITS)
#define OBIF_OID_MASK		((1ULL << OBIF_OID_MAX_BITS) - 1)
#define IDIF_OID_MAX_BITS	48
#define IDIF_MAX_OID		(1ULL << IDIF_OID_MAX_BITS)
#define IDIF_OID_MASK		((1ULL << IDIF_OID_MAX_BITS) - 1)
/** OID for FID_SEQ_SPECIAL */
	/* Big Filesystem Lock to serialize rename operations */
	FID_OID_SPECIAL_BFL	= 1UL,
/** OID for FID_SEQ_DOT_LUSTRE */
enum dot_lustre_oid {
	FID_OID_DOT_LUSTRE	= 1UL,
	FID_OID_DOT_LUSTRE_OBF	= 2UL,
static inline int fid_seq_is_mdt0(__u64 seq)
	return (seq == FID_SEQ_OST_MDT0);
static inline int fid_seq_is_mdt(const __u64 seq)
	return seq == FID_SEQ_OST_MDT0 || seq >= FID_SEQ_NORMAL;
static inline int fid_seq_is_echo(__u64 seq)
	return (seq == FID_SEQ_ECHO);
static inline int fid_is_echo(const struct lu_fid *fid)
	return fid_seq_is_echo(fid_seq(fid));
static inline int fid_seq_is_llog(__u64 seq)
	return (seq == FID_SEQ_LLOG);
static inline int fid_is_llog(const struct lu_fid *fid)
	/* file with OID == 0 is not llog but contains last oid */
	return fid_seq_is_llog(fid_seq(fid)) && fid_oid(fid) > 0;
static inline int fid_seq_is_rsvd(const __u64 seq)
	return (seq > FID_SEQ_OST_MDT0 && seq <= FID_SEQ_RSVD);
static inline int fid_seq_is_special(const __u64 seq)
	return seq == FID_SEQ_SPECIAL;
static inline int fid_seq_is_local_file(const __u64 seq)
	return seq == FID_SEQ_LOCAL_FILE ||
	       seq == FID_SEQ_LOCAL_NAME;
static inline int fid_seq_is_root(const __u64 seq)
	return seq == FID_SEQ_ROOT;
static inline int fid_seq_is_dot(const __u64 seq)
	return seq == FID_SEQ_DOT_LUSTRE;
static inline int fid_seq_is_default(const __u64 seq)
	return seq == FID_SEQ_LOV_DEFAULT;
static inline int fid_is_mdt0(const struct lu_fid *fid)
	return fid_seq_is_mdt0(fid_seq(fid));
static inline void lu_root_fid(struct lu_fid *fid)
	fid->f_seq = FID_SEQ_ROOT;
 * Check if a fid is igif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an igif; otherwise false.
static inline int fid_seq_is_igif(const __u64 seq)
	return seq >= FID_SEQ_IGIF && seq <= FID_SEQ_IGIF_MAX;
static inline int fid_is_igif(const struct lu_fid *fid)
	return fid_seq_is_igif(fid_seq(fid));
 * Check if a fid is idif or not.
 * \param fid the fid to be tested.
 * \return true if the fid is an idif; otherwise false.
static inline int fid_seq_is_idif(const __u64 seq)
	return seq >= FID_SEQ_IDIF && seq <= FID_SEQ_IDIF_MAX;
static inline int fid_is_idif(const struct lu_fid *fid)
	return fid_seq_is_idif(fid_seq(fid));
static inline int fid_is_local_file(const struct lu_fid *fid)
	return fid_seq_is_local_file(fid_seq(fid));
static inline int fid_seq_is_norm(const __u64 seq)
	return (seq >= FID_SEQ_NORMAL);
static inline int fid_is_norm(const struct lu_fid *fid)
	return fid_seq_is_norm(fid_seq(fid));
/* convert an OST objid into an IDIF FID SEQ number */
static inline __u64 fid_idif_seq(__u64 id, __u32 ost_idx)
	return FID_SEQ_IDIF | (ost_idx << 16) | ((id >> 32) & 0xffff);
/* convert a packed IDIF FID into an OST objid */
static inline __u64 fid_idif_id(__u64 seq, __u32 oid, __u32 ver)
	return ((__u64)ver << 48) | ((seq & 0xffff) << 32) | oid;
/* extract ost index from IDIF FID */
static inline __u32 fid_idif_ost_idx(const struct lu_fid *fid)
	return (fid_seq(fid) >> 16) & 0xffff;
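/*
 * Worked example (illustrative only, not used anywhere): pack the legacy
 * OST objid 0x123456789ab on OST index 7 into the IDIF namespace and
 * recover it again with the helpers above.
 */
static inline void fid_idif_example(void)
{
	__u64 objid = 0x123456789abULL;		  /* legacy "group 0" objid */
	__u32 ost_idx = 7;
	__u64 seq = fid_idif_seq(objid, ost_idx); /* 0x100070123 */
	__u32 oid = (__u32)objid;		  /* 0x456789ab */

	/* the objid round-trips, and the OST index lives in seq bits 16..31 */
	LASSERT(fid_idif_id(seq, oid, 0) == objid);
	LASSERT(((seq >> 16) & 0xffff) == ost_idx);
}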
/* extract OST sequence (group) from a wire ost_id (id/seq) pair */
static inline __u64 ostid_seq(const struct ost_id *ostid)
	if (fid_seq_is_mdt0(ostid->oi.oi_seq))
		return FID_SEQ_OST_MDT0;
	if (fid_seq_is_default(ostid->oi.oi_seq))
		return FID_SEQ_LOV_DEFAULT;
	if (fid_is_idif(&ostid->oi_fid))
		return FID_SEQ_OST_MDT0;
	return fid_seq(&ostid->oi_fid);
/* extract OST objid from a wire ost_id (id/seq) pair */
static inline __u64 ostid_id(const struct ost_id *ostid)
	if (fid_seq_is_mdt0(ostid_seq(ostid)))
		return ostid->oi.oi_id & IDIF_OID_MASK;
	if (fid_is_idif(&ostid->oi_fid))
		return fid_idif_id(fid_seq(&ostid->oi_fid),
				   fid_oid(&ostid->oi_fid), 0);
	return fid_oid(&ostid->oi_fid);
static inline void ostid_set_seq(struct ost_id *oi, __u64 seq)
	if (fid_seq_is_mdt0(seq) || fid_seq_is_default(seq)) {
		oi->oi_fid.f_seq = seq;
		/* Note: if f_oid + f_ver is zero, we need to initialize it
		 * to 1; otherwise ostid_seq will treat this as an old ostid
		 * (oi_seq == 0) */
		if (oi->oi_fid.f_oid == 0 && oi->oi_fid.f_ver == 0)
			oi->oi_fid.f_oid = LUSTRE_FID_INIT_OID;
static inline void ostid_set_seq_mdt0(struct ost_id *oi)
	ostid_set_seq(oi, FID_SEQ_OST_MDT0);
static inline void ostid_set_seq_echo(struct ost_id *oi)
	ostid_set_seq(oi, FID_SEQ_ECHO);
static inline void ostid_set_seq_llog(struct ost_id *oi)
	ostid_set_seq(oi, FID_SEQ_LLOG);
 * Note: we need to check oi_seq to decide where to set oi_id,
 * so oi_seq should always be set ahead of oi_id.
static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
	if (fid_seq_is_mdt0(ostid_seq(oi))) {
		if (oid >= IDIF_MAX_OID) {
			CERROR("Bad %llu to set "DOSTID"\n",
		if (oid > OBIF_MAX_OID) {
			CERROR("Bad %llu to set "DOSTID"\n",
	oi->oi_fid.f_oid = oid;
static inline void ostid_inc_id(struct ost_id *oi)
	if (fid_seq_is_mdt0(ostid_seq(oi))) {
		if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) {
			CERROR("Bad inc "DOSTID"\n", POSTID(oi));
static inline void ostid_dec_id(struct ost_id *oi)
	if (fid_seq_is_mdt0(ostid_seq(oi)))
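/*
 * Usage sketch (illustrative only, not used anywhere): initialize an ost_id
 * for a legacy "group 0" object.  The sequence must be set before the id,
 * per the note above ostid_set_id().
 */
static inline void ostid_example_init(struct ost_id *oi, __u64 objid)
{
	ostid_set_seq_mdt0(oi);		/* seq = FID_SEQ_OST_MDT0 */
	ostid_set_id(oi, objid);	/* objid limited to 48 bits here */
}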
 * Unpack an OST object id/seq (group) into a FID. This is needed for
 * converting all obdo, lmm, lsm, etc. 64-bit id/seq pairs into proper
 * FIDs. Note that if an id/seq is already in FID/IDIF format it will
 * be passed through unchanged. Only legacy OST objects in "group 0"
 * will be mapped into the IDIF namespace so that they can fit into the
 * struct lu_fid fields without loss. For reference see:
 * http://arch.lustre.org/index.php?title=Interoperability_fids_zfs
static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
	if (ost_idx > 0xffff) {
		CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
	if (fid_seq_is_mdt0(ostid_seq(ostid))) {
		/* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
		 * that we map into the IDIF namespace. It allows up to 2^48
		 * objects per OST, as this is the object namespace that has
		 * been in production for years. This can handle create rates
		 * of 1M objects/s/OST for 9 years, or combinations thereof. */
		if (ostid_id(ostid) >= IDIF_MAX_OID) {
			CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
			       POSTID(ostid), ost_idx);
		fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx);
		/* truncate to 32 bits by assignment */
		fid->f_oid = ostid_id(ostid);
		/* in theory, not currently used */
		fid->f_ver = ostid_id(ostid) >> 48;
	} else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
		/* This is either an IDIF object, which identifies objects
		 * across all OSTs, or a regular FID. The IDIF namespace maps
		 * legacy OST objects into the FID namespace. In both cases,
		 * we just pass the FID through, no conversion needed. */
		if (ostid->oi_fid.f_ver != 0) {
			CERROR("bad MDT0 id, "DOSTID" ost_idx:%u\n",
			       POSTID(ostid), ost_idx);
		*fid = ostid->oi_fid;
/* pack any OST FID into an ostid (id/seq) for the wire/disk */
static inline int fid_to_ostid(const struct lu_fid *fid, struct ost_id *ostid)
	if (unlikely(fid_seq_is_igif(fid->f_seq))) {
		CERROR("bad IGIF, "DFID"\n", PFID(fid));
	if (fid_is_idif(fid)) {
		ostid_set_seq_mdt0(ostid);
		ostid_set_id(ostid, fid_idif_id(fid_seq(fid), fid_oid(fid),
		ostid->oi_fid = *fid;
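/*
 * Usage sketch (illustrative only, not used anywhere): round-trip a legacy
 * OST objid through the FID namespace.  Both converters return 0 on
 * success; error handling is omitted for brevity.
 */
static inline void ostid_fid_roundtrip_example(void)
{
	struct ost_id oi = { };
	struct lu_fid fid;

	ostid_set_seq_mdt0(&oi);
	ostid_set_id(&oi, 42);
	if (ostid_to_fid(&fid, &oi, 0) == 0)	/* objid 42 on OST index 0 */
		fid_to_ostid(&fid, &oi);	/* back to wire id/seq form */
}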
/* Check whether the fid is for LAST_ID */
static inline int fid_is_last_id(const struct lu_fid *fid)
	return (fid_oid(fid) == 0);
 * Get inode number from an igif.
 * \param fid an igif to get inode number from.
 * \return inode number for the igif.
static inline ino_t lu_igif_ino(const struct lu_fid *fid)
extern void lustre_swab_ost_id(struct ost_id *oid);
 * Get inode generation from an igif.
 * \param fid an igif to get inode generation from.
 * \return inode generation for the igif.
static inline __u32 lu_igif_gen(const struct lu_fid *fid)
 * Build igif from the inode number/generation.
static inline void lu_igif_build(struct lu_fid *fid, __u32 ino, __u32 gen)
 * Fids are transmitted across the network (in the sender byte-ordering),
 * and stored on disk in big-endian order.
static inline void fid_cpu_to_le(struct lu_fid *dst, const struct lu_fid *src)
	dst->f_seq = cpu_to_le64(fid_seq(src));
	dst->f_oid = cpu_to_le32(fid_oid(src));
	dst->f_ver = cpu_to_le32(fid_ver(src));
static inline void fid_le_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
	dst->f_seq = le64_to_cpu(fid_seq(src));
	dst->f_oid = le32_to_cpu(fid_oid(src));
	dst->f_ver = le32_to_cpu(fid_ver(src));
static inline void fid_cpu_to_be(struct lu_fid *dst, const struct lu_fid *src)
	dst->f_seq = cpu_to_be64(fid_seq(src));
	dst->f_oid = cpu_to_be32(fid_oid(src));
	dst->f_ver = cpu_to_be32(fid_ver(src));
static inline void fid_be_to_cpu(struct lu_fid *dst, const struct lu_fid *src)
	dst->f_seq = be64_to_cpu(fid_seq(src));
	dst->f_oid = be32_to_cpu(fid_oid(src));
	dst->f_ver = be32_to_cpu(fid_ver(src));
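/*
 * Usage sketch (illustrative only, not used anywhere): FIDs are stored on
 * disk big-endian, so convert before writing and again after reading back.
 */
static inline void fid_disk_image_example(void)
{
	struct lu_fid mem = { .f_seq = FID_SEQ_NORMAL, .f_oid = 1, .f_ver = 0 };
	struct lu_fid disk;

	fid_cpu_to_be(&disk, &mem);	/* image suitable for on-disk storage */
	fid_be_to_cpu(&mem, &disk);	/* and back after reading it in */
}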
static inline int fid_is_sane(const struct lu_fid *fid)
	return fid != NULL &&
	       ((fid_seq(fid) >= FID_SEQ_START && fid_ver(fid) == 0) ||
	       fid_is_igif(fid) || fid_is_idif(fid) ||
	       fid_seq_is_rsvd(fid_seq(fid)));
static inline int fid_is_zero(const struct lu_fid *fid)
	return fid_seq(fid) == 0 && fid_oid(fid) == 0;
extern void lustre_swab_lu_fid(struct lu_fid *fid);
extern void lustre_swab_lu_seq_range(struct lu_seq_range *range);
static inline int lu_fid_eq(const struct lu_fid *f0, const struct lu_fid *f1)
	return memcmp(f0, f1, sizeof(*f0)) == 0;
#define __diff_normalize(val0, val1)			\
	typeof(val0) __val0 = (val0);			\
	typeof(val1) __val1 = (val1);			\
	(__val0 == __val1 ? 0 : __val0 > __val1 ? +1 : -1); \
static inline int lu_fid_cmp(const struct lu_fid *f0,
			     const struct lu_fid *f1)
	__diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
	__diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
	__diff_normalize(fid_ver(f0), fid_ver(f1));
static inline void ostid_cpu_to_le(const struct ost_id *src_oi,
				   struct ost_id *dst_oi)
	if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
		dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
		dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
		fid_cpu_to_le(&dst_oi->oi_fid, &src_oi->oi_fid);
static inline void ostid_le_to_cpu(const struct ost_id *src_oi,
				   struct ost_id *dst_oi)
	if (fid_seq_is_mdt0(ostid_seq(src_oi))) {
		dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
		dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
		fid_le_to_cpu(&dst_oi->oi_fid, &src_oi->oi_fid);
/** \defgroup lu_dir lu_dir
 * Enumeration of possible directory entry attributes.
 * Attributes follow the directory entry header in the order they appear in
enum lu_dirent_attrs {
	LUDA_64BITHASH		= 0x0004,
	/* The following attrs are used for MDT internal use only,
	 * not visible to the client */
	/* Verify the dirent consistency */
	LUDA_VERIFY		= 0x8000,
	/* Only check but do not repair the dirent inconsistency */
	LUDA_VERIFY_DRYRUN	= 0x4000,
	/* The dirent has been repaired, or is to be repaired (dryrun). */
	LUDA_REPAIR		= 0x2000,
	/* The system is upgraded, and has been or is to be repaired (dryrun). */
	LUDA_UPGRADE		= 0x1000,
	/* Ignore this record, go to the next directly. */
	LUDA_IGNORE		= 0x0800,
#define LU_DIRENT_ATTRS_MASK	0xf800
 * Layout of readdir pages, as transmitted on the wire.
	/** valid if LUDA_FID is set. */
	struct lu_fid lde_fid;
	/** a unique entry identifier: a hash or an offset. */
	/** total record length, including all attributes. */
	/** optional variable size attributes following this entry.
	 * taken from enum lu_dirent_attrs.
	/** name is followed by the attributes indicated in ->ldp_attrs, in
	 * their natural order. After the last attribute, padding bytes are
	 * added to make ->lde_reclen a multiple of 8.
 * Definitions of optional directory entry attribute formats.
 * Individual attributes do not have their length encoded in a generic way.
 * It is assumed that the consumer of an attribute knows its format. This
 * means that it is impossible to skip over an unknown attribute, except by
 * skipping over all remaining attributes (by using ->lde_reclen), which is
 * not too constraining, because new server versions will append new
 * attributes at the end of an entry.
 * Fid directory attribute: a fid of an object referenced by the entry. This
 * will almost always be requested by the client and supplied by the server.
 * Aligned to 8 bytes.
/* To have compatibility with 1.8, let's have the fid in the lu_dirent struct. */
 * Aligned to 2 bytes.
#define IFTODT(type)	(((type) & S_IFMT) >> IFSHIFT)
#define DTTOIF(dirtype)	((dirtype) << IFSHIFT)
	__u64	ldp_hash_start;
	struct lu_dirent ldp_entries[0];
enum lu_dirpage_flags {
	 * dirpage contains no entry.
	 * last entry's lde_hash equals ldp_hash_end.
static inline struct lu_dirent *lu_dirent_start(struct lu_dirpage *dp)
	if (le32_to_cpu(dp->ldp_flags) & LDF_EMPTY)
	return dp->ldp_entries;
static inline struct lu_dirent *lu_dirent_next(struct lu_dirent *ent)
	struct lu_dirent *next;
	if (le16_to_cpu(ent->lde_reclen) != 0)
		next = ((void *)ent) + le16_to_cpu(ent->lde_reclen);
static inline int lu_dirent_calc_size(int namelen, __u16 attr)
	if (attr & LUDA_TYPE) {
		const unsigned align = sizeof(struct luda_type) - 1;
		size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
		size += sizeof(struct luda_type);
		size = sizeof(struct lu_dirent) + namelen;
	return (size + 7) & ~7;
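/*
 * Worked example (illustrative only): with LUDA_TYPE set, the fixed
 * lu_dirent header plus the name is first rounded up to the alignment of
 * struct luda_type, sizeof(struct luda_type) is added, and the total is
 * then rounded up to a multiple of 8 so the next entry stays 8-byte
 * aligned; without LUDA_TYPE only the final round-up to 8 applies.
 */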
static inline int lu_dirent_size(struct lu_dirent *ent)
	if (le16_to_cpu(ent->lde_reclen) == 0) {
		return lu_dirent_calc_size(le16_to_cpu(ent->lde_namelen),
					   le32_to_cpu(ent->lde_attrs));
	return le16_to_cpu(ent->lde_reclen);
#define MDS_DIR_END_OFF 0xfffffffffffffffeULL
 * MDS_READPAGE page size
 * This is the directory page size packed in the MDS_READPAGE RPC.
 * It's different from PAGE_CACHE_SIZE because the client needs to
 * access the struct lu_dirpage header packed at the beginning of
 * the "page"; without this there isn't any way to know where the
 * lu_dirpage header is if client and server PAGE_CACHE_SIZE differ.
#define LU_PAGE_SHIFT 12
#define LU_PAGE_SIZE  (1UL << LU_PAGE_SHIFT)
#define LU_PAGE_MASK  (~(LU_PAGE_SIZE - 1))
#define LU_PAGE_COUNT (1 << (PAGE_CACHE_SHIFT - LU_PAGE_SHIFT))
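/*
 * Usage sketch (illustrative only, not used anywhere): walk the entries of
 * a single lu_dirpage received from MDS_READPAGE.  Fields are little-endian
 * on the wire, hence the le*_to_cpu() conversions; iteration ends at the
 * entry whose lde_reclen is zero, for which lu_dirent_next() yields NULL.
 */
static inline void lu_dirpage_walk_example(struct lu_dirpage *dp)
{
	struct lu_dirent *ent;

	for (ent = lu_dirent_start(dp); ent != NULL;
	     ent = lu_dirent_next(ent)) {
		__u16 namelen = le16_to_cpu(ent->lde_namelen);

		/* the entry name is namelen bytes long, not NUL-terminated */
		(void)namelen;
	}
}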
struct lustre_handle {
#define DEAD_HANDLE_MAGIC 0xdeadbeefcafebabeULL
static inline int lustre_handle_is_used(struct lustre_handle *lh)
	return lh->cookie != 0ull;
static inline int lustre_handle_equal(const struct lustre_handle *lh1,
				      const struct lustre_handle *lh2)
	return lh1->cookie == lh2->cookie;
static inline void lustre_handle_copy(struct lustre_handle *tgt,
				      struct lustre_handle *src)
	tgt->cookie = src->cookie;
/* flags for lm_flags */
#define MSGHDR_AT_SUPPORT	0x1
#define MSGHDR_CKSUM_INCOMPAT18	0x2
#define lustre_msg lustre_msg_v2
/* we depend on this structure to be 8-byte aligned */
/* this type is only endian-adjusted in lustre_unpack_msg() */
struct lustre_msg_v2 {
	__u32 lm_buflens[0];
/* without gss, ptlrpc_body is put at the first buffer. */
#define PTLRPC_NUM_VERSIONS	4
#define JOBSTATS_JOBID_SIZE	32  /* 32 bytes string */
struct ptlrpc_body_v3 {
	struct lustre_handle pb_handle;
	__u64 pb_last_committed;
	__u32 pb_timeout;	/* for req, the deadline, for rep, the service est */
	__u32 pb_service_time;	/* for rep, actual service time */
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	/* padding for future needs */
	__u64 pb_padding[4];
	char  pb_jobid[JOBSTATS_JOBID_SIZE];
#define ptlrpc_body ptlrpc_body_v3
struct ptlrpc_body_v2 {
	struct lustre_handle pb_handle;
	__u64 pb_last_committed;
	__u32 pb_timeout;	/* for req, the deadline, for rep, the service est */
	__u32 pb_service_time;	/* for rep, actual service time, also used for
				 * net_latency of req */
	/* VBR: pre-versions */
	__u64 pb_pre_versions[PTLRPC_NUM_VERSIONS];
	/* padding for future needs */
	__u64 pb_padding[4];
extern void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
/* message body offset for lustre_msg_v2 */
/* ptlrpc body offset in all request/reply messages */
#define MSG_PTLRPC_BODY_OFF	0
/* normal request/reply message record offset */
#define REQ_REC_OFF		1
#define REPLY_REC_OFF		1
/* ldlm request message body offset */
#define DLM_LOCKREQ_OFF		1 /* lockreq offset */
#define DLM_REQ_REC_OFF		2 /* normal dlm request record offset */
/* ldlm intent lock message body offset */
#define DLM_INTENT_IT_OFF	2 /* intent lock it offset */
#define DLM_INTENT_REC_OFF	3 /* intent lock record offset */
/* ldlm reply message body offset */
#define DLM_LOCKREPLY_OFF	1 /* lockrep offset */
#define DLM_REPLY_REC_OFF	2 /* reply record offset */
/** only use in req->rq_{req,rep}_swab_mask */
#define MSG_PTLRPC_HEADER_OFF	31
/* Flags that are operation-specific go in the top 16 bits. */
#define MSG_OP_FLAG_MASK	0xffff0000
#define MSG_OP_FLAG_SHIFT	16
/* Flags that apply to all requests are in the bottom 16 bits */
#define MSG_GEN_FLAG_MASK	0x0000ffff
#define MSG_LAST_REPLAY		0x0001
#define MSG_RESENT		0x0002
#define MSG_REPLAY		0x0004
/* #define MSG_AT_SUPPORT	0x0008
 * This was used in early prototypes of adaptive timeouts, and while there
 * shouldn't be any users of that code, there also isn't a need for using
 * this bit. Defer usage until at least 1.10 to avoid potential conflict. */
#define MSG_DELAY_REPLAY	0x0010
#define MSG_VERSION_REPLAY	0x0020
#define MSG_REQ_REPLAY_DONE	0x0040
#define MSG_LOCK_REPLAY_DONE	0x0080
 * Flags for all connect opcodes (MDS_CONNECT, OST_CONNECT)
#define MSG_CONNECT_RECOVERING	0x00000001
#define MSG_CONNECT_RECONNECT	0x00000002
#define MSG_CONNECT_REPLAYABLE	0x00000004
//#define MSG_CONNECT_PEER	0x8
#define MSG_CONNECT_LIBCLIENT	0x00000010
#define MSG_CONNECT_INITIAL	0x00000020
#define MSG_CONNECT_ASYNC	0x00000040
#define MSG_CONNECT_NEXT_VER	0x00000080 /* use next version of lustre_msg */
#define MSG_CONNECT_TRANSNO	0x00000100 /* report transno */
#define OBD_CONNECT_RDONLY		0x1ULL /*client has read-only access*/
#define OBD_CONNECT_INDEX		0x2ULL /*connect specific LOV idx */
#define OBD_CONNECT_MDS			0x4ULL /*connect from MDT to OST */
#define OBD_CONNECT_GRANT		0x8ULL /*OSC gets grant at connect */
#define OBD_CONNECT_SRVLOCK		0x10ULL /*server takes locks for cli */
#define OBD_CONNECT_VERSION		0x20ULL /*Lustre versions in ocd */
#define OBD_CONNECT_REQPORTAL		0x40ULL /*Separate non-IO req portal */
#define OBD_CONNECT_ACL			0x80ULL /*access control lists */
#define OBD_CONNECT_XATTR		0x100ULL /*client use extended attr */
#define OBD_CONNECT_CROW		0x200ULL /*MDS+OST create obj on write*/
#define OBD_CONNECT_TRUNCLOCK		0x400ULL /*locks on server for punch */
#define OBD_CONNECT_TRANSNO		0x800ULL /*replay sends init transno */
#define OBD_CONNECT_IBITS		0x1000ULL /*support for inodebits locks*/
#define OBD_CONNECT_JOIN		0x2000ULL /*files can be concatenated.
						   *We do not support JOIN FILE
						   *anymore; this flag is kept
						   *only to prevent the bit
						   *from being reused. */
#define OBD_CONNECT_ATTRFID		0x4000ULL /*Server can GetAttr By Fid*/
#define OBD_CONNECT_NODEVOH		0x8000ULL /*No open hndl on specl nodes*/
#define OBD_CONNECT_RMT_CLIENT		0x10000ULL /*Remote client */
#define OBD_CONNECT_RMT_CLIENT_FORCE	0x20000ULL /*Remote client by force */
#define OBD_CONNECT_BRW_SIZE		0x40000ULL /*Max bytes per rpc */
#define OBD_CONNECT_QUOTA64		0x80000ULL /*Not used since 2.4 */
#define OBD_CONNECT_MDS_CAPA		0x100000ULL /*MDS capability */
#define OBD_CONNECT_OSS_CAPA		0x200000ULL /*OSS capability */
#define OBD_CONNECT_CANCELSET		0x400000ULL /*Early batched cancels. */
#define OBD_CONNECT_SOM			0x800000ULL /*Size on MDS */
#define OBD_CONNECT_AT			0x1000000ULL /*client uses AT */
#define OBD_CONNECT_LRU_RESIZE		0x2000000ULL /*LRU resize feature. */
#define OBD_CONNECT_MDS_MDS		0x4000000ULL /*MDS-MDS connection */
#define OBD_CONNECT_REAL		0x8000000ULL /*real connection */
#define OBD_CONNECT_CHANGE_QS		0x10000000ULL /*Not used since 2.4 */
#define OBD_CONNECT_CKSUM		0x20000000ULL /*support several cksum algos*/
#define OBD_CONNECT_FID			0x40000000ULL /*FID is supported by server */
#define OBD_CONNECT_VBR			0x80000000ULL /*version based recovery */
#define OBD_CONNECT_LOV_V3		0x100000000ULL /*client supports LOV v3 EA */
#define OBD_CONNECT_GRANT_SHRINK	0x200000000ULL /* support grant shrink */
#define OBD_CONNECT_SKIP_ORPHAN		0x400000000ULL /* don't reuse orphan objids */
#define OBD_CONNECT_MAX_EASIZE		0x800000000ULL /* preserved for large EA */
#define OBD_CONNECT_FULL20		0x1000000000ULL /* it is 2.0 client */
#define OBD_CONNECT_LAYOUTLOCK		0x2000000000ULL /* client uses layout lock */
#define OBD_CONNECT_64BITHASH		0x4000000000ULL /* client supports 64-bit
							 * directory hash */
#define OBD_CONNECT_MAXBYTES		0x8000000000ULL /* max stripe size */
#define OBD_CONNECT_IMP_RECOV		0x10000000000ULL /* imp recovery support */
#define OBD_CONNECT_JOBSTATS		0x20000000000ULL /* jobid in ptlrpc_body */
#define OBD_CONNECT_UMASK		0x40000000000ULL /* create uses client umask */
#define OBD_CONNECT_EINPROGRESS		0x80000000000ULL /* client handles -EINPROGRESS
							  * RPC error properly */
#define OBD_CONNECT_GRANT_PARAM		0x100000000000ULL /* extra grant params used for
							   * finer space reservation */
#define OBD_CONNECT_FLOCK_OWNER		0x200000000000ULL /* for the fixed 1.8
							   * policy and 2.x server */
#define OBD_CONNECT_LVB_TYPE		0x400000000000ULL /* variable type of LVB */
#define OBD_CONNECT_NANOSEC_TIME	0x800000000000ULL /* nanosecond timestamps */
#define OBD_CONNECT_LIGHTWEIGHT		0x1000000000000ULL /* lightweight connection */
#define OBD_CONNECT_SHORTIO		0x2000000000000ULL /* short io */
#define OBD_CONNECT_PINGLESS		0x4000000000000ULL /* pings not required */
#define OBD_CONNECT_FLOCK_DEAD		0x8000000000000ULL /* flock deadlock detection */
#define OBD_CONNECT_DISP_STRIPE		0x10000000000000ULL /* create stripe disposition */
 * Please DO NOT add flag values here before first ensuring that this same
 * flag value is not in use on some other branch. Please clear any such
 * changes with senior engineers before starting to use a new flag. Then,
 * submit a small patch against EVERY branch that ONLY adds the new flag,
 * updates obd_connect_names[] for lprocfs_rd_connect_flags(), adds the
 * flag to check_obd_connect_data(), and updates wiretests accordingly, so it
 * can be approved and landed easily to reserve the flag for future use. */
/* The MNE_SWAB flag is overloading the MDS_MDS bit only for the MGS
 * connection. It is a temporary bug fix for Imperative Recovery interop
 * between 2.2 and 2.3 x86/ppc nodes, and can be removed when interop for
 * 2.2 clients/servers is no longer needed. LU-1252/LU-1644. */
#define OBD_CONNECT_MNE_SWAB	OBD_CONNECT_MDS_MDS
#define OCD_HAS_FLAG(ocd, flg) \
	(!!((ocd)->ocd_connect_flags & OBD_CONNECT_##flg))
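/*
 * Usage sketch (illustrative only): once an obd_connect_data (defined below)
 * has been negotiated, individual features can be tested without spelling
 * out the full mask, e.g.:
 *
 *	if (OCD_HAS_FLAG(ocd, GRANT))
 *		... the peer granted cache space at connect time ...
 */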
#define LRU_RESIZE_CONNECT_FLAG OBD_CONNECT_LRU_RESIZE
#define MDT_CONNECT_SUPPORTED  (OBD_CONNECT_RDONLY | OBD_CONNECT_VERSION | \
				OBD_CONNECT_ACL | OBD_CONNECT_XATTR | \
				OBD_CONNECT_IBITS | \
				OBD_CONNECT_NODEVOH | OBD_CONNECT_ATTRFID | \
				OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
				OBD_CONNECT_RMT_CLIENT | \
				OBD_CONNECT_RMT_CLIENT_FORCE | \
				OBD_CONNECT_BRW_SIZE | OBD_CONNECT_MDS_CAPA | \
				OBD_CONNECT_OSS_CAPA | OBD_CONNECT_MDS_MDS | \
				OBD_CONNECT_FID | LRU_RESIZE_CONNECT_FLAG | \
				OBD_CONNECT_VBR | OBD_CONNECT_LOV_V3 | \
				OBD_CONNECT_SOM | OBD_CONNECT_FULL20 | \
				OBD_CONNECT_64BITHASH | OBD_CONNECT_JOBSTATS | \
				OBD_CONNECT_EINPROGRESS | \
				OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_UMASK | \
				OBD_CONNECT_LVB_TYPE | OBD_CONNECT_LAYOUTLOCK | \
				OBD_CONNECT_PINGLESS | OBD_CONNECT_MAX_EASIZE | \
				OBD_CONNECT_FLOCK_DEAD | \
				OBD_CONNECT_DISP_STRIPE)
#define OST_CONNECT_SUPPORTED  (OBD_CONNECT_SRVLOCK | OBD_CONNECT_GRANT | \
				OBD_CONNECT_REQPORTAL | OBD_CONNECT_VERSION | \
				OBD_CONNECT_TRUNCLOCK | OBD_CONNECT_INDEX | \
				OBD_CONNECT_BRW_SIZE | OBD_CONNECT_OSS_CAPA | \
				OBD_CONNECT_CANCELSET | OBD_CONNECT_AT | \
				LRU_RESIZE_CONNECT_FLAG | OBD_CONNECT_CKSUM | \
				OBD_CONNECT_RMT_CLIENT | \
				OBD_CONNECT_RMT_CLIENT_FORCE | OBD_CONNECT_VBR | \
				OBD_CONNECT_MDS | OBD_CONNECT_SKIP_ORPHAN | \
				OBD_CONNECT_GRANT_SHRINK | OBD_CONNECT_FULL20 | \
				OBD_CONNECT_64BITHASH | OBD_CONNECT_MAXBYTES | \
				OBD_CONNECT_MAX_EASIZE | \
				OBD_CONNECT_EINPROGRESS | \
				OBD_CONNECT_JOBSTATS | \
				OBD_CONNECT_LIGHTWEIGHT | OBD_CONNECT_LVB_TYPE | \
				OBD_CONNECT_LAYOUTLOCK | OBD_CONNECT_FID | \
				OBD_CONNECT_PINGLESS)
#define ECHO_CONNECT_SUPPORTED (0)
#define MGS_CONNECT_SUPPORTED  (OBD_CONNECT_VERSION | OBD_CONNECT_AT | \
				OBD_CONNECT_FULL20 | OBD_CONNECT_IMP_RECOV | \
				OBD_CONNECT_MNE_SWAB | OBD_CONNECT_PINGLESS)
/* Features required for this version of the client to work with server */
#define CLIENT_CONNECT_MDT_REQD (OBD_CONNECT_IBITS | OBD_CONNECT_FID | \
#define OBD_OCD_VERSION(major, minor, patch, fix) (((major)<<24) + \
						   ((patch)<<8) + (fix))
#define OBD_OCD_VERSION_MAJOR(version)	((int)((version)>>24)&255)
#define OBD_OCD_VERSION_MINOR(version)	((int)((version)>>16)&255)
#define OBD_OCD_VERSION_PATCH(version)	((int)((version)>>8)&255)
#define OBD_OCD_VERSION_FIX(version)	((int)(version)&255)
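/*
 * Worked example (illustrative only): OBD_OCD_VERSION(2, 5, 1, 0) encodes
 * version 2.5.1.0 as (2 << 24) + (5 << 16) + (1 << 8) + 0 = 0x02050100,
 * and OBD_OCD_VERSION_MAJOR/MINOR/PATCH/FIX() recover the four components.
 */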
/* This structure is used for both request and reply.
 * If we eventually have separate connect data for different types, which we
 * almost certainly will, then perhaps we stick a union in here. */
struct obd_connect_data_v1 {
	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
	__u32 ocd_version;	 /* lustre release version number */
	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
	__u32 ocd_index;	 /* LOV index to connect to */
	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes, must be 2^n */
	__u64 ocd_ibits_known;	 /* inode bits this client understands */
	__u8  ocd_blocksize;	 /* log2 of the backend filesystem blocksize */
	__u8  ocd_inodespace;	 /* log2 of the per-inode space consumption */
	__u16 ocd_grant_extent;	 /* per-extent grant overhead, in 1K blocks */
	__u32 ocd_unused;	 /* also fix lustre_swab_connect */
	__u64 ocd_transno;	 /* first transno from client to be replayed */
	__u32 ocd_group;	 /* MDS group on OST */
	__u32 ocd_cksum_types;	 /* supported checksum algorithms */
	__u32 ocd_max_easize;	 /* How big LOV EA can be on MDS */
	__u32 ocd_instance;	 /* also fix lustre_swab_connect */
	__u64 ocd_maxbytes;	 /* Maximum stripe size in bytes */
struct obd_connect_data {
	__u64 ocd_connect_flags; /* OBD_CONNECT_* per above */
	__u32 ocd_version;	 /* lustre release version number */
	__u32 ocd_grant;	 /* initial cache grant amount (bytes) */
	__u32 ocd_index;	 /* LOV index to connect to */
	__u32 ocd_brw_size;	 /* Maximum BRW size in bytes */
	__u64 ocd_ibits_known;	 /* inode bits this client understands */
	__u8  ocd_blocksize;	 /* log2 of the backend filesystem blocksize */
	__u8  ocd_inodespace;	 /* log2 of the per-inode space consumption */
	__u16 ocd_grant_extent;	 /* per-extent grant overhead, in 1K blocks */
	__u32 ocd_unused;	 /* also fix lustre_swab_connect */
	__u64 ocd_transno;	 /* first transno from client to be replayed */
	__u32 ocd_group;	 /* MDS group on OST */
	__u32 ocd_cksum_types;	 /* supported checksum algorithms */
	__u32 ocd_max_easize;	 /* How big LOV EA can be on MDS */
	__u32 ocd_instance;	 /* instance # of this target */
	__u64 ocd_maxbytes;	 /* Maximum stripe size in bytes */
	/* Fields after ocd_maxbytes are only accessible by the receiver
	 * if the corresponding flag in ocd_connect_flags is set. Accessing
	 * any field after ocd_maxbytes on the receiver without a valid flag
	 * may result in out-of-bound memory access and kernel oops. */
	__u64 padding1;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding2;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding3;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding4;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding5;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding6;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding7;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding8;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 padding9;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingA;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingB;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingC;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingD;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingE;	  /* added 2.1.0. also fix lustre_swab_connect */
	__u64 paddingF;	  /* added 2.1.0. also fix lustre_swab_connect */
 * Please DO NOT use any fields here before first ensuring that this same
 * field is not in use on some other branch. Please clear any such changes
 * with senior engineers before starting to use a new field. Then, submit
 * a small patch against EVERY branch that ONLY adds the new field along
 * with the matching OBD_CONNECT flag, so that it can be approved and landed
 * easily to reserve the flag for future use. */
extern void lustre_swab_connect(struct obd_connect_data *ocd);
 * Supported checksum algorithms. Up to 32 checksum types are supported.
 * (32-bit mask stored in obd_connect_data::ocd_cksum_types)
 * Please update DECLARE_CKSUM_NAME/OBD_CKSUM_ALL in obd.h when adding a new
 * algorithm and also the OBD_FL_CKSUM* flags.
	OBD_CKSUM_CRC32  = 0x00000001,
	OBD_CKSUM_ADLER  = 0x00000002,
	OBD_CKSUM_CRC32C = 0x00000004,
 *   OST requests: OBDO & OBD request records
	OST_REPLY	= 0,	/* reply ? */
	OST_QUOTACHECK	= 18,
	OST_QUOTA_ADJUST_QUNIT = 20, /* not used since 2.4 */
#define OST_FIRST_OPC  OST_REPLY
	OBD_FL_INLINEDATA   = 0x00000001,
	OBD_FL_OBDMDEXISTS  = 0x00000002,
	OBD_FL_DELORPHAN    = 0x00000004, /* if set in o_flags delete orphans */
	OBD_FL_NORPC	    = 0x00000008, /* set in o_flags do in OSC not OST */
	OBD_FL_IDONLY	    = 0x00000010, /* set in o_flags only adjust obj id*/
	OBD_FL_RECREATE_OBJS = 0x00000020, /* recreate missing obj */
	OBD_FL_DEBUG_CHECK  = 0x00000040, /* echo client/server debug check */
	OBD_FL_NO_USRQUOTA  = 0x00000100, /* the object's owner is over quota */
	OBD_FL_NO_GRPQUOTA  = 0x00000200, /* the object's group is over quota */
	OBD_FL_CREATE_CROW  = 0x00000400, /* object should be created on write */
	OBD_FL_SRVLOCK	    = 0x00000800, /* delegate DLM locking to server */
	OBD_FL_CKSUM_CRC32  = 0x00001000, /* CRC32 checksum type */
	OBD_FL_CKSUM_ADLER  = 0x00002000, /* ADLER checksum type */
	OBD_FL_CKSUM_CRC32C = 0x00004000, /* CRC32C checksum type */
	OBD_FL_CKSUM_RSVD2  = 0x00008000, /* for future cksum types */
	OBD_FL_CKSUM_RSVD3  = 0x00010000, /* for future cksum types */
	OBD_FL_SHRINK_GRANT = 0x00020000, /* object shrinks the grant */
	OBD_FL_MMAP	    = 0x00040000, /* object is mmapped on the client.
					   * XXX: obsoleted - reserved for old
					   * clients prior to 2.2 */
	OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
	OBD_FL_NOSPC_BLK    = 0x00100000, /* no more block space on OST */
	/* Note that while these checksum values are currently separate bits,
	 * in 2.x we can actually allow all values from 1-31 if we wanted. */
	OBD_FL_CKSUM_ALL    = OBD_FL_CKSUM_CRC32 | OBD_FL_CKSUM_ADLER |
			      OBD_FL_CKSUM_CRC32C,
	/* mask for local-only flag, which won't be sent over the network */
	OBD_FL_LOCAL_MASK   = 0xF0000000,
#define LOV_MAGIC_V1		0x0BD10BD0
#define LOV_MAGIC		LOV_MAGIC_V1
#define LOV_MAGIC_JOIN_V1	0x0BD20BD0
#define LOV_MAGIC_V3		0x0BD30BD0
 * magic for fully defined striping
 * the idea is that we should have different magics for striping "hints"
 * (struct lov_user_md_v[13]) and defined ready-to-use striping (struct
 * lov_mds_md_v[13]). at the moment the magics are used in the wire protocol,
 * so we can't just change them without lengthy preparation, but we still
 * need a mechanism to allow LOD to differentiate hint versus ready striping.
 * so, at the moment we do a trick: MDT knows what to expect from a request
 * depending on the case (replay uses ready striping, non-replay req uses
 * hints), so MDT replaces the magic with the appropriate one and now LOD can
 * easily understand what's inside -bzzz
#define LOV_MAGIC_V1_DEF  0x0CD10BD0
#define LOV_MAGIC_V3_DEF  0x0CD30BD0
#define LOV_PATTERN_RAID0	0x001	/* stripes are used round-robin */
#define LOV_PATTERN_RAID1	0x002	/* stripes are mirrors of each other */
#define LOV_PATTERN_FIRST	0x100	/* first stripe is not in round-robin */
#define LOV_PATTERN_CMOBD	0x200
#define LOV_PATTERN_F_MASK	0xffff0000
#define LOV_PATTERN_F_RELEASED	0x80000000 /* HSM released file */
#define lov_pattern(pattern)		(pattern & ~LOV_PATTERN_F_MASK)
#define lov_pattern_flags(pattern)	(pattern & LOV_PATTERN_F_MASK)
#define lov_ost_data lov_ost_data_v1
struct lov_ost_data_v1 {	/* per-stripe data structure (little-endian)*/
	struct ost_id l_ost_oi;	/* OST object ID */
	__u32 l_ost_gen;	/* generation of this l_ost_idx */
	__u32 l_ost_idx;	/* OST index in LOV (lov_tgt_desc->tgts) */
#define lov_mds_md lov_mds_md_v1
struct lov_mds_md_v1 {		/* LOV EA mds/wire data (little-endian) */
	__u32 lmm_magic;	/* magic number = LOV_MAGIC_V1 */
	__u32 lmm_pattern;	/* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
	struct ost_id lmm_oi;	/* LOV object ID */
	__u32 lmm_stripe_size;	/* size of stripe in bytes */
	/* lmm_stripe_count used to be __u32 */
	__u16 lmm_stripe_count;	/* num stripes in use for this object */
	__u16 lmm_layout_gen;	/* layout generation number */
	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
 * Sigh, because pre-2.4 uses
 *     struct lov_mds_md_v1 {
 *	   __u64 lmm_object_id;
 *	   __u64 lmm_object_seq;
 * to identify the LOV(MDT) object, and lmm_object_seq will
 * be normal_fid, which makes it hard to combine these conversions
 * into ostid_to_fid(), so we will do the lmm_oi/fid conversion separately.
 * We can tell the lmm_oi apart this way:
 * 1.8: lmm_object_id = {inode}, lmm_object_gr = 0
 * 2.1: lmm_object_id = {oid < 128k}, lmm_object_seq = FID_SEQ_NORMAL
 * 2.4: lmm_oi.f_seq = FID_SEQ_NORMAL, lmm_oi.f_oid = {oid < 128k},
 * But currently lmm_oi/lsm_oi does not have any "real" usages,
 * except for printing some information, and the user can always
 * get the real FID from the LMA; besides, this multiple-case check might
 * make swabbing more complicated. So we will keep using id/seq for lmm_oi.
static inline void fid_to_lmm_oi(const struct lu_fid *fid,
	oi->oi.oi_id = fid_oid(fid);
	oi->oi.oi_seq = fid_seq(fid);
static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
	oi->oi.oi_seq = seq;
static inline __u64 lmm_oi_id(struct ost_id *oi)
	return oi->oi.oi_id;
static inline __u64 lmm_oi_seq(struct ost_id *oi)
	return oi->oi.oi_seq;
static inline void lmm_oi_le_to_cpu(struct ost_id *dst_oi,
				    struct ost_id *src_oi)
	dst_oi->oi.oi_id = le64_to_cpu(src_oi->oi.oi_id);
	dst_oi->oi.oi_seq = le64_to_cpu(src_oi->oi.oi_seq);
static inline void lmm_oi_cpu_to_le(struct ost_id *dst_oi,
				    struct ost_id *src_oi)
	dst_oi->oi.oi_id = cpu_to_le64(src_oi->oi.oi_id);
	dst_oi->oi.oi_seq = cpu_to_le64(src_oi->oi.oi_seq);
/* extern void lustre_swab_lov_mds_md(struct lov_mds_md *llm); */
#define MAX_MD_SIZE \
	(sizeof(struct lov_mds_md) + 4 * sizeof(struct lov_ost_data))
#define MIN_MD_SIZE \
	(sizeof(struct lov_mds_md) + 1 * sizeof(struct lov_ost_data))
#define XATTR_NAME_ACL_ACCESS	"system.posix_acl_access"
#define XATTR_NAME_ACL_DEFAULT	"system.posix_acl_default"
#define XATTR_USER_PREFIX	"user."
#define XATTR_TRUSTED_PREFIX	"trusted."
#define XATTR_SECURITY_PREFIX	"security."
#define XATTR_LUSTRE_PREFIX	"lustre."
#define XATTR_NAME_LOV		"trusted.lov"
#define XATTR_NAME_LMA		"trusted.lma"
#define XATTR_NAME_LMV		"trusted.lmv"
#define XATTR_NAME_LINK		"trusted.link"
#define XATTR_NAME_FID		"trusted.fid"
#define XATTR_NAME_VERSION	"trusted.version"
#define XATTR_NAME_SOM		"trusted.som"
#define XATTR_NAME_HSM		"trusted.hsm"
#define XATTR_NAME_LFSCK_NAMESPACE "trusted.lfsck_namespace"
struct lov_mds_md_v3 {		/* LOV EA mds/wire data (little-endian) */
	__u32 lmm_magic;	/* magic number = LOV_MAGIC_V3 */
	__u32 lmm_pattern;	/* LOV_PATTERN_RAID0, LOV_PATTERN_RAID1 */
	struct ost_id lmm_oi;	/* LOV object ID */
	__u32 lmm_stripe_size;	/* size of stripe in bytes */
	/* lmm_stripe_count used to be __u32 */
	__u16 lmm_stripe_count;	/* num stripes in use for this object */
	__u16 lmm_layout_gen;	/* layout generation number */
	char  lmm_pool_name[LOV_MAXPOOLNAME]; /* must be 32bit aligned */
	struct lov_ost_data_v1 lmm_objects[0]; /* per-stripe data */
static inline __u32 lov_mds_md_size(__u16 stripes, __u32 lmm_magic)
	if (lmm_magic == LOV_MAGIC_V3)
		return sizeof(struct lov_mds_md_v3) +
		       stripes * sizeof(struct lov_ost_data_v1);
	return sizeof(struct lov_mds_md_v1) +
	       stripes * sizeof(struct lov_ost_data_v1);
lov_mds_md_max_stripe_count(size_t buf_size, __u32 lmm_magic)
	switch (lmm_magic) {
	case LOV_MAGIC_V1: {
		struct lov_mds_md_v1 lmm;
		if (buf_size < sizeof(lmm))
		return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
	case LOV_MAGIC_V3: {
		struct lov_mds_md_v3 lmm;
		if (buf_size < sizeof(lmm))
		return (buf_size - sizeof(lmm)) / sizeof(lmm.lmm_objects[0]);
#define OBD_MD_FLID	   (0x00000001ULL) /* object ID */
#define OBD_MD_FLATIME	   (0x00000002ULL) /* access time */
#define OBD_MD_FLMTIME	   (0x00000004ULL) /* data modification time */
#define OBD_MD_FLCTIME	   (0x00000008ULL) /* change time */
#define OBD_MD_FLSIZE	   (0x00000010ULL) /* size */
#define OBD_MD_FLBLOCKS	   (0x00000020ULL) /* allocated blocks count */
#define OBD_MD_FLBLKSZ	   (0x00000040ULL) /* block size */
#define OBD_MD_FLMODE	   (0x00000080ULL) /* access bits (mode & ~S_IFMT) */
#define OBD_MD_FLTYPE	   (0x00000100ULL) /* object type (mode & S_IFMT) */
#define OBD_MD_FLUID	   (0x00000200ULL) /* user ID */
#define OBD_MD_FLGID	   (0x00000400ULL) /* group ID */
#define OBD_MD_FLFLAGS	   (0x00000800ULL) /* flags word */
#define OBD_MD_FLNLINK	   (0x00002000ULL) /* link count */
#define OBD_MD_FLGENER	   (0x00004000ULL) /* generation number */
/*#define OBD_MD_FLINLINE	   (0x00008000ULL)  inline data. used until 1.6.5 */
#define OBD_MD_FLRDEV	   (0x00010000ULL) /* device number */
#define OBD_MD_FLEASIZE	   (0x00020000ULL) /* extended attribute data */
#define OBD_MD_LINKNAME	   (0x00040000ULL) /* symbolic link target */
#define OBD_MD_FLHANDLE	   (0x00080000ULL) /* file/lock handle */
#define OBD_MD_FLCKSUM	   (0x00100000ULL) /* bulk data checksum */
#define OBD_MD_FLQOS	   (0x00200000ULL) /* quality of service stats */
/*#define OBD_MD_FLOSCOPQ	   (0x00400000ULL)  osc opaque data, never used */
#define OBD_MD_FLCOOKIE	   (0x00800000ULL) /* log cancellation cookie */
#define OBD_MD_FLGROUP	   (0x01000000ULL) /* group */
#define OBD_MD_FLFID	   (0x02000000ULL) /* ->ost write inline fid */
#define OBD_MD_FLEPOCH	   (0x04000000ULL) /* ->ost write with ioepoch */
					   /* ->mds if epoch opens or closes */
#define OBD_MD_FLGRANT	   (0x08000000ULL) /* ost preallocation space grant */
#define OBD_MD_FLDIREA	   (0x10000000ULL) /* dir's extended attribute data */
#define OBD_MD_FLUSRQUOTA  (0x20000000ULL) /* over quota flags sent from ost */
#define OBD_MD_FLGRPQUOTA  (0x40000000ULL) /* over quota flags sent from ost */
#define OBD_MD_FLMODEASIZE (0x80000000ULL) /* EA size will be changed */
#define OBD_MD_MDS	   (0x0000000100000000ULL) /* where an inode lives on */
#define OBD_MD_REINT	   (0x0000000200000000ULL) /* reintegrate oa */
#define OBD_MD_MEA	   (0x0000000400000000ULL) /* CMD split EA */
#define OBD_MD_TSTATE	   (0x0000000800000000ULL) /* transient state field */
#define OBD_MD_FLXATTR	   (0x0000001000000000ULL) /* xattr */
#define OBD_MD_FLXATTRLS   (0x0000002000000000ULL) /* xattr list */
#define OBD_MD_FLXATTRRM   (0x0000004000000000ULL) /* xattr remove */
#define OBD_MD_FLACL	   (0x0000008000000000ULL) /* ACL */
#define OBD_MD_FLRMTPERM   (0x0000010000000000ULL) /* remote permission */
#define OBD_MD_FLMDSCAPA   (0x0000020000000000ULL) /* MDS capability */
#define OBD_MD_FLOSSCAPA   (0x0000040000000000ULL) /* OSS capability */
#define OBD_MD_FLCKSPLIT   (0x0000080000000000ULL) /* Check split on server */
#define OBD_MD_FLCROSSREF  (0x0000100000000000ULL) /* Cross-ref case */
#define OBD_MD_FLGETATTRLOCK (0x0000200000000000ULL) /* Get IOEpoch attributes
						      * under lock; for xattr
						      * requests means the
						      * client holds the lock */
#define OBD_MD_FLOBJCOUNT  (0x0000400000000000ULL) /* for multiple destroy */
#define OBD_MD_FLRMTLSETFACL (0x0001000000000000ULL) /* lfs lsetfacl case */
#define OBD_MD_FLRMTLGETFACL (0x0002000000000000ULL) /* lfs lgetfacl case */
#define OBD_MD_FLRMTRSETFACL (0x0004000000000000ULL) /* lfs rsetfacl case */
#define OBD_MD_FLRMTRGETFACL (0x0008000000000000ULL) /* lfs rgetfacl case */
#define OBD_MD_FLDATAVERSION (0x0010000000000000ULL) /* iversion sum */
#define OBD_MD_FLRELEASED   (0x0020000000000000ULL) /* file released */
#define OBD_MD_FLGETATTR (OBD_MD_FLID    | OBD_MD_FLATIME | OBD_MD_FLMTIME | \
			  OBD_MD_FLCTIME | OBD_MD_FLSIZE  | OBD_MD_FLBLKSZ | \
			  OBD_MD_FLMODE  | OBD_MD_FLTYPE  | OBD_MD_FLUID   | \
			  OBD_MD_FLGID   | OBD_MD_FLFLAGS | OBD_MD_FLNLINK | \
			  OBD_MD_FLGENER | OBD_MD_FLRDEV  | OBD_MD_FLGROUP)
#define OBD_MD_FLXATTRALL (OBD_MD_FLXATTR | OBD_MD_FLXATTRLS)
/* don't forget obdo_fid which is way down at the bottom so it can
 * come after the definition of llog_cookie */
	HSS_CLEARMASK	= 0x02,
	HSS_ARCHIVE_ID	= 0x04,
struct hsm_state_set {
	__u32	hss_archive_id;
	__u64	hss_clearmask;
extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
extern void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
extern void lustre_swab_obd_statfs(struct obd_statfs *os);
/* ost_body.data values for OST_BRW */
#define OBD_BRW_READ		0x01
#define OBD_BRW_WRITE		0x02
#define OBD_BRW_RWMASK		(OBD_BRW_READ | OBD_BRW_WRITE)
#define OBD_BRW_SYNC		0x08 /* this page is a part of synchronous
				      * transfer and is not accounted in
				      * grant */
#define OBD_BRW_CHECK		0x10
#define OBD_BRW_FROM_GRANT	0x20 /* the osc manages this under llite */
#define OBD_BRW_GRANTED		0x40 /* the ost manages this */
#define OBD_BRW_NOCACHE		0x80 /* this page is a part of non-cached IO */
#define OBD_BRW_NOQUOTA		0x100
#define OBD_BRW_SRVLOCK		0x200 /* Client holds no lock over this page */
#define OBD_BRW_ASYNC		0x400 /* Server may delay commit to disk */
#define OBD_BRW_MEMALLOC	0x800 /* Client runs in the "kswapd" context */
#define OBD_BRW_OVER_USRQUOTA	0x1000 /* Running out of user quota */
#define OBD_BRW_OVER_GRPQUOTA	0x2000 /* Running out of group quota */
1764 #define OBD_OBJECT_EOF 0xffffffffffffffffULL
1766 #define OST_MIN_PRECREATE 32
1767 #define OST_MAX_PRECREATE 20000
1770 struct ost_id ioo_oid; /* object ID, if multi-obj BRW */
1771 __u32 ioo_max_brw; /* low 16 bits were o_mode before 2.4,
1772 * now (PTLRPC_BULK_OPS_COUNT - 1) in
1773 * high 16 bits in 2.4 and later */
1774 __u32 ioo_bufcnt; /* number of niobufs for this object */
1777 #define IOOBJ_MAX_BRW_BITS 16
1778 #define IOOBJ_TYPE_MASK ((1U << IOOBJ_MAX_BRW_BITS) - 1)
1779 #define ioobj_max_brw_get(ioo) (((ioo)->ioo_max_brw >> IOOBJ_MAX_BRW_BITS) + 1)
1780 #define ioobj_max_brw_set(ioo, num) \
1781 do { (ioo)->ioo_max_brw = ((num) - 1) << IOOBJ_MAX_BRW_BITS; } while (0)
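/*
 * Editor's sketch (not part of the protocol): how a BRW count round-trips
 * through the two helpers above.  The function name and the count of 16 are
 * hypothetical; LASSERT is only used to show the expected value.
 */
#if 0	/* illustrative example only */
static void ioobj_max_brw_example(struct obd_ioobj *ioo)
{
	ioobj_max_brw_set(ioo, 16);		/* stores (16 - 1) in the high 16 bits */
	LASSERT(ioobj_max_brw_get(ioo) == 16);	/* reads the count back */
}
#endif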
1783 extern void lustre_swab_obd_ioobj (struct obd_ioobj *ioo);
1785 /* multiple of 8 bytes => can be used safely in an array */
1786 struct niobuf_remote {
1792 extern void lustre_swab_niobuf_remote (struct niobuf_remote *nbr);
1794 /* lock value block communicated between the filter and llite */
1796 /* OST_LVB_ERR_INIT is needed because the return code in rc is
1797 * negative, i.e. because ((MASK + rc) & MASK) != MASK. */
1798 #define OST_LVB_ERR_INIT 0xffbadbad80000000ULL
1799 #define OST_LVB_ERR_MASK 0xffbadbad00000000ULL
1800 #define OST_LVB_IS_ERR(blocks) \
1801 ((blocks & OST_LVB_ERR_MASK) == OST_LVB_ERR_MASK)
1802 #define OST_LVB_SET_ERR(blocks, rc) \
1803 do { blocks = OST_LVB_ERR_INIT + rc; } while (0)
1804 #define OST_LVB_GET_ERR(blocks) (int)(blocks - OST_LVB_ERR_INIT)
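/*
 * Editor's sketch of the LVB error encoding above: a negative errno is
 * folded into the 64-bit blocks field and recovered on the other side.  The
 * function name and the use of -ENOENT are only examples.
 */
#if 0	/* illustrative example only */
static void ost_lvb_err_example(void)
{
	__u64 blocks;

	OST_LVB_SET_ERR(blocks, -ENOENT);	/* blocks = OST_LVB_ERR_INIT - 2 */
	if (OST_LVB_IS_ERR(blocks))
		LASSERT(OST_LVB_GET_ERR(blocks) == -ENOENT);
}
#endif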
1814 extern void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
1828 extern void lustre_swab_ost_lvb(struct ost_lvb *lvb);
1831 * lquota data structures
1834 #ifndef QUOTABLOCK_BITS
1835 #define QUOTABLOCK_BITS 10
1838 #ifndef QUOTABLOCK_SIZE
1839 #define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
1843 #define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS)
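/*
 * Editor's worked example for toqb(): a quota block is 1 << QUOTABLOCK_BITS
 * = 1024 bytes and toqb() rounds up, so (values illustrative):
 *
 *   toqb(0)    == 0
 *   toqb(1)    == 1     ((1 + 1023) >> 10)
 *   toqb(1024) == 1
 *   toqb(1025) == 2
 */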
1846 /* The lquota_id structure is a union of all the possible identifier types that
1847  * can be used with quota; this includes:
1850 * - a FID which can be used for per-directory quota in the future */
1852 struct lu_fid qid_fid; /* FID for per-directory quota */
1853 __u64 qid_uid; /* user identifier */
1854 __u64 qid_gid; /* group identifier */
1857 /* quotactl management */
1858 struct obd_quotactl {
1860 __u32 qc_type; /* see Q_* flag below */
1863 struct obd_dqinfo qc_dqinfo;
1864 struct obd_dqblk qc_dqblk;
1867 extern void lustre_swab_obd_quotactl(struct obd_quotactl *q);
1869 #define Q_QUOTACHECK 0x800100 /* deprecated as of 2.4 */
1870 #define Q_INITQUOTA 0x800101 /* deprecated as of 2.4 */
1871 #define Q_GETOINFO 0x800102 /* get obd quota info */
1872 #define Q_GETOQUOTA 0x800103 /* get obd quotas */
1873 #define Q_FINVALIDATE 0x800104 /* deprecated as of 2.4 */
1875 #define Q_COPY(out, in, member) (out)->member = (in)->member
1877 #define QCTL_COPY(out, in) \
1878 do { \
1879 Q_COPY(out, in, qc_cmd); \
1880 Q_COPY(out, in, qc_type); \
1881 Q_COPY(out, in, qc_id); \
1882 Q_COPY(out, in, qc_stat); \
1883 Q_COPY(out, in, qc_dqinfo); \
1884 Q_COPY(out, in, qc_dqblk); \
1885 } while (0)
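/*
 * Editor's sketch of QCTL_COPY() usage; the function and variable names are
 * hypothetical.  Only the members listed in the macro are copied, so any
 * other state in the destination is left untouched.
 */
#if 0	/* illustrative example only */
static void qctl_copy_example(struct obd_quotactl *dst,
			      const struct obd_quotactl *src)
{
	QCTL_COPY(dst, src);
}
#endif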
1887 /* Body of quota request used for quota acquire/release RPCs between quota
1888 * master (aka QMT) and slaves (aka QSD). */
1890 struct lu_fid qb_fid; /* FID of global index packing the pool ID
1891 * and type (data or metadata) as well as
1892 * the quota type (user or group). */
1893 union lquota_id qb_id; /* uid or gid or directory FID */
1894 __u32 qb_flags; /* see below */
1896 __u64 qb_count; /* acquire/release count (kbytes/inodes) */
1897 __u64 qb_usage; /* current slave usage (kbytes/inodes) */
1898 __u64 qb_slv_ver; /* slave index file version */
1899 struct lustre_handle qb_lockh; /* per-ID lock handle */
1900 struct lustre_handle qb_glb_lockh; /* global lock handle */
1901 __u64 qb_padding1[4];
1904 /* When the quota_body is used in the reply of a quota global intent
1905 * lock (IT_QUOTA_CONN), qb_fid contains the slave index file FID. */
1906 #define qb_slv_fid qb_fid
1907 /* qb_usage is the current qunit (in kbytes/inodes) when quota_body is used in
1908 * the reply of a per-ID quota lock glimpse. */
1909 #define qb_qunit qb_usage
1911 #define QUOTA_DQACQ_FL_ACQ 0x1 /* acquire quota */
1912 #define QUOTA_DQACQ_FL_PREACQ 0x2 /* pre-acquire */
1913 #define QUOTA_DQACQ_FL_REL 0x4 /* release quota */
1914 #define QUOTA_DQACQ_FL_REPORT 0x8 /* report usage */
1916 extern void lustre_swab_quota_body(struct quota_body *b);
1918 /* Quota types currently supported */
1920 LQUOTA_TYPE_USR = 0x00, /* maps to USRQUOTA */
1921 LQUOTA_TYPE_GRP = 0x01, /* maps to GRPQUOTA */
1925 /* There are 2 different resource types on which a quota limit can be enforced:
1926 * - inodes on the MDTs
1927 * - blocks on the OSTs */
1929 LQUOTA_RES_MD = 0x01, /* skip 0 to avoid null oid in FID */
1930 LQUOTA_RES_DT = 0x02,
1932 LQUOTA_FIRST_RES = LQUOTA_RES_MD
1934 #define LQUOTA_NR_RES (LQUOTA_LAST_RES - LQUOTA_FIRST_RES + 1)
1937 * Space accounting support
1938 * Format of an accounting record, providing disk usage information for a given
1941 struct lquota_acct_rec { /* 16 bytes */
1942 __u64 bspace; /* current space in use */
1943 __u64 ispace; /* current # inodes in use */
1947 * Global quota index support
1948 * Format of a global record, providing global quota settings for a given quota
1951 struct lquota_glb_rec { /* 32 bytes */
1952 __u64 qbr_hardlimit; /* quota hard limit, in #inodes or kbytes */
1953 __u64 qbr_softlimit; /* quota soft limit, in #inodes or kbytes */
1954 __u64 qbr_time; /* grace time, in seconds */
1955 __u64 qbr_granted; /* how much is granted to slaves, in #inodes or
1960 * Slave index support
1961 * Format of a slave record, recording how much space is granted to a given
1964 struct lquota_slv_rec { /* 8 bytes */
1965 __u64 qsr_granted; /* space granted to the slave for the key=ID,
1966 * in #inodes or kbytes */
1969 /* Data structures associated with the quota locks */
1971 /* Glimpse descriptor used for the index & per-ID quota locks */
1972 struct ldlm_gl_lquota_desc {
1973 union lquota_id gl_id; /* quota ID subject to the glimpse */
1974 __u64 gl_flags; /* see LQUOTA_FL* below */
1975 __u64 gl_ver; /* new index version */
1976 __u64 gl_hardlimit; /* new hardlimit or qunit value */
1977 __u64 gl_softlimit; /* new softlimit */
1981 #define gl_qunit gl_hardlimit /* current qunit value used when
1982 * glimpsing per-ID quota locks */
1984 /* quota glimpse flags */
1985 #define LQUOTA_FL_EDQUOT 0x1 /* user/group out of quota space on QMT */
1987 /* LVB used with quota (global and per-ID) locks */
1989 __u64 lvb_flags; /* see LQUOTA_FL* above */
1990 __u64 lvb_id_may_rel; /* space that might be released later */
1991 __u64 lvb_id_rel; /* space released by the slave for this ID */
1992 __u64 lvb_id_qunit; /* current qunit value */
1996 extern void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
1998 /* LVB used with global quota lock */
1999 #define lvb_glb_ver lvb_id_may_rel /* current version of the global index */
2007 #define QUOTA_FIRST_OPC QUOTA_DQACQ
2016 MDS_GETATTR_NAME = 34,
2021 MDS_DISCONNECT = 39,
2027 MDS_DONE_WRITING = 45,
2029 MDS_QUOTACHECK = 47,
2032 MDS_SETXATTR = 50, /* obsolete, now it's MDS_REINT op */
2036 MDS_HSM_STATE_GET = 54,
2037 MDS_HSM_STATE_SET = 55,
2038 MDS_HSM_ACTION = 56,
2039 MDS_HSM_PROGRESS = 57,
2040 MDS_HSM_REQUEST = 58,
2041 MDS_HSM_CT_REGISTER = 59,
2042 MDS_HSM_CT_UNREGISTER = 60,
2043 MDS_SWAP_LAYOUTS = 61,
2047 #define MDS_FIRST_OPC MDS_GETATTR
2050 /* opcodes for object update */
2056 #define UPDATE_FIRST_OPC UPDATE_OBJ
2073 } mds_reint_t, mdt_reint_t;
2075 extern void lustre_swab_generic_32s (__u32 *val);
2077 /* the disposition of the intent outlines what was executed */
2078 #define DISP_IT_EXECD 0x00000001
2079 #define DISP_LOOKUP_EXECD 0x00000002
2080 #define DISP_LOOKUP_NEG 0x00000004
2081 #define DISP_LOOKUP_POS 0x00000008
2082 #define DISP_OPEN_CREATE 0x00000010
2083 #define DISP_OPEN_OPEN 0x00000020
2084 #define DISP_ENQ_COMPLETE 0x00400000 /* obsolete and unused */
2085 #define DISP_ENQ_OPEN_REF 0x00800000
2086 #define DISP_ENQ_CREATE_REF 0x01000000
2087 #define DISP_OPEN_LOCK 0x02000000
2088 #define DISP_OPEN_LEASE 0x04000000
2089 #define DISP_OPEN_STRIPE 0x08000000
2091 /* INODE LOCK PARTS */
2092 #define MDS_INODELOCK_LOOKUP 0x000001 /* For namespace, dentry etc, and also
2093 * was used to protect permission (mode,
2094 * owner, group etc) before 2.4. */
2095 #define MDS_INODELOCK_UPDATE 0x000002 /* size, links, timestamps */
2096 #define MDS_INODELOCK_OPEN 0x000004 /* For opened files */
2097 #define MDS_INODELOCK_LAYOUT 0x000008 /* for layout */
2099 /* The PERM bit was added in 2.4, and it is used to protect permission (mode,
2100 * owner, group, acl etc), so as to separate the permission from the LOOKUP lock.
2101 * This is because for remote directories (in DNE), these locks will be granted by
2102 * different MDTs (different ldlm namespaces).
2104 * For a local directory, the MDT will always grant UPDATE_LOCK|PERM_LOCK together.
2105 * For a remote directory, the master MDT, where the remote directory is, will
2106 * grant UPDATE_LOCK|PERM_LOCK, and the remote MDT, where the name entry is, will
2107 * grant LOOKUP_LOCK. */
2108 #define MDS_INODELOCK_PERM 0x000010
2109 #define MDS_INODELOCK_XATTR 0x000020 /* extended attributes */
2111 #define MDS_INODELOCK_MAXSHIFT 5
2112 /* This FULL lock is useful to take for unlink and similar operations */
2113 #define MDS_INODELOCK_FULL ((1<<(MDS_INODELOCK_MAXSHIFT+1))-1)
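/*
 * Editor's note, worked value: with MDS_INODELOCK_MAXSHIFT == 5 the mask is
 * (1 << 6) - 1 == 0x3f, i.e. LOOKUP | UPDATE | OPEN | LAYOUT | PERM | XATTR.
 */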
2115 extern void lustre_swab_ll_fid (struct ll_fid *fid);
2117 /* NOTE: until Lustre 1.8.7/2.1.1 the fid_ver() was packed into name[2],
2118 * but was moved into name[1] along with the OID to avoid consuming the
2119 * name[2,3] fields that need to be used for the quota id (also a FID). */
2121 LUSTRE_RES_ID_SEQ_OFF = 0,
2122 LUSTRE_RES_ID_VER_OID_OFF = 1,
2123 LUSTRE_RES_ID_WAS_VER_OFF = 2, /* see note above */
2124 LUSTRE_RES_ID_QUOTA_SEQ_OFF = 2,
2125 LUSTRE_RES_ID_QUOTA_VER_OID_OFF = 3,
2126 LUSTRE_RES_ID_HSH_OFF = 3
2129 #define MDS_STATUS_CONN 1
2130 #define MDS_STATUS_LOV 2
2132 /* mdt_thread_info.mti_flags. */
2134 /* The flag indicates Size-on-MDS attributes are changed. */
2135 MF_SOM_CHANGE = (1 << 0),
2136 /* Flags indicating that an epoch opens or closes. */
2137 MF_EPOCH_OPEN = (1 << 1),
2138 MF_EPOCH_CLOSE = (1 << 2),
2139 MF_MDC_CANCEL_FID1 = (1 << 3),
2140 MF_MDC_CANCEL_FID2 = (1 << 4),
2141 MF_MDC_CANCEL_FID3 = (1 << 5),
2142 MF_MDC_CANCEL_FID4 = (1 << 6),
2143 /* There is a pending attribute update. */
2144 MF_SOM_AU = (1 << 7),
2145 /* Cancel OST locks while getting OST attributes. */
2146 MF_GETATTR_LOCK = (1 << 8),
2147 MF_GET_MDT_IDX = (1 << 9),
2150 #define MF_SOM_LOCAL_FLAGS (MF_SOM_CHANGE | MF_EPOCH_OPEN | MF_EPOCH_CLOSE)
2152 #define LUSTRE_BFLAG_UNCOMMITTED_WRITES 0x1
2154 /* these should be identical to their EXT4_*_FL counterparts; they are
2155 * redefined here only to avoid dragging in fs/ext4/ext4.h */
2156 #define LUSTRE_SYNC_FL 0x00000008 /* Synchronous updates */
2157 #define LUSTRE_IMMUTABLE_FL 0x00000010 /* Immutable file */
2158 #define LUSTRE_APPEND_FL 0x00000020 /* writes to file may only append */
2159 #define LUSTRE_NOATIME_FL 0x00000080 /* do not update atime */
2160 #define LUSTRE_DIRSYNC_FL 0x00010000 /* dirsync behaviour (dir only) */
2162 /* Convert wire LUSTRE_*_FL to corresponding client local VFS S_* values
2163 * for the client inode i_flags. The LUSTRE_*_FL are the Lustre wire
2164 * protocol equivalents of LDISKFS_*_FL values stored on disk, while
2165 * the S_* flags are kernel-internal values that change between kernel
2166 * versions. These flags are set/cleared via FSFILT_IOC_{GET,SET}_FLAGS.
2167 * See b=16526 for a full history. */
2168 static inline int ll_ext_to_inode_flags(int flags)
2170 return (((flags & LUSTRE_SYNC_FL) ? S_SYNC : 0) |
2171 ((flags & LUSTRE_NOATIME_FL) ? S_NOATIME : 0) |
2172 ((flags & LUSTRE_APPEND_FL) ? S_APPEND : 0) |
2173 #if defined(S_DIRSYNC)
2174 ((flags & LUSTRE_DIRSYNC_FL) ? S_DIRSYNC : 0) |
2175 #endif
2176 ((flags & LUSTRE_IMMUTABLE_FL) ? S_IMMUTABLE : 0));
2177 }
2179 static inline int ll_inode_to_ext_flags(int iflags)
2181 return (((iflags & S_SYNC) ? LUSTRE_SYNC_FL : 0) |
2182 ((iflags & S_NOATIME) ? LUSTRE_NOATIME_FL : 0) |
2183 ((iflags & S_APPEND) ? LUSTRE_APPEND_FL : 0) |
2184 #if defined(S_DIRSYNC)
2185 ((iflags & S_DIRSYNC) ? LUSTRE_DIRSYNC_FL : 0) |
2186 #endif
2187 ((iflags & S_IMMUTABLE) ? LUSTRE_IMMUTABLE_FL : 0));
2188 }
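/*
 * Editor's sketch of the two conversion helpers above: wire LUSTRE_*_FL
 * flags received from the MDS map to kernel S_* inode flags and back again.
 * The function name and flag combination are arbitrary.
 */
#if 0	/* illustrative example only */
static void ll_flags_roundtrip_example(void)
{
	int wire = LUSTRE_APPEND_FL | LUSTRE_NOATIME_FL;
	int vfs = ll_ext_to_inode_flags(wire);		/* S_APPEND | S_NOATIME */

	LASSERT(ll_inode_to_ext_flags(vfs) == wire);	/* lossless round trip */
}
#endif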
2190 /* 64 possible states */
2191 enum md_transient_state {
2192 MS_RESTORE = (1 << 0), /* restore is running */
2198 struct lustre_handle handle;
2200 __u64 size; /* Offset, in the case of MDS_READPAGE */
2204 __u64 blocks; /* XID, in the case of MDS_READPAGE */
2206 __u64 t_state; /* transient file state defined in
2207 * enum md_transient_state
2208 * was "ino" until 2.4.0 */
2215 __u32 flags; /* from vfs for pin/unpin, LUSTRE_BFLAG close */
2217 __u32 nlink; /* #bytes to read in the case of MDS_READPAGE */
2218 __u32 unused2; /* was "generation" until 2.4.0 */
2223 __u32 max_cookiesize;
2224 __u32 uid_h; /* high 32-bits of uid, for FUID */
2225 __u32 gid_h; /* high 32-bits of gid, for FUID */
2226 __u32 padding_5; /* also fix lustre_swab_mdt_body */
2234 extern void lustre_swab_mdt_body (struct mdt_body *b);
2236 struct mdt_ioepoch {
2237 struct lustre_handle handle;
2243 extern void lustre_swab_mdt_ioepoch (struct mdt_ioepoch *b);
2245 /* permissions for md_perm.mp_perm */
2247 CFS_SETUID_PERM = 0x01,
2248 CFS_SETGID_PERM = 0x02,
2249 CFS_SETGRP_PERM = 0x04,
2250 CFS_RMTACL_PERM = 0x08,
2251 CFS_RMTOWN_PERM = 0x10
2254 /* inode access permission for remote user; the inode info is omitted
2255 * because the client already knows it. */
2256 struct mdt_remote_perm {
2263 __u32 rp_access_perm; /* MAY_READ/WRITE/EXEC */
2267 extern void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
2269 struct mdt_rec_setattr {
2279 __u32 sa_padding_1_h;
2280 struct lu_fid sa_fid;
2289 __u32 sa_attr_flags;
2291 __u32 sa_bias; /* some operation flags */
2297 extern void lustre_swab_mdt_rec_setattr (struct mdt_rec_setattr *sa);
2300 * Attribute flags used in mdt_rec_setattr::sa_valid.
2301 * The kernel's #defines for ATTR_* should not be used over the network
2302 * since the client and MDS may run different kernels (see bug 13828)
2303 * Therefore, we should only use MDS_ATTR_* attributes for sa_valid.
2305 #define MDS_ATTR_MODE 0x1ULL /* = 1 */
2306 #define MDS_ATTR_UID 0x2ULL /* = 2 */
2307 #define MDS_ATTR_GID 0x4ULL /* = 4 */
2308 #define MDS_ATTR_SIZE 0x8ULL /* = 8 */
2309 #define MDS_ATTR_ATIME 0x10ULL /* = 16 */
2310 #define MDS_ATTR_MTIME 0x20ULL /* = 32 */
2311 #define MDS_ATTR_CTIME 0x40ULL /* = 64 */
2312 #define MDS_ATTR_ATIME_SET 0x80ULL /* = 128 */
2313 #define MDS_ATTR_MTIME_SET 0x100ULL /* = 256 */
2314 #define MDS_ATTR_FORCE 0x200ULL /* = 512, not a change, but force the change anyway */
2315 #define MDS_ATTR_ATTR_FLAG 0x400ULL /* = 1024 */
2316 #define MDS_ATTR_KILL_SUID 0x800ULL /* = 2048 */
2317 #define MDS_ATTR_KILL_SGID 0x1000ULL /* = 4096 */
2318 #define MDS_ATTR_CTIME_SET 0x2000ULL /* = 8192 */
2319 #define MDS_ATTR_FROM_OPEN 0x4000ULL /* = 16384, called from open path, i.e. O_TRUNC */
2320 #define MDS_ATTR_BLOCKS 0x8000ULL /* = 32768 */
2323 #define FMODE_READ 00000001
2324 #define FMODE_WRITE 00000002
2327 #define MDS_FMODE_CLOSED 00000000
2328 #define MDS_FMODE_EXEC 00000004
2329 /* IO Epoch is opened on a closed file. */
2330 #define MDS_FMODE_EPOCH 01000000
2331 /* IO Epoch is opened on a file truncate. */
2332 #define MDS_FMODE_TRUNC 02000000
2333 /* Size-on-MDS Attribute Update is pending. */
2334 #define MDS_FMODE_SOM 04000000
2336 #define MDS_OPEN_CREATED 00000010
2337 #define MDS_OPEN_CROSS 00000020
2339 #define MDS_OPEN_CREAT 00000100
2340 #define MDS_OPEN_EXCL 00000200
2341 #define MDS_OPEN_TRUNC 00001000
2342 #define MDS_OPEN_APPEND 00002000
2343 #define MDS_OPEN_SYNC 00010000
2344 #define MDS_OPEN_DIRECTORY 00200000
2346 #define MDS_OPEN_BY_FID 040000000 /* open_by_fid for known object */
2347 #define MDS_OPEN_DELAY_CREATE 0100000000 /* delay initial object create */
2348 #define MDS_OPEN_OWNEROVERRIDE 0200000000 /* NFSD rw-reopen ro file for owner */
2349 #define MDS_OPEN_JOIN_FILE 0400000000 /* open for join file.
2350 * We do not support JOIN FILE
2351 * anymore; reserve this flag
2352 * just to prevent the bit from being reused. */
2355 #define MDS_OPEN_LOCK 04000000000 /* This open requires open lock */
2356 #define MDS_OPEN_HAS_EA 010000000000 /* specify object create pattern */
2357 #define MDS_OPEN_HAS_OBJS 020000000000 /* Just set the EA; the objects already exist */
2358 #define MDS_OPEN_NORESTORE 0100000000000ULL /* Do not restore file at open */
2359 #define MDS_OPEN_NEWSTRIPE 0200000000000ULL /* New stripe needed (restripe or
2361 #define MDS_OPEN_VOLATILE 0400000000000ULL /* File is volatile = created
2363 #define MDS_OPEN_LEASE 01000000000000ULL /* Open the file and grant lease
2364 * delegation, succeed if it's not
2365 * being opened with conflict mode.
2367 #define MDS_OPEN_RELEASE 02000000000000ULL /* Open the file for HSM release */
2369 /* permission to create a non-directory file */
2370 #define MAY_CREATE (1 << 7)
2371 /* permission to create a directory */
2372 #define MAY_LINK (1 << 8)
2373 /* permission to delete from a directory */
2374 #define MAY_UNLINK (1 << 9)
2375 /* source's permission for rename */
2376 #define MAY_RENAME_SRC (1 << 10)
2377 /* target's permission for rename */
2378 #define MAY_RENAME_TAR (1 << 11)
2379 /* partial (parent's) VTX permission check */
2380 #define MAY_VTX_PART (1 << 12)
2381 /* full VTX permission check */
2382 #define MAY_VTX_FULL (1 << 13)
2383 /* lfs rgetfacl permission check */
2384 #define MAY_RGETFACL (1 << 14)
2387 MDS_CHECK_SPLIT = 1 << 0,
2388 MDS_CROSS_REF = 1 << 1,
2389 MDS_VTX_BYPASS = 1 << 2,
2390 MDS_PERM_BYPASS = 1 << 3,
2392 MDS_QUOTA_IGNORE = 1 << 5,
2393 MDS_CLOSE_CLEANUP = 1 << 6,
2394 MDS_KEEP_ORPHAN = 1 << 7,
2395 MDS_RECOV_OPEN = 1 << 8,
2396 MDS_DATA_MODIFIED = 1 << 9,
2397 MDS_CREATE_VOLATILE = 1 << 10,
2398 MDS_OWNEROVERRIDE = 1 << 11,
2399 MDS_HSM_RELEASE = 1 << 12,
2402 /* instance of mdt_reint_rec */
2403 struct mdt_rec_create {
2411 __u32 cr_suppgid1_h;
2413 __u32 cr_suppgid2_h;
2414 struct lu_fid cr_fid1;
2415 struct lu_fid cr_fid2;
2416 struct lustre_handle cr_old_handle; /* handle in case of open replay */
2420 __u64 cr_padding_1; /* rr_blocks */
2423 /* the set/get_mrc_cr_flags() helpers must be used to access the 64-bit
2424 * cr_flags value [cr_flags_l, cr_flags_h]; this is done to extend the
2425 * cr_flags size without breaking 1.8 compatibility */
2426 __u32 cr_flags_l; /* for use with open, low 32 bits */
2427 __u32 cr_flags_h; /* for use with open, high 32 bits */
2428 __u32 cr_umask; /* umask for create */
2429 __u32 cr_padding_4; /* rr_padding_4 */
2432 static inline void set_mrc_cr_flags(struct mdt_rec_create *mrc, __u64 flags)
2434 mrc->cr_flags_l = (__u32)(flags & 0xFFFFFFFFULL);
2435 mrc->cr_flags_h = (__u32)(flags >> 32);
2438 static inline __u64 get_mrc_cr_flags(struct mdt_rec_create *mrc)
2440 return ((__u64)(mrc->cr_flags_l) | ((__u64)mrc->cr_flags_h << 32));
2441 }
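/*
 * Editor's sketch of the helpers above: a 64-bit open flags word is split
 * across cr_flags_l/cr_flags_h and recombined on the other side.  The
 * function name and the flag choice (MDS_OPEN_RELEASE exercises the high
 * 32 bits) are only examples.
 */
#if 0	/* illustrative example only */
static void mrc_cr_flags_example(struct mdt_rec_create *mrc)
{
	__u64 flags = MDS_OPEN_CREAT | MDS_OPEN_RELEASE;

	set_mrc_cr_flags(mrc, flags);
	LASSERT(get_mrc_cr_flags(mrc) == flags);
}
#endif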
2443 /* instance of mdt_reint_rec */
2444 struct mdt_rec_link {
2452 __u32 lk_suppgid1_h;
2454 __u32 lk_suppgid2_h;
2455 struct lu_fid lk_fid1;
2456 struct lu_fid lk_fid2;
2458 __u64 lk_padding_1; /* rr_atime */
2459 __u64 lk_padding_2; /* rr_ctime */
2460 __u64 lk_padding_3; /* rr_size */
2461 __u64 lk_padding_4; /* rr_blocks */
2463 __u32 lk_padding_5; /* rr_mode */
2464 __u32 lk_padding_6; /* rr_flags */
2465 __u32 lk_padding_7; /* rr_padding_2 */
2466 __u32 lk_padding_8; /* rr_padding_3 */
2467 __u32 lk_padding_9; /* rr_padding_4 */
2470 /* instance of mdt_reint_rec */
2471 struct mdt_rec_unlink {
2479 __u32 ul_suppgid1_h;
2481 __u32 ul_suppgid2_h;
2482 struct lu_fid ul_fid1;
2483 struct lu_fid ul_fid2;
2485 __u64 ul_padding_2; /* rr_atime */
2486 __u64 ul_padding_3; /* rr_ctime */
2487 __u64 ul_padding_4; /* rr_size */
2488 __u64 ul_padding_5; /* rr_blocks */
2491 __u32 ul_padding_6; /* rr_flags */
2492 __u32 ul_padding_7; /* rr_padding_2 */
2493 __u32 ul_padding_8; /* rr_padding_3 */
2494 __u32 ul_padding_9; /* rr_padding_4 */
2497 /* instance of mdt_reint_rec */
2498 struct mdt_rec_rename {
2506 __u32 rn_suppgid1_h;
2508 __u32 rn_suppgid2_h;
2509 struct lu_fid rn_fid1;
2510 struct lu_fid rn_fid2;
2512 __u64 rn_padding_1; /* rr_atime */
2513 __u64 rn_padding_2; /* rr_ctime */
2514 __u64 rn_padding_3; /* rr_size */
2515 __u64 rn_padding_4; /* rr_blocks */
2516 __u32 rn_bias; /* some operation flags */
2517 __u32 rn_mode; /* cross-ref rename has mode */
2518 __u32 rn_padding_5; /* rr_flags */
2519 __u32 rn_padding_6; /* rr_padding_2 */
2520 __u32 rn_padding_7; /* rr_padding_3 */
2521 __u32 rn_padding_8; /* rr_padding_4 */
2524 /* instance of mdt_reint_rec */
2525 struct mdt_rec_setxattr {
2533 __u32 sx_suppgid1_h;
2535 __u32 sx_suppgid2_h;
2536 struct lu_fid sx_fid;
2537 __u64 sx_padding_1; /* These three are rr_fid2 */
2542 __u64 sx_padding_5; /* rr_ctime */
2543 __u64 sx_padding_6; /* rr_size */
2544 __u64 sx_padding_7; /* rr_blocks */
2547 __u32 sx_padding_8; /* rr_flags */
2548 __u32 sx_padding_9; /* rr_padding_2 */
2549 __u32 sx_padding_10; /* rr_padding_3 */
2550 __u32 sx_padding_11; /* rr_padding_4 */
2554 * mdt_rec_reint is the template for all mdt_reint_xxx structures.
2555 * Do NOT change the size of various members, otherwise the value
2556 * will be broken in lustre_swab_mdt_rec_reint().
2558 * If you add new members in other mdt_reint_xxx structures and need to use the
2559 * rr_padding_x fields, then update lustre_swab_mdt_rec_reint() also.
2561 struct mdt_rec_reint {
2569 __u32 rr_suppgid1_h;
2571 __u32 rr_suppgid2_h;
2572 struct lu_fid rr_fid1;
2573 struct lu_fid rr_fid2;
2584 __u32 rr_padding_4; /* also fix lustre_swab_mdt_rec_reint */
2587 extern void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
2590 __u32 ld_tgt_count; /* how many MDS's */
2591 __u32 ld_active_tgt_count; /* how many active */
2592 __u32 ld_default_stripe_count; /* how many objects are used */
2593 __u32 ld_pattern; /* default MEA_MAGIC_* */
2594 __u64 ld_default_hash_size;
2595 __u64 ld_padding_1; /* also fix lustre_swab_lmv_desc */
2596 __u32 ld_padding_2; /* also fix lustre_swab_lmv_desc */
2597 __u32 ld_qos_maxage; /* in seconds */
2598 __u32 ld_padding_3; /* also fix lustre_swab_lmv_desc */
2599 __u32 ld_padding_4; /* also fix lustre_swab_lmv_desc */
2600 struct obd_uuid ld_uuid;
2603 extern void lustre_swab_lmv_desc (struct lmv_desc *ld);
2605 /* TODO: lmv_stripe_md should contain mds capabilities for all slave fids */
2606 struct lmv_stripe_md {
2611 char mea_pool_name[LOV_MAXPOOLNAME];
2612 struct lu_fid mea_ids[0];
2615 extern void lustre_swab_lmv_stripe_md(struct lmv_stripe_md *mea);
2617 /* lmv structures */
2618 #define MEA_MAGIC_LAST_CHAR 0xb2221ca1
2619 #define MEA_MAGIC_ALL_CHARS 0xb222a11c
2620 #define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
2622 #define MAX_HASH_SIZE_32 0x7fffffffUL
2623 #define MAX_HASH_SIZE 0x7fffffffffffffffULL
2624 #define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
2629 FLD_FIRST_OPC = FLD_QUERY
2635 SEQ_FIRST_OPC = SEQ_QUERY
2639 SEQ_ALLOC_SUPER = 0,
2644 * LOV data structures
2647 #define LOV_MAX_UUID_BUFFER_SIZE 8192
2648 /* The size of the buffer the lov/mdc reserves for the
2649 * array of UUIDs returned by the MDS. With the current
2650 * protocol, this will limit the max number of OSTs per LOV */
2652 #define LOV_DESC_MAGIC 0xB0CCDE5C
2653 #define LOV_DESC_QOS_MAXAGE_DEFAULT 5 /* Seconds */
2654 #define LOV_DESC_STRIPE_SIZE_DEFAULT (1 << LNET_MTU_BITS)
2656 /* LOV settings descriptor (should only contain static info) */
2658 __u32 ld_tgt_count; /* how many OBD's */
2659 __u32 ld_active_tgt_count; /* how many active */
2660 __u32 ld_default_stripe_count; /* how many objects are used */
2661 __u32 ld_pattern; /* default PATTERN_RAID0 */
2662 __u64 ld_default_stripe_size; /* in bytes */
2663 __u64 ld_default_stripe_offset; /* in bytes */
2664 __u32 ld_padding_0; /* unused */
2665 __u32 ld_qos_maxage; /* in seconds */
2666 __u32 ld_padding_1; /* also fix lustre_swab_lov_desc */
2667 __u32 ld_padding_2; /* also fix lustre_swab_lov_desc */
2668 struct obd_uuid ld_uuid;
2671 #define ld_magic ld_active_tgt_count /* for swabbing from llogs */
2673 extern void lustre_swab_lov_desc (struct lov_desc *ld);
2678 /* opcodes -- MUST be distinct from OST/MDS opcodes */
2683 LDLM_BL_CALLBACK = 104,
2684 LDLM_CP_CALLBACK = 105,
2685 LDLM_GL_CALLBACK = 106,
2686 LDLM_SET_INFO = 107,
2689 #define LDLM_FIRST_OPC LDLM_ENQUEUE
2691 #define RES_NAME_SIZE 4
2692 struct ldlm_res_id {
2693 __u64 name[RES_NAME_SIZE];
2696 #define DLDLMRES "[%#llx:%#llx:%#llx].%llx"
2697 #define PLDLMRES(res) (res)->lr_name.name[0], (res)->lr_name.name[1], \
2698 (res)->lr_name.name[2], (res)->lr_name.name[3]
2700 extern void lustre_swab_ldlm_res_id (struct ldlm_res_id *id);
2702 static inline int ldlm_res_eq(const struct ldlm_res_id *res0,
2703 const struct ldlm_res_id *res1)
2705 return !memcmp(res0, res1, sizeof(*res0));
2722 #define LCK_MODE_NUM 8
2732 #define LDLM_MIN_TYPE LDLM_PLAIN
2734 struct ldlm_extent {
2740 static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
2741 struct ldlm_extent *ex2)
2743 return (ex1->start <= ex2->end) && (ex2->start <= ex1->end);
2746 /* check if @ex1 contains @ex2 */
2747 static inline int ldlm_extent_contain(struct ldlm_extent *ex1,
2748 struct ldlm_extent *ex2)
2750 return (ex1->start <= ex2->start) && (ex1->end >= ex2->end);
2751 }
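/*
 * Editor's illustrative extents for the two helpers above (byte ranges are
 * arbitrary):
 *
 *   [0, 4095] and [4096, 8191]   do not overlap
 *   [0, 8191] contains [4096, 8191]
 *   [0, 5000] and [4096, 8191]   overlap, but neither contains the other
 */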
2753 struct ldlm_inodebits {
2757 struct ldlm_flock_wire {
2765 /* it's important that the fields of the ldlm_extent structure match
2766 * the first fields of the ldlm_flock structure because there is only
2767 * one ldlm_swab routine to process the ldlm_policy_data_t union. if
2768 * this ever changes we will need to swab the union differently based
2769 * on the resource type. */
2772 struct ldlm_extent l_extent;
2773 struct ldlm_flock_wire l_flock;
2774 struct ldlm_inodebits l_inodebits;
2775 } ldlm_wire_policy_data_t;
2777 extern void lustre_swab_ldlm_policy_data (ldlm_wire_policy_data_t *d);
2779 union ldlm_gl_desc {
2780 struct ldlm_gl_lquota_desc lquota_desc;
2783 extern void lustre_swab_gl_desc(union ldlm_gl_desc *);
2785 struct ldlm_intent {
2789 extern void lustre_swab_ldlm_intent (struct ldlm_intent *i);
2791 struct ldlm_resource_desc {
2792 ldlm_type_t lr_type;
2793 __u32 lr_padding; /* also fix lustre_swab_ldlm_resource_desc */
2794 struct ldlm_res_id lr_name;
2797 extern void lustre_swab_ldlm_resource_desc (struct ldlm_resource_desc *r);
2799 struct ldlm_lock_desc {
2800 struct ldlm_resource_desc l_resource;
2801 ldlm_mode_t l_req_mode;
2802 ldlm_mode_t l_granted_mode;
2803 ldlm_wire_policy_data_t l_policy_data;
2806 extern void lustre_swab_ldlm_lock_desc (struct ldlm_lock_desc *l);
2808 #define LDLM_LOCKREQ_HANDLES 2
2809 #define LDLM_ENQUEUE_CANCEL_OFF 1
2811 struct ldlm_request {
2814 struct ldlm_lock_desc lock_desc;
2815 struct lustre_handle lock_handle[LDLM_LOCKREQ_HANDLES];
2818 extern void lustre_swab_ldlm_request (struct ldlm_request *rq);
2820 /* If LDLM_ENQUEUE, 1 slot is already occupied, 1 is available.
2821 * Otherwise, 2 are available. */
2822 #define ldlm_request_bufsize(count, type) \
2823 ({ \
2824 int _avail = LDLM_LOCKREQ_HANDLES; \
2825 _avail -= (type == LDLM_ENQUEUE ? LDLM_ENQUEUE_CANCEL_OFF : 0); \
2826 sizeof(struct ldlm_request) + \
2827 (count > _avail ? count - _avail : 0) * \
2828 sizeof(struct lustre_handle); \
2829 })
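/*
 * Editor's worked example for ldlm_request_bufsize() (illustrative): for an
 * LDLM_ENQUEUE carrying 3 cancel handles, one of the LDLM_LOCKREQ_HANDLES
 * slots is reserved for the lock being enqueued, so one cancel handle fits
 * in the static array and two more are appended:
 *
 *   bufsize = sizeof(struct ldlm_request) + 2 * sizeof(struct lustre_handle)
 */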
2833 __u32 lock_padding; /* also fix lustre_swab_ldlm_reply */
2834 struct ldlm_lock_desc lock_desc;
2835 struct lustre_handle lock_handle;
2836 __u64 lock_policy_res1;
2837 __u64 lock_policy_res2;
2840 extern void lustre_swab_ldlm_reply (struct ldlm_reply *r);
2842 #define ldlm_flags_to_wire(flags) ((__u32)(flags))
2843 #define ldlm_flags_from_wire(flags) ((__u64)(flags))
2846 * Opcodes for mountconf (mgs and mgc)
2851 MGS_EXCEPTION, /* node died, etc. */
2852 MGS_TARGET_REG, /* whenever target starts up */
2858 #define MGS_FIRST_OPC MGS_CONNECT
2860 #define MGS_PARAM_MAXLEN 1024
2861 #define KEY_SET_INFO "set_info"
2863 struct mgs_send_param {
2864 char mgs_param[MGS_PARAM_MAXLEN];
2867 /* We pass this info to the MGS so it can write config logs */
2868 #define MTI_NAME_MAXLEN 64
2869 #define MTI_PARAM_MAXLEN 4096
2870 #define MTI_NIDS_MAX 32
2871 struct mgs_target_info {
2872 __u32 mti_lustre_ver;
2873 __u32 mti_stripe_index;
2874 __u32 mti_config_ver;
2876 __u32 mti_nid_count;
2877 __u32 mti_instance; /* Running instance of target */
2878 char mti_fsname[MTI_NAME_MAXLEN];
2879 char mti_svname[MTI_NAME_MAXLEN];
2880 char mti_uuid[sizeof(struct obd_uuid)];
2881 __u64 mti_nids[MTI_NIDS_MAX]; /* host nids (lnet_nid_t)*/
2882 char mti_params[MTI_PARAM_MAXLEN];
2884 extern void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
2886 struct mgs_nidtbl_entry {
2887 __u64 mne_version; /* table version of this entry */
2888 __u32 mne_instance; /* target instance # */
2889 __u32 mne_index; /* target index */
2890 __u32 mne_length; /* length of this entry, in bytes */
2891 __u8 mne_type; /* target type LDD_F_SV_TYPE_OST/MDT */
2892 __u8 mne_nid_type; /* type of NID (must be zero for now); reserved for IPv6 */
2893 __u8 mne_nid_size; /* size of each NID, in bytes */
2894 __u8 mne_nid_count; /* # of NIDs in buffer */
2896 lnet_nid_t nids[0]; /* variable size buffer for NIDs. */
2899 extern void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
2901 struct mgs_config_body {
2902 char mcb_name[MTI_NAME_MAXLEN]; /* logname */
2903 __u64 mcb_offset; /* next index of config log to request */
2904 __u16 mcb_type; /* type of log: CONFIG_T_[CONFIG|RECOVER] */
2906 __u8 mcb_bits; /* bits unit size of config log */
2907 __u32 mcb_units; /* # of units for bulk transfer */
2909 extern void lustre_swab_mgs_config_body(struct mgs_config_body *body);
2911 struct mgs_config_res {
2912 __u64 mcr_offset; /* index of last config log */
2913 __u64 mcr_size; /* size of the log */
2915 extern void lustre_swab_mgs_config_res(struct mgs_config_res *body);
2917 /* Config marker flags (in config log) */
2918 #define CM_START 0x01
2920 #define CM_SKIP 0x04
2921 #define CM_UPGRADE146 0x08
2922 #define CM_EXCLUDE 0x10
2923 #define CM_START_SKIP (CM_START | CM_SKIP)
2926 __u32 cm_step; /* aka config version */
2928 __u32 cm_vers; /* lustre release version number */
2929 __u32 cm_padding; /* 64 bit align */
2930 __s64 cm_createtime; /* when this record was first created */
2931 __s64 cm_canceltime; /* when this record is no longer valid */
2932 char cm_tgtname[MTI_NAME_MAXLEN];
2933 char cm_comment[MTI_NAME_MAXLEN];
2936 extern void lustre_swab_cfg_marker(struct cfg_marker *marker,
2937 int swab, int size);
2940 * Opcodes for multiple servers.
2950 #define OBD_FIRST_OPC OBD_PING
2952 /* catalog of log objects */
2954 /** Identifier for a single log object */
2956 struct ost_id lgl_oi;
2958 } __attribute__((packed));
2960 /** Records written to the CATALOGS list */
2961 #define CATLIST "CATALOGS"
2963 struct llog_logid lci_logid;
2967 } __attribute__((packed));
2969 /* Log data record types - there is no specific reason that these need to
2970 * be related to the RPC opcodes, but no reason not to (may be handy later?)
2972 #define LLOG_OP_MAGIC 0x10600000
2973 #define LLOG_OP_MASK 0xfff00000
2976 LLOG_PAD_MAGIC = LLOG_OP_MAGIC | 0x00000,
2977 OST_SZ_REC = LLOG_OP_MAGIC | 0x00f00,
2978 /* OST_RAID1_REC = LLOG_OP_MAGIC | 0x01000, never used */
2979 MDS_UNLINK_REC = LLOG_OP_MAGIC | 0x10000 | (MDS_REINT << 8) |
2980 REINT_UNLINK, /* obsolete after 2.5.0 */
2981 MDS_UNLINK64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2983 /* MDS_SETATTR_REC = LLOG_OP_MAGIC | 0x12401, obsolete 1.8.0 */
2984 MDS_SETATTR64_REC = LLOG_OP_MAGIC | 0x90000 | (MDS_REINT << 8) |
2986 OBD_CFG_REC = LLOG_OP_MAGIC | 0x20000,
2987 /* PTL_CFG_REC = LLOG_OP_MAGIC | 0x30000, obsolete 1.4.0 */
2988 LLOG_GEN_REC = LLOG_OP_MAGIC | 0x40000,
2989 /* LLOG_JOIN_REC = LLOG_OP_MAGIC | 0x50000, obsolete 1.8.0 */
2990 CHANGELOG_REC = LLOG_OP_MAGIC | 0x60000,
2991 CHANGELOG_USER_REC = LLOG_OP_MAGIC | 0x70000,
2992 HSM_AGENT_REC = LLOG_OP_MAGIC | 0x80000,
2993 LLOG_HDR_MAGIC = LLOG_OP_MAGIC | 0x45539,
2994 LLOG_LOGID_MAGIC = LLOG_OP_MAGIC | 0x4553b,
2997 #define LLOG_REC_HDR_NEEDS_SWABBING(r) \
2998 (((r)->lrh_type & __swab32(LLOG_OP_MASK)) == __swab32(LLOG_OP_MAGIC))
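/*
 * Editor's sketch: how a reader decides whether an llog record needs byte
 * swapping before it is used.  lustre_swab_llog_rec() is declared later in
 * this file; the wrapper function is hypothetical.
 */
#if 0	/* illustrative example only */
static void llog_rec_swab_example(struct llog_rec_hdr *rec)
{
	if (LLOG_REC_HDR_NEEDS_SWABBING(rec))
		lustre_swab_llog_rec(rec);	/* convert from the other endianness */
}
#endif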
3000 /** Log record header - stored in little endian order.
3001 * Each record must start with this struct, end with a llog_rec_tail,
3002 * and be a multiple of 256 bits in size.
3004 struct llog_rec_hdr {
3011 struct llog_rec_tail {
3016 /* Where data follow just after header */
3017 #define REC_DATA(ptr) \
3018 ((void *)((char *)ptr + sizeof(struct llog_rec_hdr)))
3020 #define REC_DATA_LEN(rec) \
3021 (rec->lrh_len - sizeof(struct llog_rec_hdr) - \
3022 sizeof(struct llog_rec_tail))
3024 struct llog_logid_rec {
3025 struct llog_rec_hdr lid_hdr;
3026 struct llog_logid lid_id;
3030 struct llog_rec_tail lid_tail;
3031 } __attribute__((packed));
3033 struct llog_unlink_rec {
3034 struct llog_rec_hdr lur_hdr;
3038 struct llog_rec_tail lur_tail;
3039 } __attribute__((packed));
3041 struct llog_unlink64_rec {
3042 struct llog_rec_hdr lur_hdr;
3043 struct lu_fid lur_fid;
3044 __u32 lur_count; /* to destroy the lost precreated */
3048 struct llog_rec_tail lur_tail;
3049 } __attribute__((packed));
3051 struct llog_setattr64_rec {
3052 struct llog_rec_hdr lsr_hdr;
3053 struct ost_id lsr_oi;
3059 struct llog_rec_tail lsr_tail;
3060 } __attribute__((packed));
3062 struct llog_size_change_rec {
3063 struct llog_rec_hdr lsc_hdr;
3064 struct ll_fid lsc_fid;
3069 struct llog_rec_tail lsc_tail;
3070 } __attribute__((packed));
3072 #define CHANGELOG_MAGIC 0xca103000
3074 /** \a changelog_rec_type's that can't be masked */
3075 #define CHANGELOG_MINMASK (1 << CL_MARK)
3076 /** bits covering all \a changelog_rec_type's */
3077 #define CHANGELOG_ALLMASK 0xFFFFFFFF
3078 /** default \a changelog_rec_type mask */
3079 #define CHANGELOG_DEFMASK (CHANGELOG_ALLMASK & ~(1 << CL_ATIME | 1 << CL_CLOSE))
3081 /* changelog llog name, needed by client replicators */
3082 #define CHANGELOG_CATALOG "changelog_catalog"
3084 struct changelog_setinfo {
3087 } __attribute__((packed));
3089 /** changelog record */
3090 struct llog_changelog_rec {
3091 struct llog_rec_hdr cr_hdr;
3092 struct changelog_rec cr;
3093 struct llog_rec_tail cr_tail; /**< for_sizeof_only */
3094 } __attribute__((packed));
3096 struct llog_changelog_ext_rec {
3097 struct llog_rec_hdr cr_hdr;
3098 struct changelog_ext_rec cr;
3099 struct llog_rec_tail cr_tail; /**< for_sizeof_only */
3100 } __attribute__((packed));
3102 #define CHANGELOG_USER_PREFIX "cl"
3104 struct llog_changelog_user_rec {
3105 struct llog_rec_hdr cur_hdr;
3109 struct llog_rec_tail cur_tail;
3110 } __attribute__((packed));
3112 enum agent_req_status {
3120 static inline char *agent_req_status2name(enum agent_req_status ars)
3138 static inline bool agent_req_in_final_state(enum agent_req_status ars)
3140 return ((ars == ARS_SUCCEED) || (ars == ARS_FAILED) ||
3141 (ars == ARS_CANCELED));
3144 struct llog_agent_req_rec {
3145 struct llog_rec_hdr arr_hdr; /**< record header */
3146 __u32 arr_status; /**< status of the request */
3148 * agent_req_status */
3149 __u32 arr_archive_id; /**< backend archive number */
3150 __u64 arr_flags; /**< req flags */
3151 __u64 arr_compound_id; /**< compound cookie */
3152 __u64 arr_req_create; /**< req. creation time */
3153 __u64 arr_req_change; /**< req. status change time */
3154 struct hsm_action_item arr_hai; /**< req. to the agent */
3155 struct llog_rec_tail arr_tail; /**< record tail, for_sizeof_only */
3156 } __attribute__((packed));
3158 /* Old llog gen for compatibility */
3162 } __attribute__((packed));
3164 struct llog_gen_rec {
3165 struct llog_rec_hdr lgr_hdr;
3166 struct llog_gen lgr_gen;
3170 struct llog_rec_tail lgr_tail;
3173 /* On-disk header structure of each log object, stored in little endian order */
3174 #define LLOG_CHUNK_SIZE 8192
3175 #define LLOG_HEADER_SIZE (96)
3176 #define LLOG_BITMAP_BYTES (LLOG_CHUNK_SIZE - LLOG_HEADER_SIZE)
3178 #define LLOG_MIN_REC_SIZE (24) /* round(llog_rec_hdr + llog_rec_tail) */
3180 /* flags for the logs */
3182 LLOG_F_ZAP_WHEN_EMPTY = 0x1,
3183 LLOG_F_IS_CAT = 0x2,
3184 LLOG_F_IS_PLAIN = 0x4,
3187 struct llog_log_hdr {
3188 struct llog_rec_hdr llh_hdr;
3189 __s64 llh_timestamp;
3191 __u32 llh_bitmap_offset;
3195 /* for a catalog the first plain slot is next to it */
3196 struct obd_uuid llh_tgtuuid;
3197 __u32 llh_reserved[LLOG_HEADER_SIZE/sizeof(__u32) - 23];
3198 __u32 llh_bitmap[LLOG_BITMAP_BYTES/sizeof(__u32)];
3199 struct llog_rec_tail llh_tail;
3200 } __attribute__((packed));
3202 #define LLOG_BITMAP_SIZE(llh) (__u32)((llh->llh_hdr.lrh_len - \
3203 llh->llh_bitmap_offset - \
3204 sizeof(llh->llh_tail)) * 8)
3206 /** log cookies are used to reference a specific log file and a record therein */
3207 struct llog_cookie {
3208 struct llog_logid lgc_lgl;
3212 } __attribute__((packed));
3214 /** llog protocol */
3215 enum llogd_rpc_ops {
3216 LLOG_ORIGIN_HANDLE_CREATE = 501,
3217 LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502,
3218 LLOG_ORIGIN_HANDLE_READ_HEADER = 503,
3219 LLOG_ORIGIN_HANDLE_WRITE_REC = 504,
3220 LLOG_ORIGIN_HANDLE_CLOSE = 505,
3221 LLOG_ORIGIN_CONNECT = 506,
3222 LLOG_CATINFO = 507, /* deprecated */
3223 LLOG_ORIGIN_HANDLE_PREV_BLOCK = 508,
3224 LLOG_ORIGIN_HANDLE_DESTROY = 509, /* for destroy llog object*/
3226 LLOG_FIRST_OPC = LLOG_ORIGIN_HANDLE_CREATE
3230 struct llog_logid lgd_logid;
3232 __u32 lgd_llh_flags;
3234 __u32 lgd_saved_index;
3236 __u64 lgd_cur_offset;
3237 } __attribute__((packed));
3239 struct llogd_conn_body {
3240 struct llog_gen lgdc_gen;
3241 struct llog_logid lgdc_logid;
3242 __u32 lgdc_ctxt_idx;
3243 } __attribute__((packed));
3245 /* Note: 64-bit types are 64-bit aligned in structure */
3247 __u64 o_valid; /* hot fields in this obdo */
3250 __u64 o_size; /* o_size-o_blocks == ost_lvb */
3254 __u64 o_blocks; /* brw: cli sent cached bytes */
3257 /* 32-bit fields start here: keep an even number of them via padding */
3258 __u32 o_blksize; /* optimal IO blocksize */
3259 __u32 o_mode; /* brw: cli sent cache remain */
3263 __u32 o_nlink; /* brw: checksum */
3265 __u32 o_misc; /* brw: o_dropped */
3267 __u64 o_ioepoch; /* epoch in ost writes */
3268 __u32 o_stripe_idx; /* holds stripe idx */
3270 struct lustre_handle o_handle; /* brw: lock handle to prolong
3272 struct llog_cookie o_lcookie; /* destroy: unlink cookie from
3277 __u64 o_data_version; /* getattr: sum of iversion for
3279 * brw: grant space consumed on
3280 * the client for the write */
3286 #define o_dirty o_blocks
3287 #define o_undirty o_mode
3288 #define o_dropped o_misc
3289 #define o_cksum o_nlink
3290 #define o_grant_used o_data_version
3292 static inline void lustre_set_wire_obdo(struct obd_connect_data *ocd,
3294 const struct obdo *lobdo)
3297 wobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3301 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3302 fid_seq_is_echo(ostid_seq(&lobdo->o_oi))) {
3303 /* Currently OBD_FL_OSTID will only be used when a 2.4 echo
3304 * client communicates with a pre-2.4 server */
3305 wobdo->o_oi.oi.oi_id = fid_oid(&lobdo->o_oi.oi_fid);
3306 wobdo->o_oi.oi.oi_seq = fid_seq(&lobdo->o_oi.oi_fid);
3310 static inline void lustre_get_wire_obdo(struct obd_connect_data *ocd,
3312 const struct obdo *wobdo)
3314 __u32 local_flags = 0;
3316 if (lobdo->o_valid & OBD_MD_FLFLAGS)
3317 local_flags = lobdo->o_flags & OBD_FL_LOCAL_MASK;
3320 if (local_flags != 0) {
3321 lobdo->o_valid |= OBD_MD_FLFLAGS;
3322 lobdo->o_flags &= ~OBD_FL_LOCAL_MASK;
3323 lobdo->o_flags |= local_flags;
3328 if (unlikely(!(ocd->ocd_connect_flags & OBD_CONNECT_FID)) &&
3329 fid_seq_is_echo(wobdo->o_oi.oi.oi_seq)) {
3331 lobdo->o_oi.oi_fid.f_seq = wobdo->o_oi.oi.oi_seq;
3332 lobdo->o_oi.oi_fid.f_oid = wobdo->o_oi.oi.oi_id;
3333 lobdo->o_oi.oi_fid.f_ver = 0;
3337 extern void lustre_swab_obdo (struct obdo *o);
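/*
 * Editor's sketch of how the two obdo helpers above are used around an RPC
 * (function and variable names are hypothetical): the local obdo is
 * converted to the wire layout before packing and converted back when the
 * reply is unpacked.
 */
#if 0	/* illustrative example only */
static void obdo_wire_roundtrip_example(struct obd_connect_data *ocd,
					struct obdo *wire, struct obdo *local)
{
	lustre_set_wire_obdo(ocd, wire, local);	/* local -> wire before sending */
	/* ... pack *wire into the request, send it, unpack the reply ... */
	lustre_get_wire_obdo(ocd, local, wire);	/* wire -> local after the reply */
}
#endif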
3339 /* request structure for OSTs */
3344 /* Key for FIEMAP to be used in get_info calls */
3345 struct ll_fiemap_info_key {
3348 struct ll_user_fiemap fiemap;
3351 extern void lustre_swab_ost_body (struct ost_body *b);
3352 extern void lustre_swab_ost_last_id(__u64 *id);
3353 extern void lustre_swab_fiemap(struct ll_user_fiemap *fiemap);
3355 extern void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
3356 extern void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
3357 extern void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
3359 extern void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
3362 extern void lustre_swab_llogd_body (struct llogd_body *d);
3363 extern void lustre_swab_llog_hdr (struct llog_log_hdr *h);
3364 extern void lustre_swab_llogd_conn_body (struct llogd_conn_body *d);
3365 extern void lustre_swab_llog_rec(struct llog_rec_hdr *rec);
3366 extern void lustre_swab_llog_id(struct llog_logid *lid);
3369 extern void lustre_swab_lustre_cfg(struct lustre_cfg *lcfg);
3371 /* Functions for dumping PTLRPC fields */
3372 void dump_rniobuf(struct niobuf_remote *rnb);
3373 void dump_ioo(struct obd_ioobj *nb);
3374 void dump_obdo(struct obdo *oa);
3375 void dump_ost_body(struct ost_body *ob);
3376 void dump_rcs(__u32 *rc);
3378 #define IDX_INFO_MAGIC 0x3D37CC37
3380 /* Index file transfer through the network. The server serializes the index into
3381 * a byte stream which is sent to the client via a bulk transfer */
3385 /* reply: see idx_info_flags below */
3388 /* request & reply: number of lu_idxpage (to be) transferred */
3392 /* request: requested attributes passed down to the iterator API */
3395 /* request & reply: index file identifier (FID) */
3396 struct lu_fid ii_fid;
3398 /* reply: version of the index file before starting to walk the index.
3399 * Please note that the version can be modified at any time during the
3403 /* request: hash to start with:
3404 * reply: hash of the first entry of the first lu_idxpage and hash
3405 * of the entry to read next if any */
3406 __u64 ii_hash_start;
3409 /* reply: size of keys in lu_idxpages, minimal one if II_FL_VARKEY is
3413 /* reply: size of records in lu_idxpages, minimal one if II_FL_VARREC
3421 extern void lustre_swab_idx_info(struct idx_info *ii);
3423 #define II_END_OFF MDS_DIR_END_OFF /* all entries have been read */
3425 /* List of flags used in idx_info::ii_flags */
3426 enum idx_info_flags {
3427 II_FL_NOHASH = 1 << 0, /* client doesn't care about hash value */
3428 II_FL_VARKEY = 1 << 1, /* keys can be of variable size */
3429 II_FL_VARREC = 1 << 2, /* records can be of variable size */
3430 II_FL_NONUNQ = 1 << 3, /* index supports non-unique keys */
3433 #define LIP_MAGIC 0x8A6D6B6C
3435 /* 4KB (= LU_PAGE_SIZE) container gathering key/record pairs */
3437 /* 16-byte header */
3440 __u16 lip_nr; /* number of entries in the container */
3441 __u64 lip_pad0; /* additional padding for future use */
3443 /* key/record pairs are stored in the remaining 4080 bytes.
3444 * depending upon the flags in idx_info::ii_flags, each key/record
3445 * pair might be preceded by:
3447 * - the key size (II_FL_VARKEY is set)
3448 * - the record size (II_FL_VARREC is set)
3450 * For the time being, we only support fixed-size key & record. */
3451 char lip_entries[0];
3453 extern void lustre_swab_lip_header(struct lu_idxpage *lip);
3455 #define LIP_HDR_SIZE (offsetof(struct lu_idxpage, lip_entries))
3457 /* Gather all possible type associated with a 4KB container */
3459 struct lu_dirpage lp_dir; /* for MDS_READPAGE */
3460 struct lu_idxpage lp_idx; /* for OBD_IDX_READ */
3461 char lp_array[LU_PAGE_SIZE];
3464 /* security opcodes */
3467 SEC_CTX_INIT_CONT = 802,
3470 SEC_FIRST_OPC = SEC_CTX_INIT
3474 * capa related definitions
3476 #define CAPA_HMAC_MAX_LEN 64
3477 #define CAPA_HMAC_KEY_MAX_LEN 56
3479 /* NB take care when changing the sequence of elements in this struct,
3480 * because the offset info is used in find_capa() */
3481 struct lustre_capa {
3482 struct lu_fid lc_fid; /** fid */
3483 __u64 lc_opc; /** operations allowed */
3484 __u64 lc_uid; /** file owner */
3485 __u64 lc_gid; /** file group */
3486 __u32 lc_flags; /** HMAC algorithm & flags */
3487 __u32 lc_keyid; /** key# used for the capability */
3488 __u32 lc_timeout; /** capa timeout value (sec) */
3489 __u32 lc_expiry; /** expiry time (sec) */
3490 __u8 lc_hmac[CAPA_HMAC_MAX_LEN]; /** HMAC */
3491 } __attribute__((packed));
3493 extern void lustre_swab_lustre_capa(struct lustre_capa *c);
3495 /** lustre_capa::lc_opc */
3497 CAPA_OPC_BODY_WRITE = 1<<0, /**< write object data */
3498 CAPA_OPC_BODY_READ = 1<<1, /**< read object data */
3499 CAPA_OPC_INDEX_LOOKUP = 1<<2, /**< lookup object fid */
3500 CAPA_OPC_INDEX_INSERT = 1<<3, /**< insert object fid */
3501 CAPA_OPC_INDEX_DELETE = 1<<4, /**< delete object fid */
3502 CAPA_OPC_OSS_WRITE = 1<<5, /**< write oss object data */
3503 CAPA_OPC_OSS_READ = 1<<6, /**< read oss object data */
3504 CAPA_OPC_OSS_TRUNC = 1<<7, /**< truncate oss object */
3505 CAPA_OPC_OSS_DESTROY = 1<<8, /**< destroy oss object */
3506 CAPA_OPC_META_WRITE = 1<<9, /**< write object meta data */
3507 CAPA_OPC_META_READ = 1<<10, /**< read object meta data */
3510 #define CAPA_OPC_OSS_RW (CAPA_OPC_OSS_READ | CAPA_OPC_OSS_WRITE)
3511 #define CAPA_OPC_MDS_ONLY \
3512 (CAPA_OPC_BODY_WRITE | CAPA_OPC_BODY_READ | CAPA_OPC_INDEX_LOOKUP | \
3513 CAPA_OPC_INDEX_INSERT | CAPA_OPC_INDEX_DELETE)
3514 #define CAPA_OPC_OSS_ONLY \
3515 (CAPA_OPC_OSS_WRITE | CAPA_OPC_OSS_READ | CAPA_OPC_OSS_TRUNC | \
3516 CAPA_OPC_OSS_DESTROY)
3517 #define CAPA_OPC_MDS_DEFAULT ~CAPA_OPC_OSS_ONLY
3518 #define CAPA_OPC_OSS_DEFAULT ~(CAPA_OPC_MDS_ONLY | CAPA_OPC_OSS_ONLY)
3520 /* MDS capability covers object capability for operations of body r/w
3521 * (dir readpage/sendpage), index lookup/insert/delete and meta data r/w,
3522 * while OSS capability only covers object capability for operations of
3523 * oss data (file content) r/w/truncate.
3525 static inline int capa_for_mds(struct lustre_capa *c)
3527 return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) != 0;
3530 static inline int capa_for_oss(struct lustre_capa *c)
3532 return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) == 0;
3535 /* lustre_capa::lc_hmac_alg */
3537 CAPA_HMAC_ALG_SHA1 = 1, /**< sha1 algorithm */
3541 #define CAPA_FL_MASK 0x00ffffff
3542 #define CAPA_HMAC_ALG_MASK 0xff000000
3544 struct lustre_capa_key {
3545 __u64 lk_seq; /**< mds# */
3546 __u32 lk_keyid; /**< key# */
3548 __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
3549 } __attribute__((packed));
3551 extern void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
3553 /** The link ea holds 1 \a link_ea_entry for each hardlink */
3554 #define LINK_EA_MAGIC 0x11EAF1DFUL
3555 struct link_ea_header {
3558 __u64 leh_len; /* total size */
3564 /** Hardlink data is name and parent fid.
3565 * Stored in this crazy struct for maximum packing and endian-neutrality
3567 struct link_ea_entry {
3568 /** __u16 stored big-endian, unaligned */
3569 unsigned char lee_reclen[2];
3570 unsigned char lee_parent_fid[sizeof(struct lu_fid)];
3572 }__attribute__((packed));
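/*
 * Editor's sketch: lee_reclen is stored as two big-endian bytes regardless
 * of host byte order, so it is reassembled by hand instead of being read as
 * a __u16.  The helper name is hypothetical.
 */
#if 0	/* illustrative example only */
static int lee_reclen_example(const struct link_ea_entry *lee)
{
	return (lee->lee_reclen[0] << 8) | lee->lee_reclen[1];
}
#endif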
3574 /** fid2path request/reply structure */
3575 struct getinfo_fid2path {
3576 struct lu_fid gf_fid;
3581 } __attribute__((packed));
3583 void lustre_swab_fid2path (struct getinfo_fid2path *gf);
3586 LAYOUT_INTENT_ACCESS = 0,
3587 LAYOUT_INTENT_READ = 1,
3588 LAYOUT_INTENT_WRITE = 2,
3589 LAYOUT_INTENT_GLIMPSE = 3,
3590 LAYOUT_INTENT_TRUNC = 4,
3591 LAYOUT_INTENT_RELEASE = 5,
3592 LAYOUT_INTENT_RESTORE = 6
3595 /* enqueue layout lock with intent */
3596 struct layout_intent {
3597 __u32 li_opc; /* intent operation for enqueue, read, write etc */
3603 void lustre_swab_layout_intent(struct layout_intent *li);
3606 * On the wire version of hsm_progress structure.
3608 * Contains the userspace hsm_progress and some internal fields.
3610 struct hsm_progress_kernel {
3611 /* Field taken from struct hsm_progress */
3614 struct hsm_extent hpk_extent;
3616 __u16 hpk_errval; /* positive val */
3618 /* Additional fields */
3619 __u64 hpk_data_version;
3621 } __attribute__((packed));
3623 extern void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
3624 extern void lustre_swab_hsm_current_action(struct hsm_current_action *action);
3625 extern void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
3627 extern void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
3628 extern void lustre_swab_hsm_request(struct hsm_request *hr);
3631 * These are the object update opcodes under UPDATE_OBJ, which are currently
3632 * used by cross-ref operations between MDTs.
3634 * During a cross-ref operation, the master MDT, which the client sends the
3635 * request to, will disassemble the operation into object updates, then the OSP
3636 * will send these updates to the remote MDT to be executed.
3638 * Update request format
3639 * magic: UPDATE_BUFFER_MAGIC_V1
3640 * Count: How many updates in the req.
3641 * bufs[0] : following are packets of object.
3643 * type: object_update_op, the op code of update
3644 * fid: The object fid of the update.
3645 * lens/bufs: other parameters of the update.
3647 * type: object_update_op, the op code of update
3648 * fid: The object fid of the update.
3649 * lens/bufs: other parameters of the update.
3651 * update[7]: type: object_update_op, the op code of update
3652 * fid: The object fid of the update.
3653 * lens/bufs: other parameters of the update.
3654 * Currently a maximum of 8 updates per object update request.
3656 *******************************************************************
3657 * update reply format:
3659 * ur_version: UPDATE_REPLY_V1
3660 * ur_count: The count of the reply, which is usually equal
3661 * to the number of updates in the request.
3662 * ur_lens: The reply lengths of each object update.
3664 * replies: 1st update reply [4bytes_ret: other body]
3665 * 2nd update reply [4bytes_ret: other body]
3667 * nth update reply [4bytes_ret: other body]
3669 * For each update reply, the format is:
3670 * result (4 bytes) : other reply data
3673 #define UPDATE_MAX_OPS 10
3674 #define UPDATE_BUFFER_MAGIC_V1 0xBDDE0001
3675 #define UPDATE_BUFFER_MAGIC UPDATE_BUFFER_MAGIC_V1
3676 #define UPDATE_BUF_COUNT 8
3677 enum object_update_op {
3686 OBJ_INDEX_LOOKUP = 9,
3687 OBJ_INDEX_INSERT = 10,
3688 OBJ_INDEX_DELETE = 11,
3695 struct lu_fid u_fid;
3696 __u32 u_lens[UPDATE_BUF_COUNT];
3706 #define UPDATE_REPLY_V1 0x00BD0001
3707 struct update_reply {
3713 void lustre_swab_update_buf(struct update_buf *ub);
3714 void lustre_swab_update_reply_buf(struct update_reply *ur);
3716 /** layout swap request structure
3717 * fid1 and fid2 are in mdt_body
3719 struct mdc_swap_layouts {
3723 void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
3726 struct lustre_handle cd_handle;
3727 struct lu_fid cd_fid;
3728 __u64 cd_data_version;
3729 __u64 cd_reserved[8];
3732 void lustre_swab_close_data(struct close_data *data);