#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;
/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage within the cluster.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif
/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	int r;

	/* object extent? */
	r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, objlen);
	if (r < 0)
		return r;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);

	return 0;
}
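/*
 * Worked example, assuming a simple layout (4M objects, no striping):
 * a file extent off=3M, *plen=2M starts 3M into object 0 and crosses
 * into object 1.  calc_layout() returns *objnum=0, *objoff=3M,
 * *objlen=1M and shortens *plen to 1M; the caller issues the
 * remaining 1M as a separate request.
 */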
static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
			struct bio *bio, size_t bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio = bio;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

#define osd_req_op_data(oreq, whch, typ, fld)				\
({									\
	struct ceph_osd_request *__oreq = (oreq);			\
	unsigned int __whch = (whch);					\
	BUG_ON(__whch >= __oreq->r_num_ops);				\
	&__oreq->r_ops[__whch].typ.fld;					\
})
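/*
 * For illustration: osd_req_op_data(req, 0, extent, osd_data) yields
 * &req->r_ops[0].extent.osd_data, with the op index bounds-checked.
 * The statement expression evaluates @oreq and @whch only once.
 */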
static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
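/*
 * Usage sketch (not from this file): after osd_req_op_extent_init()
 * sets up a read op, point it at a freshly allocated page vector so
 * the reply data lands there:
 *
 *	pages = ceph_alloc_page_vector(calc_pages_for(0, len), GFP_NOFS);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
 *					 false, false);
 */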
void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
			unsigned int which, struct bio *bio, size_t bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	}
	ceph_osd_data_init(osd_data);
}
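/*
 * Note that calc_pages_for(off, len) counts pages spanned by the byte
 * range, not len >> PAGE_SHIFT: with 4K pages, an 8K buffer starting
 * 512 bytes into its first page spans 3 pages, and all 3 are released
 * above when own_pages is set.
 */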
static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	default:
		break;
	}
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;

	dest->flags = src->flags;
	dest->paused = src->paused;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oid_destroy(&t->target_oid);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!list_empty(&req->r_unsafe_item));
	WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     atomic_read(&req->r_kref.refcount));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     atomic_read(&req->r_kref.refcount));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	init_completion(&req->r_safe_completion);
	RB_CLEAR_NODE(&req->r_node);
	INIT_LIST_HEAD(&req->r_unsafe_item);

	target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(atomic_read(&req->r_kref.refcount) != 1);
	request_release_checks(req);

	WARN_ON(atomic_read(&request_msg->kref.refcount) != 1);
	WARN_ON(atomic_read(&reply_msg->kref.refcount) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
			      gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(ceph_oid_empty(&req->r_base_oid));

	/* create request message */
	msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
	msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
	msg_size += 2 + 4 + 8 + 4 + 4; /* oloc */
	msg_size += 1 + 8 + 4 + 4; /* pgid */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4; /* retry_attempt */
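	/*
	 * For illustration: with one op, a 12-byte oid such as
	 * "100.00000000" and no snap context, this works out to
	 * 12 + 20 + 22 + 17 + (4 + 12) + (2 + sizeof(struct ceph_osd_op))
	 * + 8 + 8 + 4 + 4 bytes for the front, which is what
	 * encode_request() lays down, assuming the target oid stays the
	 * same length as the base oid.
	 */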
	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
	else
		msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}
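/*
 * GENERATE_CASE expands to "case CEPH_OSD_OP_<op>: return true;" for
 * every opcode listed in __CEPH_FORALL_OSD_OPS (rados.h), so this
 * switch stays in sync with the opcode table automatically.
 */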
/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
				unsigned int which, u16 opcode,
				u64 offset, u64 length,
				u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
				unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);

void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			u16 opcode, const char *class, const char *method)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;

	BUG_ON(opcode != CEPH_OSD_OP_CALL);

	pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
	BUG_ON(!pagelist);
	ceph_pagelist_init(pagelist);

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ceph_pagelist_append(pagelist, class, size);
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ceph_pagelist_append(pagelist, method, size);
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
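/*
 * Usage sketch (hypothetical class/method names): invoking an object
 * class method with input carried in a pagelist would look like
 *
 *	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "lock", "lock");
 *	osd_req_op_cls_request_data_pagelist(req, 0, pagelist);
 *
 * The class and method names travel in request_info; the pagelist
 * bytes become the method input and bump indata_len accordingly.
 */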
int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	ceph_pagelist_init(pagelist);

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ceph_pagelist_append(pagelist, name, payload_len);

	op->xattr.value_len = size;
	ceph_pagelist_append(pagelist, value, size);
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, osd_data->bio, length);
#endif
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	if (WARN_ON(!osd_req_opcode_valid(src->op))) {
		pr_err("unrecognized osd opcode %d\n", src->op);

		return 0;
	}

	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_STARTSYNC:
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
		    cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
		    cpu_to_le64(src->alloc_hint.expected_write_size);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}
/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
					GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = le32_to_cpu(layout->fl_object_size);
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_flags = flags;
	req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
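/*
 * Usage sketch (illustrative, error handling elided): a single-op
 * object read of *plen bytes at file offset @off would be built as
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, plen,
 *				    0, 1, CEPH_OSD_OP_READ,
 *				    CEPH_OSD_FLAG_READ, NULL, 0, 0, false);
 *
 * On return *plen may have been shortened if the extent crossed an
 * object boundary (see calc_layout()).
 */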
/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
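/*
 * DEFINE_RB_FUNCS(request, ...) generates lookup_request(),
 * insert_request() and erase_request() for the per-OSD and homeless
 * request trees used below, keyed by r_tid.
 */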
static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	atomic_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (atomic_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
		     atomic_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
	     atomic_read(&osd->o_ref) - 1);
	if (atomic_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}
/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->base_oloc.pool);
	return (t->flags & CEPH_OSD_FLAG_READ && pauserd) ||
	       (t->flags & CEPH_OSD_FLAG_WRITE && pausewr);
}

enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};

static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   u32 *last_force_resend,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool force_resend = false;
	bool need_check_tiering = false;
	bool need_resend = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc->osdmap,
					     CEPH_OSDMAP_SORTBITWISE);
	enum calc_target_result ct_res;
	int ret;

	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (last_force_resend &&
		    *last_force_resend < pi->last_force_request_resend) {
			*last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (!last_force_resend) {
			force_resend = true;
		}
	}
	if (ceph_oid_empty(&t->target_oid) || force_resend) {
		ceph_oid_copy(&t->target_oid, &t->base_oid);
		need_check_tiering = true;
	}
	if (ceph_oloc_empty(&t->target_oloc) || force_resend) {
		ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
		need_check_tiering = true;
	}

	if (need_check_tiering &&
	    (t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;
	}

	ret = ceph_object_locator_to_pg(osdc->osdmap, &t->target_oid,
					&t->target_oloc, &pgid);
	if (ret) {
		WARN_ON(ret != -ENOENT);
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		need_resend = true;
	}

	if (ceph_pg_compare(&t->pgid, &pgid) ||
	    ceph_osds_changed(&t->acting, &acting, any_change) ||
	    force_resend) {
		t->pgid = pgid; /* struct */
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;

		t->osd = acting.primary;
		need_resend = true;
	}

	ct_res = need_resend ? CALC_TARGET_NEED_RESEND : CALC_TARGET_NO_ACTION;
out:
	dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
	return ct_res;
}
static void setup_request_data(struct ceph_osd_request *req,
			       struct ceph_msg *msg)
{
	u32 data_len = 0;
	int i;

	if (!list_empty(&msg->data))
		return;

	WARN_ON(msg->data_length);
	for (i = 0; i < req->r_num_ops; i++) {
		struct ceph_osd_req_op *op = &req->r_ops[i];

		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
			WARN_ON(op->indata_len != op->extent.length);
			ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
			WARN_ON(op->indata_len != op->xattr.name_len +
						  op->xattr.value_len);
			ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
			break;
		case CEPH_OSD_OP_NOTIFY_ACK:
			ceph_osdc_msg_data_add(msg,
					       &op->notify_ack.request_data);
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->raw_data_in);
			break;
		case CEPH_OSD_OP_READ:
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->extent.osd_data);
			break;

		/* both */
		case CEPH_OSD_OP_CALL:
			WARN_ON(op->indata_len != op->cls.class_len +
						  op->cls.method_len +
						  op->cls.indata_len);
			ceph_osdc_msg_data_add(msg, &op->cls.request_info);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(msg, &op->cls.request_data);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->cls.response_data);
			break;
		case CEPH_OSD_OP_NOTIFY:
			ceph_osdc_msg_data_add(msg,
					       &op->notify.request_data);
			ceph_osdc_msg_data_add(req->r_reply,
					       &op->notify.response_data);
			break;
		}

		data_len += op->indata_len;
	}

	WARN_ON(data_len != msg->data_length);
}

static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	u32 data_len = 0;
	int i;

	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
		/* snapshots aren't writeable */
		WARN_ON(req->r_snapid != CEPH_NOSNAP);
	} else {
		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
			req->r_data_offset || req->r_snapc);
	}

	setup_request_data(req, msg);

	ceph_encode_32(&p, 1); /* client_inc, always 1 */
	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
	ceph_encode_32(&p, req->r_flags);
	ceph_encode_timespec(p, &req->r_mtime);
	p += sizeof(struct ceph_timespec);
	/* aka reassert_version */
	memcpy(p, &req->r_replay_version, sizeof(req->r_replay_version));
	p += sizeof(req->r_replay_version);

	/* oloc */
	ceph_encode_8(&p, 4);
	ceph_encode_8(&p, 4);
	ceph_encode_32(&p, 8 + 4 + 4);
	ceph_encode_64(&p, req->r_t.target_oloc.pool);
	ceph_encode_32(&p, -1); /* preferred */
	ceph_encode_32(&p, 0); /* key len */

	/* pgid */
	ceph_encode_8(&p, 1);
	ceph_encode_64(&p, req->r_t.pgid.pool);
	ceph_encode_32(&p, req->r_t.pgid.seed);
	ceph_encode_32(&p, -1); /* preferred */

	/* oid */
	ceph_encode_32(&p, req->r_t.target_oid.name_len);
	memcpy(p, req->r_t.target_oid.name, req->r_t.target_oid.name_len);
	p += req->r_t.target_oid.name_len;

	/* ops, can imply data */
	ceph_encode_16(&p, req->r_num_ops);
	for (i = 0; i < req->r_num_ops; i++) {
		data_len += osd_req_encode_op(p, &req->r_ops[i]);
		p += sizeof(struct ceph_osd_op);
	}

	ceph_encode_64(&p, req->r_snapid); /* snapid */
	if (req->r_snapc) {
		ceph_encode_64(&p, req->r_snapc->seq);
		ceph_encode_32(&p, req->r_snapc->num_snaps);
		for (i = 0; i < req->r_snapc->num_snaps; i++)
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
	} else {
		ceph_encode_64(&p, 0); /* snap_seq */
		ceph_encode_32(&p, 0); /* snaps len */
	}

	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */

	BUG_ON(p > end);
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	msg->hdr.data_len = cpu_to_le32(data_len);
	/*
	 * The header "data_off" is a hint to the receiver allowing it
	 * to align received data into its buffers such that there's no
	 * need to re-copy it before writing it to disk (direct I/O).
	 */
	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);

	dout("%s req %p oid %*pE oid_len %d front %zu data %u\n", __func__,
	     req, req->r_t.target_oid.name_len, req->r_t.target_oid.name,
	     req->r_t.target_oid.name_len, msg->front.iov_len, data_len);
}
/*
 * @req has to be assigned a tid and registered.
 */
static void send_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;

	verify_osd_locked(osd);
	WARN_ON(osd->o_osd != req->r_t.osd);

	/*
	 * We may have a previously queued request message hanging
	 * around.  Cancel it to avoid corrupting the msgr.
	 */
	if (req->r_sent)
		ceph_msg_revoke(req->r_request);

	req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
	if (req->r_attempts)
		req->r_flags |= CEPH_OSD_FLAG_RETRY;
	else
		WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);

	encode_request(req, req->r_request);

	dout("%s req %p tid %llu to pg %llu.%x osd%d flags 0x%x attempt %d\n",
	     __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
	     req->r_t.osd, req->r_flags, req->r_attempts);

	req->r_t.paused = false;
	req->r_stamp = jiffies;
	req->r_attempts++;

	req->r_sent = osd->o_incarnation;
	req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
	ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
}

static void maybe_request_map(struct ceph_osd_client *osdc)
{
	bool continuous = false;

	verify_osdc_locked(osdc);
	WARN_ON(!osdc->osdmap->epoch);

	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
	    ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD) ||
	    ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
		dout("%s osdc %p continuous\n", __func__, osdc);
		continuous = true;
	} else {
		dout("%s osdc %p onetime\n", __func__, osdc);
	}

	if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
			       osdc->osdmap->epoch + 1, continuous))
		ceph_monc_renew_subs(&osdc->client->monc);
}

static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd *osd;
	bool need_send = false;
	bool promoted = false;

	WARN_ON(req->r_tid || req->r_got_reply);
	dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);

again:
	calc_target(osdc, &req->r_t, &req->r_last_force_resend, false);
	osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
	if (IS_ERR(osd)) {
		WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
		goto promote;
	}

	if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
	    ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR)) {
		dout("req %p pausewr\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
		   ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
		dout("req %p pauserd\n", req);
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
		   !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
				     CEPH_OSD_FLAG_FULL_FORCE)) &&
		   (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
		    pool_full(osdc, req->r_t.base_oloc.pool))) {
		dout("req %p full/pool_full\n", req);
		pr_warn_ratelimited("FULL or reached pool quota\n");
		req->r_t.paused = true;
		maybe_request_map(osdc);
	} else if (!osd_homeless(osd)) {
		need_send = true;
	} else {
		maybe_request_map(osdc);
	}

	mutex_lock(&osd->lock);
	/*
	 * Assign the tid atomically with send_request() to protect
	 * multiple writes to the same object from racing with each
	 * other, resulting in out of order ops on the OSDs.
	 */
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(osd, req);
	if (need_send)
		send_request(req);
	mutex_unlock(&osd->lock);

	if (promoted)
		downgrade_write(&osdc->lock);
	return;

promote:
	up_read(&osdc->lock);
	down_write(&osdc->lock);
	wrlocked = true;
	promoted = true;
	goto again;
}
static void account_request(struct ceph_osd_request *req)
{
	unsigned int mask = CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK;

	if (req->r_flags & CEPH_OSD_FLAG_READ) {
		WARN_ON(req->r_flags & mask);
		req->r_flags |= CEPH_OSD_FLAG_ACK;
	} else if (req->r_flags & CEPH_OSD_FLAG_WRITE)
		WARN_ON(!(req->r_flags & mask));
	else
		WARN_ON(1);

	WARN_ON(req->r_unsafe_callback && (req->r_flags & mask) != mask);
	atomic_inc(&req->r_osdc->num_requests);
}

static void submit_request(struct ceph_osd_request *req, bool wrlocked)
{
	ceph_osdc_get_request(req);
	account_request(req);
	__submit_request(req, wrlocked);
}

static void __finish_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_osd *osd = req->r_osd;

	verify_osd_locked(osd);
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	unlink_request(osd, req);
	atomic_dec(&osdc->num_requests);

	/*
	 * If an OSD has failed or returned and a request has been sent
	 * twice, it's possible to get a reply and end up here while the
	 * request message is queued for delivery.  We will ignore the
	 * reply, so not a big deal, but better to try and catch it.
	 */
	ceph_msg_revoke(req->r_request);
	ceph_msg_revoke_incoming(req->r_reply);
}

static void finish_request(struct ceph_osd_request *req)
{
	__finish_request(req);
	ceph_osdc_put_request(req);
}

static void __complete_request(struct ceph_osd_request *req)
{
	if (req->r_callback)
		req->r_callback(req);
	else
		complete_all(&req->r_completion);
}

static void cancel_request(struct ceph_osd_request *req)
{
	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);

	finish_request(req);
}
/*
 * lingering requests, watch/notify v2 infrastructure
 */
static void linger_release(struct kref *kref)
{
	struct ceph_osd_linger_request *lreq =
	    container_of(kref, struct ceph_osd_linger_request, kref);

	dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
	     lreq->reg_req, lreq->ping_req);
	WARN_ON(!RB_EMPTY_NODE(&lreq->node));
	WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
	WARN_ON(!list_empty(&lreq->scan_item));
	WARN_ON(lreq->osd);

	if (lreq->reg_req)
		ceph_osdc_put_request(lreq->reg_req);
	if (lreq->ping_req)
		ceph_osdc_put_request(lreq->ping_req);
	target_destroy(&lreq->t);
	kfree(lreq);
}

static void linger_put(struct ceph_osd_linger_request *lreq)
{
	if (lreq)
		kref_put(&lreq->kref, linger_release);
}

static struct ceph_osd_linger_request *
linger_get(struct ceph_osd_linger_request *lreq)
{
	kref_get(&lreq->kref);
	return lreq;
}

static struct ceph_osd_linger_request *
linger_alloc(struct ceph_osd_client *osdc)
{
	struct ceph_osd_linger_request *lreq;

	lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
	if (!lreq)
		return NULL;

	kref_init(&lreq->kref);
	mutex_init(&lreq->lock);
	RB_CLEAR_NODE(&lreq->node);
	RB_CLEAR_NODE(&lreq->osdc_node);
	INIT_LIST_HEAD(&lreq->scan_item);
	init_completion(&lreq->reg_commit_wait);
	init_completion(&lreq->notify_finish_wait);

	lreq->osdc = osdc;
	target_init(&lreq->t);

	dout("%s lreq %p\n", __func__, lreq);
	return lreq;
}

DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
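/*
 * Two trees per linger request: insert_linger()/erase_linger()
 * (insert/erase only, no lookup) manage the per-OSD tree linked
 * through ->node, while lookup_linger_osdc() and friends manage the
 * client-wide tree linked through ->osdc_node.  Both are keyed by
 * linger_id.
 */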
/*
 * Create linger request <-> OSD session relation.
 *
 * @lreq has to be registered, @osd may be homeless.
 */
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(!lreq->linger_id || lreq->osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_linger(&osd->o_linger_requests, lreq);
	lreq->osd = osd;
}

static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq)
{
	verify_osd_locked(osd);
	WARN_ON(lreq->osd != osd);
	dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
	     osd->o_osd, lreq, lreq->linger_id);

	lreq->osd = NULL;
	erase_linger(&osd->o_linger_requests, lreq);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __linger_registered(struct ceph_osd_linger_request *lreq)
{
	verify_osdc_locked(lreq->osdc);

	return !RB_EMPTY_NODE(&lreq->osdc_node);
}

static bool linger_registered(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	bool registered;

	down_read(&osdc->lock);
	registered = __linger_registered(lreq);
	up_read(&osdc->lock);

	return registered;
}

static void linger_register(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);
	WARN_ON(lreq->linger_id);

	linger_get(lreq);
	lreq->linger_id = ++osdc->last_linger_id;
	insert_linger_osdc(&osdc->linger_requests, lreq);
}

static void linger_unregister(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_osdc_wrlocked(osdc);

	erase_linger_osdc(&osdc->linger_requests, lreq);
	linger_put(lreq);
}

static void cancel_linger_request(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	WARN_ON(!req->r_linger);
	cancel_request(req);
	linger_put(lreq);
}

struct linger_work {
	struct work_struct work;
	struct ceph_osd_linger_request *lreq;

	union {
		struct {
			u64 notify_id;
			u64 notifier_id;
			void *payload; /* points into @msg front */
			size_t payload_len;

			struct ceph_msg *msg; /* for ceph_msg_put() */
		} notify;
		struct {
			int err;
		} error;
	};
};

static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
				       work_func_t workfn)
{
	struct linger_work *lwork;

	lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
	if (!lwork)
		return NULL;

	INIT_WORK(&lwork->work, workfn);
	lwork->lreq = linger_get(lreq);

	return lwork;
}

static void lwork_free(struct linger_work *lwork)
{
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	linger_put(lreq);
	kfree(lwork);
}

static void lwork_queue(struct linger_work *lwork)
{
	struct ceph_osd_linger_request *lreq = lwork->lreq;
	struct ceph_osd_client *osdc = lreq->osdc;

	verify_lreq_locked(lreq);
	queue_work(osdc->notify_wq, &lwork->work);
}

static void do_watch_notify(struct work_struct *w)
{
	struct linger_work *lwork = container_of(w, struct linger_work, work);
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}

	WARN_ON(!lreq->is_watch);
	dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
	     __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
	     lwork->notify.payload_len);
	lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
		  lwork->notify.notifier_id, lwork->notify.payload,
		  lwork->notify.payload_len);

out:
	ceph_msg_put(lwork->notify.msg);
	lwork_free(lwork);
}

static void do_watch_error(struct work_struct *w)
{
	struct linger_work *lwork = container_of(w, struct linger_work, work);
	struct ceph_osd_linger_request *lreq = lwork->lreq;

	if (!linger_registered(lreq)) {
		dout("%s lreq %p not registered\n", __func__, lreq);
		goto out;
	}

	dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
	lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);

out:
	lwork_free(lwork);
}

static void queue_watch_error(struct ceph_osd_linger_request *lreq)
{
	struct linger_work *lwork;

	lwork = lwork_alloc(lreq, do_watch_error);
	if (!lwork) {
		pr_err("failed to allocate error-lwork\n");
		return;
	}

	lwork->error.err = lreq->last_error;
	lwork_queue(lwork);
}

static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
				       int result)
{
	if (!completion_done(&lreq->reg_commit_wait)) {
		lreq->reg_commit_error = (result <= 0 ? result : 0);
		complete_all(&lreq->reg_commit_wait);
	}
}

static void linger_commit_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
	     lreq->linger_id, req->r_result);
	WARN_ON(!__linger_registered(lreq));
	linger_reg_commit_complete(lreq, req->r_result);
	lreq->committed = true;

	if (!lreq->is_watch) {
		struct ceph_osd_data *osd_data =
		    osd_req_op_data(req, 0, notify, response_data);
		void *p = page_address(osd_data->pages[0]);

		WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
			osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);

		/* make note of the notify_id */
		if (req->r_ops[0].outdata_len >= sizeof(u64)) {
			lreq->notify_id = ceph_decode_64(&p);
			dout("lreq %p notify_id %llu\n", lreq,
			     lreq->notify_id);
		} else {
			dout("lreq %p no notify_id\n", lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}

static int normalize_watch_error(int err)
{
	/*
	 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
	 * notification and a failure to reconnect because we raced with
	 * the delete appear the same to the user.
	 */
	if (err == -ENOENT)
		err = -ENOTCONN;

	return err;
}
static void linger_reconnect_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
	     lreq, lreq->linger_id, req->r_result, lreq->last_error);
	if (req->r_result < 0) {
		if (!lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}

static void send_linger(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req = lreq->reg_req;
	struct ceph_osd_req_op *op = &req->r_ops[0];

	verify_osdc_wrlocked(req->r_osdc);
	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);

	if (req->r_osd)
		cancel_linger_request(req);

	request_reinit(req);
	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = lreq->t.flags;
	req->r_mtime = lreq->mtime;

	mutex_lock(&lreq->lock);
	if (lreq->is_watch && lreq->committed) {
		WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
			op->watch.cookie != lreq->linger_id);
		op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
		op->watch.gen = ++lreq->register_gen;
		dout("lreq %p reconnect register_gen %u\n", lreq,
		     op->watch.gen);
		req->r_callback = linger_reconnect_cb;
	} else {
		if (!lreq->is_watch)
			lreq->notify_id = 0;
		else
			WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
		dout("lreq %p register\n", lreq);
		req->r_callback = linger_commit_cb;
	}
	mutex_unlock(&lreq->lock);

	req->r_priv = linger_get(lreq);
	req->r_linger = true;

	submit_request(req, true);
}

static void linger_ping_cb(struct ceph_osd_request *req)
{
	struct ceph_osd_linger_request *lreq = req->r_priv;

	mutex_lock(&lreq->lock);
	dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
	     __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
	     lreq->last_error);
	if (lreq->register_gen == req->r_ops[0].watch.gen) {
		if (req->r_result && !lreq->last_error) {
			lreq->last_error = normalize_watch_error(req->r_result);
			queue_watch_error(lreq);
		}
	} else {
		dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
		     lreq->register_gen, req->r_ops[0].watch.gen);
	}

	mutex_unlock(&lreq->lock);
	linger_put(lreq);
}

static void send_linger_ping(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd_request *req = lreq->ping_req;
	struct ceph_osd_req_op *op = &req->r_ops[0];

	if (ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD)) {
		dout("%s PAUSERD\n", __func__);
		return;
	}

	lreq->ping_sent = jiffies;
	dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
	     __func__, lreq, lreq->linger_id, lreq->ping_sent,
	     lreq->register_gen);

	if (req->r_osd)
		cancel_linger_request(req);

	request_reinit(req);
	target_copy(&req->r_t, &lreq->t);

	WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
		op->watch.cookie != lreq->linger_id ||
		op->watch.op != CEPH_OSD_WATCH_OP_PING);
	op->watch.gen = lreq->register_gen;
	req->r_callback = linger_ping_cb;
	req->r_priv = linger_get(lreq);
	req->r_linger = true;

	ceph_osdc_get_request(req);
	account_request(req);
	req->r_tid = atomic64_inc_return(&osdc->last_tid);
	link_request(lreq->osd, req);
	send_request(req);
}

static void linger_submit(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;
	struct ceph_osd *osd;

	calc_target(osdc, &lreq->t, &lreq->last_force_resend, false);
	osd = lookup_create_osd(osdc, lreq->t.osd, true);
	link_linger(osd, lreq);

	send_linger(lreq);
}

/*
 * @lreq has to be both registered and linked.
 */
static void __linger_cancel(struct ceph_osd_linger_request *lreq)
{
	if (lreq->is_watch && lreq->ping_req->r_osd)
		cancel_linger_request(lreq->ping_req);
	if (lreq->reg_req->r_osd)
		cancel_linger_request(lreq->reg_req);
	unlink_linger(lreq->osd, lreq);
	linger_unregister(lreq);
}

static void linger_cancel(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_client *osdc = lreq->osdc;

	down_write(&osdc->lock);
	if (__linger_registered(lreq))
		__linger_cancel(lreq);
	up_write(&osdc->lock);
}

static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
{
	int ret;

	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
	return ret ?: lreq->reg_commit_error;
}

static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
{
	int ret;

	dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
	ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
	return ret ?: lreq->notify_finish_error;
}
2222 /*
2223 * Timeout callback, called every N seconds. When one or more OSD
2224 * requests have been active for more than N seconds, we send a keepalive
2225 * (tag + timestamp) to their OSDs to ensure any communications channel
2226 * reset is detected.
2227 */
2228 static void handle_timeout(struct work_struct *work)
2230 struct ceph_osd_client *osdc =
2231 container_of(work, struct ceph_osd_client, timeout_work.work);
2232 struct ceph_options *opts = osdc->client->options;
2233 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
2234 LIST_HEAD(slow_osds);
2235 struct rb_node *n, *p;
2237 dout("%s osdc %p\n", __func__, osdc);
2238 down_write(&osdc->lock);
2241 * ping osds that are a bit slow. this ensures that if there
2242 * is a break in the TCP connection we will notice, and reopen
2243 * a connection with that osd (from the fault callback).
2245 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2246 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2247 bool found = false;
2249 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
2250 struct ceph_osd_request *req =
2251 rb_entry(p, struct ceph_osd_request, r_node);
2253 if (time_before(req->r_stamp, cutoff)) {
2254 dout(" req %p tid %llu on osd%d is laggy\n",
2255 req, req->r_tid, osd->o_osd);
2256 found = true;
2257 }
2258 }
2259 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
2260 struct ceph_osd_linger_request *lreq =
2261 rb_entry(p, struct ceph_osd_linger_request, node);
2263 dout(" lreq %p linger_id %llu is served by osd%d\n",
2264 lreq, lreq->linger_id, osd->o_osd);
2265 found = true;
2267 mutex_lock(&lreq->lock);
2268 if (lreq->is_watch && lreq->committed && !lreq->last_error)
2269 send_linger_ping(lreq);
2270 mutex_unlock(&lreq->lock);
2271 }
2273 if (found)
2274 list_move_tail(&osd->o_keepalive_item, &slow_osds);
2275 }
2277 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
2278 maybe_request_map(osdc);
2280 while (!list_empty(&slow_osds)) {
2281 struct ceph_osd *osd = list_first_entry(&slow_osds,
2282 struct ceph_osd,
2283 o_keepalive_item);
2284 list_del_init(&osd->o_keepalive_item);
2285 ceph_con_keepalive(&osd->o_con);
2288 up_write(&osdc->lock);
2289 schedule_delayed_work(&osdc->timeout_work,
2290 osdc->client->options->osd_keepalive_timeout);
2293 static void handle_osds_timeout(struct work_struct *work)
2295 struct ceph_osd_client *osdc =
2296 container_of(work, struct ceph_osd_client,
2297 osds_timeout_work.work);
2298 unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
2299 struct ceph_osd *osd, *nosd;
2301 dout("%s osdc %p\n", __func__, osdc);
2302 down_write(&osdc->lock);
2303 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
2304 if (time_before(jiffies, osd->lru_ttl))
2305 break;
2307 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
2308 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
2309 close_osd(osd);
2310 }
2312 up_write(&osdc->lock);
2313 schedule_delayed_work(&osdc->osds_timeout_work,
2314 round_jiffies_relative(delay));
2317 static int ceph_oloc_decode(void **p, void *end,
2318 struct ceph_object_locator *oloc)
2319 {
2320 u8 struct_v, struct_cv;
2321 u32 len;
2322 void *struct_end;
2323 int ret = 0;
2325 ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2326 struct_v = ceph_decode_8(p);
2327 struct_cv = ceph_decode_8(p);
2328 if (struct_v < 3) {
2329 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
2330 struct_v, struct_cv);
2331 goto e_inval;
2332 }
2333 if (struct_cv > 6) {
2334 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
2335 struct_v, struct_cv);
2336 goto e_inval;
2337 }
2338 len = ceph_decode_32(p);
2339 ceph_decode_need(p, end, len, e_inval);
2340 struct_end = *p + len;
2342 oloc->pool = ceph_decode_64(p);
2343 *p += 4; /* skip preferred */
2345 len = ceph_decode_32(p);
2346 if (len > 0) {
2347 pr_warn("ceph_object_locator::key is set\n");
2348 goto e_inval;
2349 }
2351 if (struct_v >= 5) {
2352 len = ceph_decode_32(p);
2353 if (len > 0) {
2354 pr_warn("ceph_object_locator::nspace is set\n");
2355 goto e_inval;
2356 }
2357 }
2359 if (struct_v >= 6) {
2360 s64 hash = ceph_decode_64(p);
2361 if (hash != -1) {
2362 pr_warn("ceph_object_locator::hash is set\n");
2363 goto e_inval;
2364 }
2365 }
2367 /* skip the rest */
2368 *p = struct_end;
2369 out:
2370 return ret;
2372 e_inval:
2373 ret = -EINVAL;
2374 goto out;
2375 }
2377 static int ceph_redirect_decode(void **p, void *end,
2378 struct ceph_request_redirect *redir)
2379 {
2380 u8 struct_v, struct_cv;
2381 u32 len;
2382 void *struct_end;
2383 int ret;
2385 ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2386 struct_v = ceph_decode_8(p);
2387 struct_cv = ceph_decode_8(p);
2388 if (struct_cv > 1) {
2389 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
2390 struct_v, struct_cv);
2391 goto e_inval;
2392 }
2393 len = ceph_decode_32(p);
2394 ceph_decode_need(p, end, len, e_inval);
2395 struct_end = *p + len;
2397 ret = ceph_oloc_decode(p, end, &redir->oloc);
2398 if (ret)
2399 return ret;
2401 len = ceph_decode_32(p);
2402 if (len > 0) {
2403 pr_warn("ceph_request_redirect::object_name is set\n");
2404 goto e_inval;
2405 }
2407 len = ceph_decode_32(p);
2408 *p += len; /* skip osd_instructions */
2410 /* skip the rest */
2411 *p = struct_end;
2412 out:
2413 return ret;
2415 e_inval:
2416 ret = -EINVAL;
2417 goto out;
2418 }
2420 struct MOSDOpReply {
2421 struct ceph_pg pgid;
2422 u64 flags;
2423 int result;
2424 u32 epoch;
2425 int num_ops;
2426 u32 outdata_len[CEPH_OSD_MAX_OPS];
2427 s32 rval[CEPH_OSD_MAX_OPS];
2428 int retry_attempt;
2429 struct ceph_eversion replay_version;
2430 u64 user_version;
2431 struct ceph_request_redirect redirect;
2432 };
2434 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
2435 {
2436 void *p = msg->front.iov_base;
2437 void *const end = p + msg->front.iov_len;
2438 u16 version = le16_to_cpu(msg->hdr.version);
2439 struct ceph_eversion bad_replay_version;
2440 u8 decode_redir;
2441 u32 len;
2442 int ret;
2443 int i;
2445 ceph_decode_32_safe(&p, end, len, e_inval);
2446 ceph_decode_need(&p, end, len, e_inval);
2447 p += len; /* skip oid */
2449 ret = ceph_decode_pgid(&p, end, &m->pgid);
2450 if (ret)
2451 return ret;
2453 ceph_decode_64_safe(&p, end, m->flags, e_inval);
2454 ceph_decode_32_safe(&p, end, m->result, e_inval);
2455 ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
2456 memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
2457 p += sizeof(bad_replay_version);
2458 ceph_decode_32_safe(&p, end, m->epoch, e_inval);
2460 ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
2461 if (m->num_ops > ARRAY_SIZE(m->outdata_len))
2462 goto e_inval;
2464 ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
2465 e_inval);
2466 for (i = 0; i < m->num_ops; i++) {
2467 struct ceph_osd_op *op = p;
2469 m->outdata_len[i] = le32_to_cpu(op->payload_len);
2470 p += sizeof(*op);
2471 }
2473 ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
2474 for (i = 0; i < m->num_ops; i++)
2475 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
2477 if (version >= 5) {
2478 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
2479 memcpy(&m->replay_version, p, sizeof(m->replay_version));
2480 p += sizeof(m->replay_version);
2481 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
2482 } else {
2483 m->replay_version = bad_replay_version; /* struct */
2484 m->user_version = le64_to_cpu(m->replay_version.version);
2485 }
2487 if (version >= 6) {
2488 if (version >= 7)
2489 ceph_decode_8_safe(&p, end, decode_redir, e_inval);
2490 else
2491 decode_redir = 1;
2492 } else {
2493 decode_redir = 0;
2494 }
2496 if (decode_redir) {
2497 ret = ceph_redirect_decode(&p, end, &m->redirect);
2498 if (ret)
2499 return ret;
2500 } else {
2501 ceph_oloc_init(&m->redirect.oloc);
2502 }
2504 return 0;
2506 e_inval:
2507 return -EINVAL;
2508 }
2510 /*
2511 * We are done with @req if
2512 * - @m is a safe reply, or
2513 * - @m is an unsafe reply and we didn't want a safe one
2514 */
2515 static bool done_request(const struct ceph_osd_request *req,
2516 const struct MOSDOpReply *m)
2517 {
2518 return (m->result < 0 ||
2519 (m->flags & CEPH_OSD_FLAG_ONDISK) ||
2520 !(req->r_flags & CEPH_OSD_FLAG_ONDISK));
2521 }
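/*
 * Editorial example of the predicate above: a read carries no
 * CEPH_OSD_FLAG_ONDISK in r_flags, so its first (and only) reply
 * completes it; a write carries CEPH_OSD_FLAG_ONDISK and is done only
 * when the reply itself has ONDISK set (a "safe" commit) or when it
 * failed outright (m->result < 0).
 */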
2523 /*
2524 * handle osd op reply. either call the callback if it is specified,
2525 * or do the completion to wake up the waiting thread.
2527 * ->r_unsafe_callback is set?        yes                  no
2529 * first reply is OK (needed          r_cb/r_completion,   r_cb/r_completion,
2530 * any or needed/got safe)            r_safe_completion    r_safe_completion
2532 * first reply is unsafe              r_unsafe_cb(true)    (nothing)
2534 * when we get the safe reply         r_unsafe_cb(false),  r_cb/r_completion,
2535 *                                    r_safe_completion    r_safe_completion
2536 */
2537 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
2538 {
2539 struct ceph_osd_client *osdc = osd->o_osdc;
2540 struct ceph_osd_request *req;
2541 struct MOSDOpReply m;
2542 u64 tid = le64_to_cpu(msg->hdr.tid);
2543 u32 data_len = 0;
2544 bool already_acked;
2545 int ret;
2546 int i;
2548 dout("%s msg %p tid %llu\n", __func__, msg, tid);
2550 down_read(&osdc->lock);
2551 if (!osd_registered(osd)) {
2552 dout("%s osd%d unknown\n", __func__, osd->o_osd);
2553 goto out_unlock_osdc;
2555 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
2557 mutex_lock(&osd->lock);
2558 req = lookup_request(&osd->o_requests, tid);
2559 if (!req) {
2560 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
2561 goto out_unlock_session;
2562 }
2564 ret = decode_MOSDOpReply(msg, &m);
2565 if (ret) {
2566 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
2567 req->r_tid, ret);
2568 ceph_msg_dump(msg);
2569 goto fail_request;
2570 }
2571 dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
2572 __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
2573 m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
2574 le64_to_cpu(m.replay_version.version), m.user_version);
2576 if (m.retry_attempt >= 0) {
2577 if (m.retry_attempt != req->r_attempts - 1) {
2578 dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
2579 req, req->r_tid, m.retry_attempt,
2580 req->r_attempts - 1);
2581 goto out_unlock_session;
2582 }
2583 } else {
2584 WARN_ON(1); /* MOSDOpReply v4 is assumed */
2585 }
2587 if (!ceph_oloc_empty(&m.redirect.oloc)) {
2588 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
2589 m.redirect.oloc.pool);
2590 unlink_request(osd, req);
2591 mutex_unlock(&osd->lock);
2593 ceph_oloc_copy(&req->r_t.target_oloc, &m.redirect.oloc);
2594 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
2595 req->r_tid = 0;
2596 __submit_request(req, false);
2597 goto out_unlock_osdc;
2598 }
2600 if (m.num_ops != req->r_num_ops) {
2601 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
2602 req->r_num_ops, req->r_tid);
2605 for (i = 0; i < req->r_num_ops; i++) {
2606 dout(" req %p tid %llu op %d rval %d len %u\n", req,
2607 req->r_tid, i, m.rval[i], m.outdata_len[i]);
2608 req->r_ops[i].rval = m.rval[i];
2609 req->r_ops[i].outdata_len = m.outdata_len[i];
2610 data_len += m.outdata_len[i];
2611 }
2612 if (data_len != le32_to_cpu(msg->hdr.data_len)) {
2613 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
2614 le32_to_cpu(msg->hdr.data_len), req->r_tid);
2615 goto fail_request;
2616 }
2617 dout("%s req %p tid %llu acked %d result %d data_len %u\n", __func__,
2618 req, req->r_tid, req->r_got_reply, m.result, data_len);
2620 already_acked = req->r_got_reply;
2621 if (!already_acked) {
2622 req->r_result = m.result ?: data_len;
2623 req->r_replay_version = m.replay_version; /* struct */
2624 req->r_got_reply = true;
2625 } else if (!(m.flags & CEPH_OSD_FLAG_ONDISK)) {
2626 dout("req %p tid %llu dup ack\n", req, req->r_tid);
2627 goto out_unlock_session;
2628 }
2630 if (done_request(req, &m)) {
2631 __finish_request(req);
2632 if (req->r_linger) {
2633 WARN_ON(req->r_unsafe_callback);
2634 dout("req %p tid %llu cb (locked)\n", req, req->r_tid);
2635 __complete_request(req);
2636 }
2637 }
2639 mutex_unlock(&osd->lock);
2640 up_read(&osdc->lock);
2642 if (done_request(req, &m)) {
2643 if (already_acked && req->r_unsafe_callback) {
2644 dout("req %p tid %llu safe-cb\n", req, req->r_tid);
2645 req->r_unsafe_callback(req, false);
2646 } else if (!req->r_linger) {
2647 dout("req %p tid %llu cb\n", req, req->r_tid);
2648 __complete_request(req);
2649 }
2650 } else {
2651 if (req->r_unsafe_callback) {
2652 dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
2653 req->r_unsafe_callback(req, true);
2654 } else {
2655 WARN_ON(1);
2656 }
2657 }
2658 if (m.flags & CEPH_OSD_FLAG_ONDISK)
2659 complete_all(&req->r_safe_completion);
2661 ceph_osdc_put_request(req);
2662 return;
2664 fail_request:
2665 req->r_result = -EIO;
2666 __finish_request(req);
2667 __complete_request(req);
2668 complete_all(&req->r_safe_completion);
2669 out_unlock_session:
2670 mutex_unlock(&osd->lock);
2671 out_unlock_osdc:
2672 up_read(&osdc->lock);
2673 }
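/*
 * Editorial note on the locking pattern above: osd->lock nests inside
 * osdc->lock (taken for read here). Non-linger callbacks are invoked
 * only after both locks are dropped, so a callback that resubmits or
 * cancels a request cannot deadlock against the session mutex; the
 * linger case is the exception and completes while still locked (the
 * "cb (locked)" path).
 */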
2675 static void set_pool_was_full(struct ceph_osd_client *osdc)
2676 {
2677 struct rb_node *n;
2679 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
2680 struct ceph_pg_pool_info *pi =
2681 rb_entry(n, struct ceph_pg_pool_info, node);
2683 pi->was_full = __pool_full(pi);
2684 }
2685 }
2687 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
2688 {
2689 struct ceph_pg_pool_info *pi;
2691 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
2692 if (!pi)
2693 return false;
2695 return pi->was_full && !__pool_full(pi);
2696 }
2698 static enum calc_target_result
2699 recalc_linger_target(struct ceph_osd_linger_request *lreq)
2701 struct ceph_osd_client *osdc = lreq->osdc;
2702 enum calc_target_result ct_res;
2704 ct_res = calc_target(osdc, &lreq->t, &lreq->last_force_resend, true);
2705 if (ct_res == CALC_TARGET_NEED_RESEND) {
2706 struct ceph_osd *osd;
2708 osd = lookup_create_osd(osdc, lreq->t.osd, true);
2709 if (osd != lreq->osd) {
2710 unlink_linger(lreq->osd, lreq);
2711 link_linger(osd, lreq);
2712 }
2713 }
2715 return ct_res;
2716 }
2719 * Requeue requests whose mapping to an OSD has changed.
2721 static void scan_requests(struct ceph_osd *osd,
2722 bool force_resend,
2723 bool cleared_full,
2724 bool check_pool_cleared_full,
2725 struct rb_root *need_resend,
2726 struct list_head *need_resend_linger)
2728 struct ceph_osd_client *osdc = osd->o_osdc;
2729 struct rb_node *n;
2730 bool force_resend_writes;
2732 for (n = rb_first(&osd->o_linger_requests); n; ) {
2733 struct ceph_osd_linger_request *lreq =
2734 rb_entry(n, struct ceph_osd_linger_request, node);
2735 enum calc_target_result ct_res;
2737 n = rb_next(n); /* recalc_linger_target() */
2739 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
2741 ct_res = recalc_linger_target(lreq);
2743 case CALC_TARGET_NO_ACTION:
2744 force_resend_writes = cleared_full ||
2745 (check_pool_cleared_full &&
2746 pool_cleared_full(osdc, lreq->t.base_oloc.pool));
2747 if (!force_resend && !force_resend_writes)
2748 break;
2750 /* fall through */
2751 case CALC_TARGET_NEED_RESEND:
2752 /*
2753 * scan_requests() for the previous epoch(s)
2754 * may have already added it to the list, since
2755 * it's not unlinked here.
2756 */
2757 if (list_empty(&lreq->scan_item))
2758 list_add_tail(&lreq->scan_item, need_resend_linger);
2759 break;
2760 case CALC_TARGET_POOL_DNE:
2761 break;
2762 }
2763 }
2765 for (n = rb_first(&osd->o_requests); n; ) {
2766 struct ceph_osd_request *req =
2767 rb_entry(n, struct ceph_osd_request, r_node);
2768 enum calc_target_result ct_res;
2770 n = rb_next(n); /* unlink_request() */
2772 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2773 ct_res = calc_target(osdc, &req->r_t,
2774 &req->r_last_force_resend, false);
2775 switch (ct_res) {
2776 case CALC_TARGET_NO_ACTION:
2777 force_resend_writes = cleared_full ||
2778 (check_pool_cleared_full &&
2779 pool_cleared_full(osdc, req->r_t.base_oloc.pool));
2780 if (!force_resend &&
2781 (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
2782 !force_resend_writes))
2783 break;
2785 /* fall through */
2786 case CALC_TARGET_NEED_RESEND:
2787 unlink_request(osd, req);
2788 insert_request(need_resend, req);
2789 break;
2790 case CALC_TARGET_POOL_DNE:
2791 break;
2792 }
2793 }
2794 }
2796 static int handle_one_map(struct ceph_osd_client *osdc,
2797 void *p, void *end, bool incremental,
2798 struct rb_root *need_resend,
2799 struct list_head *need_resend_linger)
2801 struct ceph_osdmap *newmap;
2802 struct rb_node *n;
2803 bool skipped_map = false;
2804 bool was_full;
2806 was_full = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
2807 set_pool_was_full(osdc);
2809 if (incremental)
2810 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
2811 else
2812 newmap = ceph_osdmap_decode(&p, end);
2813 if (IS_ERR(newmap))
2814 return PTR_ERR(newmap);
2816 if (newmap != osdc->osdmap) {
2817 /*
2818 * Preserve ->was_full before destroying the old map.
2819 * For pools that weren't in the old map, ->was_full
2820 * should be false.
2821 */
2822 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
2823 struct ceph_pg_pool_info *pi =
2824 rb_entry(n, struct ceph_pg_pool_info, node);
2825 struct ceph_pg_pool_info *old_pi;
2827 old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
2828 if (old_pi)
2829 pi->was_full = old_pi->was_full;
2830 else
2831 WARN_ON(pi->was_full);
2832 }
2834 if (osdc->osdmap->epoch &&
2835 osdc->osdmap->epoch + 1 < newmap->epoch) {
2836 WARN_ON(incremental);
2837 skipped_map = true;
2838 }
2840 ceph_osdmap_destroy(osdc->osdmap);
2841 osdc->osdmap = newmap;
2842 }
2844 was_full &= !ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL);
2845 scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
2846 need_resend, need_resend_linger);
2848 for (n = rb_first(&osdc->osds); n; ) {
2849 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2851 n = rb_next(n); /* close_osd() */
2853 scan_requests(osd, skipped_map, was_full, true, need_resend,
2854 need_resend_linger);
2855 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
2856 memcmp(&osd->o_con.peer_addr,
2857 ceph_osd_addr(osdc->osdmap, osd->o_osd),
2858 sizeof(struct ceph_entity_addr)))
2859 close_osd(osd);
2860 }
2862 return 0;
2863 }
2865 static void kick_requests(struct ceph_osd_client *osdc,
2866 struct rb_root *need_resend,
2867 struct list_head *need_resend_linger)
2868 {
2869 struct ceph_osd_linger_request *lreq, *nlreq;
2870 struct rb_node *n;
2872 for (n = rb_first(need_resend); n; ) {
2873 struct ceph_osd_request *req =
2874 rb_entry(n, struct ceph_osd_request, r_node);
2875 struct ceph_osd *osd;
2877 n = rb_next(n);
2878 erase_request(need_resend, req); /* before link_request() */
2880 WARN_ON(req->r_osd);
2881 calc_target(osdc, &req->r_t, NULL, false);
2882 osd = lookup_create_osd(osdc, req->r_t.osd, true);
2883 link_request(osd, req);
2884 if (!req->r_linger) {
2885 if (!osd_homeless(osd) && !req->r_t.paused)
2886 send_request(req);
2887 } else {
2888 cancel_linger_request(req);
2889 }
2890 }
2892 list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
2893 if (!osd_homeless(lreq->osd))
2894 send_linger(lreq);
2896 list_del_init(&lreq->scan_item);
2897 }
2898 }
2901 * Process updated osd map.
2903 * The message contains any number of incremental and full maps, normally
2904 * indicating some sort of topology change in the cluster. Kick requests
2905 * off to different OSDs as needed.
2907 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
2909 void *p = msg->front.iov_base;
2910 void *const end = p + msg->front.iov_len;
2911 u32 nr_maps, maplen;
2912 u32 epoch;
2913 struct ceph_fsid fsid;
2914 struct rb_root need_resend = RB_ROOT;
2915 LIST_HEAD(need_resend_linger);
2916 bool handled_incremental = false;
2917 bool was_pauserd, was_pausewr;
2918 bool pauserd, pausewr;
2919 int err;
2921 dout("%s have %u\n", __func__, osdc->osdmap->epoch);
2922 down_write(&osdc->lock);
2925 ceph_decode_need(&p, end, sizeof(fsid), bad);
2926 ceph_decode_copy(&p, &fsid, sizeof(fsid));
2927 if (ceph_check_fsid(osdc->client, &fsid) < 0)
2928 goto bad;
2930 was_pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
2931 was_pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
2932 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
2933 have_pool_full(osdc);
2935 /* incremental maps */
2936 ceph_decode_32_safe(&p, end, nr_maps, bad);
2937 dout(" %d inc maps\n", nr_maps);
2938 while (nr_maps > 0) {
2939 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2940 epoch = ceph_decode_32(&p);
2941 maplen = ceph_decode_32(&p);
2942 ceph_decode_need(&p, end, maplen, bad);
2943 if (osdc->osdmap->epoch &&
2944 osdc->osdmap->epoch + 1 == epoch) {
2945 dout("applying incremental map %u len %d\n",
2947 err = handle_one_map(osdc, p, p + maplen, true,
2948 &need_resend, &need_resend_linger);
2951 handled_incremental = true;
2953 dout("ignoring incremental map %u len %d\n",
2959 if (handled_incremental)
2963 ceph_decode_32_safe(&p, end, nr_maps, bad);
2964 dout(" %d full maps\n", nr_maps);
2966 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
2967 epoch = ceph_decode_32(&p);
2968 maplen = ceph_decode_32(&p);
2969 ceph_decode_need(&p, end, maplen, bad);
2971 dout("skipping non-latest full map %u len %d\n",
2973 } else if (osdc->osdmap->epoch >= epoch) {
2974 dout("skipping full map %u len %d, "
2975 "older than our %u\n", epoch, maplen,
2976 osdc->osdmap->epoch);
2978 dout("taking full map %u len %d\n", epoch, maplen);
2979 err = handle_one_map(osdc, p, p + maplen, false,
2980 &need_resend, &need_resend_linger);
2990 * subscribe to subsequent osdmap updates if full to ensure
2991 * we find out when we are no longer full and stop returning
2994 pauserd = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSERD);
2995 pausewr = ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_PAUSEWR) ||
2996 ceph_osdmap_flag(osdc->osdmap, CEPH_OSDMAP_FULL) ||
2997 have_pool_full(osdc);
2998 if (was_pauserd || was_pausewr || pauserd || pausewr)
2999 maybe_request_map(osdc);
3001 kick_requests(osdc, &need_resend, &need_resend_linger);
3003 ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3004 osdc->osdmap->epoch);
3005 up_write(&osdc->lock);
3006 wake_up_all(&osdc->client->auth_wq);
3007 return;
3009 bad:
3010 pr_err("osdc handle_map corrupt msg\n");
3011 ceph_msg_dump(msg);
3012 up_write(&osdc->lock);
3013 }
3016 * Resubmit requests pending on the given osd.
3018 static void kick_osd_requests(struct ceph_osd *osd)
3019 {
3020 struct rb_node *n;
3022 for (n = rb_first(&osd->o_requests); n; ) {
3023 struct ceph_osd_request *req =
3024 rb_entry(n, struct ceph_osd_request, r_node);
3026 n = rb_next(n); /* cancel_linger_request() */
3028 if (!req->r_linger) {
3029 if (!req->r_t.paused)
3030 send_request(req);
3031 } else {
3032 cancel_linger_request(req);
3033 }
3034 }
3035 for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3036 struct ceph_osd_linger_request *lreq =
3037 rb_entry(n, struct ceph_osd_linger_request, node);
3039 send_linger(lreq);
3040 }
3041 }
3044 * If the osd connection drops, we need to resubmit all requests.
3046 static void osd_fault(struct ceph_connection *con)
3048 struct ceph_osd *osd = con->private;
3049 struct ceph_osd_client *osdc = osd->o_osdc;
3051 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3053 down_write(&osdc->lock);
3054 if (!osd_registered(osd)) {
3055 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3059 if (!reopen_osd(osd))
3060 kick_osd_requests(osd);
3061 maybe_request_map(osdc);
3064 up_write(&osdc->lock);
3068 * Process osd watch notifications
3070 static void handle_watch_notify(struct ceph_osd_client *osdc,
3071 struct ceph_msg *msg)
3072 {
3073 void *p = msg->front.iov_base;
3074 void *const end = p + msg->front.iov_len;
3075 struct ceph_osd_linger_request *lreq;
3076 struct linger_work *lwork;
3077 u8 proto_ver, opcode;
3078 u64 cookie, notify_id;
3079 u64 notifier_id = 0;
3080 s32 return_code = 0;
3081 void *payload = NULL;
3082 u32 payload_len = 0;
3084 ceph_decode_8_safe(&p, end, proto_ver, bad);
3085 ceph_decode_8_safe(&p, end, opcode, bad);
3086 ceph_decode_64_safe(&p, end, cookie, bad);
3087 p += 8; /* skip ver */
3088 ceph_decode_64_safe(&p, end, notify_id, bad);
3090 if (proto_ver >= 1) {
3091 ceph_decode_32_safe(&p, end, payload_len, bad);
3092 ceph_decode_need(&p, end, payload_len, bad);
3093 payload = p;
3094 p += payload_len;
3095 }
3097 if (le16_to_cpu(msg->hdr.version) >= 2)
3098 ceph_decode_32_safe(&p, end, return_code, bad);
3100 if (le16_to_cpu(msg->hdr.version) >= 3)
3101 ceph_decode_64_safe(&p, end, notifier_id, bad);
3103 down_read(&osdc->lock);
3104 lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
3105 if (!lreq) {
3106 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
3107 cookie);
3108 goto out_unlock_osdc;
3109 }
3111 mutex_lock(&lreq->lock);
3112 dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
3113 opcode, cookie, lreq, lreq->is_watch);
3114 if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
3115 if (!lreq->last_error) {
3116 lreq->last_error = -ENOTCONN;
3117 queue_watch_error(lreq);
3118 }
3119 } else if (!lreq->is_watch) {
3120 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
3121 if (lreq->notify_id && lreq->notify_id != notify_id) {
3122 dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
3123 lreq->notify_id, notify_id);
3124 } else if (!completion_done(&lreq->notify_finish_wait)) {
3125 struct ceph_msg_data *data =
3126 list_first_entry_or_null(&msg->data,
3127 struct ceph_msg_data,
3128 links);
3130 if (data) {
3131 if (lreq->preply_pages) {
3132 WARN_ON(data->type !=
3133 CEPH_MSG_DATA_PAGES);
3134 *lreq->preply_pages = data->pages;
3135 *lreq->preply_len = data->length;
3136 } else {
3137 ceph_release_page_vector(data->pages,
3138 calc_pages_for(0, data->length));
3139 }
3140 }
3141 lreq->notify_finish_error = return_code;
3142 complete_all(&lreq->notify_finish_wait);
3143 }
3144 } else {
3145 /* CEPH_WATCH_EVENT_NOTIFY */
3146 lwork = lwork_alloc(lreq, do_watch_notify);
3147 if (!lwork) {
3148 pr_err("failed to allocate notify-lwork\n");
3149 goto out_unlock_lreq;
3150 }
3152 lwork->notify.notify_id = notify_id;
3153 lwork->notify.notifier_id = notifier_id;
3154 lwork->notify.payload = payload;
3155 lwork->notify.payload_len = payload_len;
3156 lwork->notify.msg = ceph_msg_get(msg);
3157 lwork_queue(lwork);
3158 }
3160 out_unlock_lreq:
3161 mutex_unlock(&lreq->lock);
3162 out_unlock_osdc:
3163 up_read(&osdc->lock);
3164 return;
3166 bad:
3167 pr_err("osdc handle_watch_notify corrupt msg\n");
3168 }
3171 * Register request, send initial attempt.
3173 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
3174 struct ceph_osd_request *req,
3175 bool nofail)
3176 {
3177 down_read(&osdc->lock);
3178 submit_request(req, false);
3179 up_read(&osdc->lock);
3181 return 0;
3182 }
3183 EXPORT_SYMBOL(ceph_osdc_start_request);
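/*
 * Editorial usage sketch (not part of the original source; error
 * handling abbreviated, and "osdc", "layout", "vino", "pages",
 * "trunc_seq"/"trunc_size" are assumed to be set up by the caller).
 * This is the canonical submit/wait/put sequence that
 * ceph_osdc_readpages() below also follows:
 *
 *	u64 len = num_bytes;
 *	struct ceph_osd_request *req;
 *	int ret;
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, trunc_seq, trunc_size, false);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);
 *
 *	ret = ceph_osdc_start_request(osdc, req, false);
 *	if (!ret)
 *		ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);	// drop the allocation ref
 */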
3186 * Unregister a registered request. The request is not completed (i.e.
3187 * no callbacks or wakeups) - higher layers are supposed to know what
3188 * they are canceling.
3190 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
3192 struct ceph_osd_client *osdc = req->r_osdc;
3194 down_write(&osdc->lock);
3195 if (req->r_osd)
3196 cancel_request(req);
3197 up_write(&osdc->lock);
3198 }
3199 EXPORT_SYMBOL(ceph_osdc_cancel_request);
3202 * @timeout: in jiffies, 0 means "wait forever"
3204 static int wait_request_timeout(struct ceph_osd_request *req,
3205 unsigned long timeout)
3209 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3210 left = wait_for_completion_interruptible_timeout(&req->r_completion,
3211 ceph_timeout_jiffies(timeout));
3213 left = left ?: -ETIMEDOUT;
3214 ceph_osdc_cancel_request(req);
3216 /* kludge - need to to wake ceph_osdc_sync() */
3217 complete_all(&req->r_safe_completion);
3219 left = req->r_result; /* completed */
3226 * wait for a request to complete
3228 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
3229 struct ceph_osd_request *req)
3231 return wait_request_timeout(req, 0);
3233 EXPORT_SYMBOL(ceph_osdc_wait_request);
3236 * sync - wait for all in-flight requests to flush. avoid starvation.
3238 void ceph_osdc_sync(struct ceph_osd_client *osdc)
3240 struct rb_node *n, *p;
3241 u64 last_tid = atomic64_read(&osdc->last_tid);
3243 again:
3244 down_read(&osdc->lock);
3245 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3246 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3248 mutex_lock(&osd->lock);
3249 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
3250 struct ceph_osd_request *req =
3251 rb_entry(p, struct ceph_osd_request, r_node);
3253 if (req->r_tid > last_tid)
3254 break;
3256 if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
3257 continue;
3259 ceph_osdc_get_request(req);
3260 mutex_unlock(&osd->lock);
3261 up_read(&osdc->lock);
3262 dout("%s waiting on req %p tid %llu last_tid %llu\n",
3263 __func__, req, req->r_tid, last_tid);
3264 wait_for_completion(&req->r_safe_completion);
3265 ceph_osdc_put_request(req);
3266 goto again;
3267 }
3269 mutex_unlock(&osd->lock);
3270 }
3272 up_read(&osdc->lock);
3273 dout("%s done last_tid %llu\n", __func__, last_tid);
3275 EXPORT_SYMBOL(ceph_osdc_sync);
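/*
 * Editorial example (hypothetical caller): a filesystem would flush all
 * in-flight writes before declaring an fsync complete, e.g.
 *
 *	ceph_osdc_sync(&client->osdc);
 *
 * Note the goto-again pattern above: the rbtrees may change while the
 * locks are dropped around wait_for_completion(), so the scan restarts
 * from the beginning after every wait.
 */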
3277 static struct ceph_osd_request *
3278 alloc_linger_request(struct ceph_osd_linger_request *lreq)
3280 struct ceph_osd_request *req;
3282 req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
3283 if (!req)
3284 return NULL;
3286 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3287 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3289 if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
3290 ceph_osdc_put_request(req);
3291 return NULL;
3292 }
3294 return req;
3295 }
3298 * Returns a handle, caller owns a ref.
3300 struct ceph_osd_linger_request *
3301 ceph_osdc_watch(struct ceph_osd_client *osdc,
3302 struct ceph_object_id *oid,
3303 struct ceph_object_locator *oloc,
3304 rados_watchcb2_t wcb,
3305 rados_watcherrcb_t errcb,
3306 void *data)
3307 {
3308 struct ceph_osd_linger_request *lreq;
3309 int ret;
3311 lreq = linger_alloc(osdc);
3312 if (!lreq)
3313 return ERR_PTR(-ENOMEM);
3315 lreq->is_watch = true;
3316 lreq->wcb = wcb;
3317 lreq->errcb = errcb;
3318 lreq->data = data;
3320 ceph_oid_copy(&lreq->t.base_oid, oid);
3321 ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3322 lreq->t.flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
3323 lreq->mtime = CURRENT_TIME;
3325 lreq->reg_req = alloc_linger_request(lreq);
3326 if (!lreq->reg_req) {
3327 ret = -ENOMEM;
3328 goto err_put_lreq;
3329 }
3331 lreq->ping_req = alloc_linger_request(lreq);
3332 if (!lreq->ping_req) {
3333 ret = -ENOMEM;
3334 goto err_put_lreq;
3335 }
3337 down_write(&osdc->lock);
3338 linger_register(lreq); /* before osd_req_op_* */
3339 osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
3340 CEPH_OSD_WATCH_OP_WATCH);
3341 osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
3342 CEPH_OSD_WATCH_OP_PING);
3343 linger_submit(lreq);
3344 up_write(&osdc->lock);
3346 ret = linger_reg_commit_wait(lreq);
3347 if (ret) {
3348 linger_cancel(lreq);
3349 goto err_put_lreq;
3350 }
3352 return lreq;
3354 err_put_lreq:
3355 linger_put(lreq);
3356 return ERR_PTR(ret);
3357 }
3358 EXPORT_SYMBOL(ceph_osdc_watch);
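/*
 * Editorial usage sketch, modelled on how a client like rbd would use
 * this API (names prefixed my_ are hypothetical; "my_osdc", "my_oid",
 * "my_oloc" are assumed to exist):
 *
 *	static void my_watch_cb(void *arg, u64 notify_id, u64 cookie,
 *				u64 notifier_id, void *data, size_t data_len)
 *	{
 *		// must ack, or the notifier waits for the notify timeout
 *		ceph_osdc_notify_ack(my_osdc, &my_oid, &my_oloc,
 *				     notify_id, cookie, NULL, 0);
 *	}
 *
 *	static void my_watch_errcb(void *arg, u64 cookie, int err)
 *	{
 *		pr_warn("watch error %d, need to rewatch\n", err);
 *	}
 *
 *	handle = ceph_osdc_watch(my_osdc, &my_oid, &my_oloc,
 *				 my_watch_cb, my_watch_errcb, my_data);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ceph_osdc_unwatch(my_osdc, handle);	// drops the watch ref
 */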
3360 /*
3361 * Releases a ref.
3362 *
3363 * Times out after mount_timeout to preserve rbd unmap behaviour
3364 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
3365 * with mount_timeout").
3366 */
3367 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
3368 struct ceph_osd_linger_request *lreq)
3370 struct ceph_options *opts = osdc->client->options;
3371 struct ceph_osd_request *req;
3372 int ret;
3374 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3375 if (!req)
3376 return -ENOMEM;
3378 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3379 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3380 req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
3381 req->r_mtime = CURRENT_TIME;
3382 osd_req_op_watch_init(req, 0, lreq->linger_id,
3383 CEPH_OSD_WATCH_OP_UNWATCH);
3385 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3386 if (ret)
3387 goto out_put_req;
3389 ceph_osdc_start_request(osdc, req, false);
3390 linger_cancel(lreq);
3391 linger_put(lreq);
3392 ret = wait_request_timeout(req, opts->mount_timeout);
3394 out_put_req:
3395 ceph_osdc_put_request(req);
3396 return ret;
3397 }
3398 EXPORT_SYMBOL(ceph_osdc_unwatch);
3400 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
3401 u64 notify_id, u64 cookie, void *payload,
3402 size_t payload_len)
3403 {
3404 struct ceph_osd_req_op *op;
3405 struct ceph_pagelist *pl;
3406 int ret;
3408 op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
3410 pl = kmalloc(sizeof(*pl), GFP_NOIO);
3411 if (!pl)
3412 return -ENOMEM;
3414 ceph_pagelist_init(pl);
3415 ret = ceph_pagelist_encode_64(pl, notify_id);
3416 ret |= ceph_pagelist_encode_64(pl, cookie);
3417 if (payload) {
3418 ret |= ceph_pagelist_encode_32(pl, payload_len);
3419 ret |= ceph_pagelist_append(pl, payload, payload_len);
3420 } else {
3421 ret |= ceph_pagelist_encode_32(pl, 0);
3422 }
3423 if (ret) {
3424 ceph_pagelist_release(pl);
3425 return -ENOMEM;
3426 }
3428 ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
3429 op->indata_len = pl->length;
3430 return 0;
3431 }
3433 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
3434 struct ceph_object_id *oid,
3435 struct ceph_object_locator *oloc,
3436 u64 notify_id,
3437 u64 cookie,
3438 void *payload,
3439 size_t payload_len)
3440 {
3441 struct ceph_osd_request *req;
3442 int ret;
3444 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3445 if (!req)
3446 return -ENOMEM;
3448 ceph_oid_copy(&req->r_base_oid, oid);
3449 ceph_oloc_copy(&req->r_base_oloc, oloc);
3450 req->r_flags = CEPH_OSD_FLAG_READ;
3452 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3453 if (ret)
3454 goto out_put_req;
3456 ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
3457 payload_len);
3458 if (ret)
3459 goto out_put_req;
3461 ceph_osdc_start_request(osdc, req, false);
3462 ret = ceph_osdc_wait_request(osdc, req);
3464 out_put_req:
3465 ceph_osdc_put_request(req);
3466 return ret;
3467 }
3468 EXPORT_SYMBOL(ceph_osdc_notify_ack);
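/*
 * Editorial example: acking with an empty payload from a watch
 * callback, using the notify_id and cookie the callback was given (see
 * the sketch after ceph_osdc_watch() above):
 *
 *	ret = ceph_osdc_notify_ack(osdc, oid, oloc, notify_id, cookie,
 *				   NULL, 0);
 */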
3470 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
3471 u64 cookie, u32 prot_ver, u32 timeout,
3472 void *payload, size_t payload_len)
3473 {
3474 struct ceph_osd_req_op *op;
3475 struct ceph_pagelist *pl;
3476 int ret;
3478 op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
3479 op->notify.cookie = cookie;
3481 pl = kmalloc(sizeof(*pl), GFP_NOIO);
3482 if (!pl)
3483 return -ENOMEM;
3485 ceph_pagelist_init(pl);
3486 ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
3487 ret |= ceph_pagelist_encode_32(pl, timeout);
3488 ret |= ceph_pagelist_encode_32(pl, payload_len);
3489 ret |= ceph_pagelist_append(pl, payload, payload_len);
3490 if (ret) {
3491 ceph_pagelist_release(pl);
3492 return -ENOMEM;
3493 }
3495 ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
3496 op->indata_len = pl->length;
3497 return 0;
3498 }
3500 /*
3501 * @timeout: in seconds
3502 *
3503 * @preply_{pages,len} are initialized both on success and error.
3504 * The caller is responsible for:
3505 *
3506 * ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
3507 */
3508 int ceph_osdc_notify(struct ceph_osd_client *osdc,
3509 struct ceph_object_id *oid,
3510 struct ceph_object_locator *oloc,
3511 void *payload,
3512 size_t payload_len,
3513 u32 timeout,
3514 struct page ***preply_pages,
3515 size_t *preply_len)
3516 {
3517 struct ceph_osd_linger_request *lreq;
3518 struct page **pages;
3519 int ret;
3521 WARN_ON(!timeout);
3522 if (preply_pages) {
3523 *preply_pages = NULL;
3524 *preply_len = 0;
3525 }
3527 lreq = linger_alloc(osdc);
3528 if (!lreq)
3529 return -ENOMEM;
3531 lreq->preply_pages = preply_pages;
3532 lreq->preply_len = preply_len;
3534 ceph_oid_copy(&lreq->t.base_oid, oid);
3535 ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3536 lreq->t.flags = CEPH_OSD_FLAG_READ;
3538 lreq->reg_req = alloc_linger_request(lreq);
3539 if (!lreq->reg_req) {
3540 ret = -ENOMEM;
3541 goto out_put_lreq;
3542 }
3544 /* for notify_id */
3545 pages = ceph_alloc_page_vector(1, GFP_NOIO);
3546 if (IS_ERR(pages)) {
3547 ret = PTR_ERR(pages);
3548 goto out_put_lreq;
3549 }
3551 down_write(&osdc->lock);
3552 linger_register(lreq); /* before osd_req_op_* */
3553 ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
3554 timeout, payload, payload_len);
3555 if (ret) {
3556 linger_unregister(lreq);
3557 up_write(&osdc->lock);
3558 ceph_release_page_vector(pages, 1);
3559 goto out_put_lreq;
3560 }
3561 ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
3562 response_data),
3563 pages, PAGE_SIZE, 0, false, true);
3564 linger_submit(lreq);
3565 up_write(&osdc->lock);
3567 ret = linger_reg_commit_wait(lreq);
3568 if (!ret)
3569 ret = linger_notify_finish_wait(lreq);
3570 else
3571 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
3573 linger_cancel(lreq);
3574 out_put_lreq:
3575 linger_put(lreq);
3576 return ret;
3577 }
3578 EXPORT_SYMBOL(ceph_osdc_notify);
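/*
 * Editorial usage sketch; per the comment above, the reply page vector
 * must be released by the caller on both success and error:
 *
 *	struct page **reply_pages = NULL;
 *	size_t reply_len = 0;
 *	int ret;
 *
 *	ret = ceph_osdc_notify(osdc, oid, oloc, buf, buf_len,
 *			       10, &reply_pages, &reply_len); // 10s timeout
 *	...
 *	if (reply_pages)
 *		ceph_release_page_vector(reply_pages,
 *					 calc_pages_for(0, reply_len));
 */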
3580 /*
3581 * Call all pending notify callbacks - for use after a watch is
3582 * unregistered, to make sure no more callbacks for it will be invoked
3583 */
3584 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
3585 {
3586 flush_workqueue(osdc->notify_wq);
3587 }
3588 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
3594 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
3595 {
3596 int err;
3598 dout("init\n");
3599 osdc->client = client;
3600 init_rwsem(&osdc->lock);
3601 osdc->osds = RB_ROOT;
3602 INIT_LIST_HEAD(&osdc->osd_lru);
3603 spin_lock_init(&osdc->osd_lru_lock);
3604 osd_init(&osdc->homeless_osd);
3605 osdc->homeless_osd.o_osdc = osdc;
3606 osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
3607 osdc->linger_requests = RB_ROOT;
3608 INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
3609 INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
3611 err = -ENOMEM;
3612 osdc->osdmap = ceph_osdmap_alloc();
3613 if (!osdc->osdmap)
3614 goto out;
3616 osdc->req_mempool = mempool_create_slab_pool(10,
3617 ceph_osd_request_cache);
3618 if (!osdc->req_mempool)
3619 goto out_map;
3621 err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
3622 PAGE_SIZE, 10, true, "osd_op");
3623 if (err < 0)
3624 goto out_mempool;
3625 err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
3626 PAGE_SIZE, 10, true, "osd_op_reply");
3627 if (err < 0)
3628 goto out_msgpool;
3630 err = -ENOMEM;
3631 osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
3632 if (!osdc->notify_wq)
3633 goto out_msgpool_reply;
3635 schedule_delayed_work(&osdc->timeout_work,
3636 osdc->client->options->osd_keepalive_timeout);
3637 schedule_delayed_work(&osdc->osds_timeout_work,
3638 round_jiffies_relative(osdc->client->options->osd_idle_ttl));
3640 return 0;
3642 out_msgpool_reply:
3643 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
3644 out_msgpool:
3645 ceph_msgpool_destroy(&osdc->msgpool_op);
3646 out_mempool:
3647 mempool_destroy(osdc->req_mempool);
3648 out_map:
3649 ceph_osdmap_destroy(osdc->osdmap);
3650 out:
3651 return err;
3652 }
3654 void ceph_osdc_stop(struct ceph_osd_client *osdc)
3656 flush_workqueue(osdc->notify_wq);
3657 destroy_workqueue(osdc->notify_wq);
3658 cancel_delayed_work_sync(&osdc->timeout_work);
3659 cancel_delayed_work_sync(&osdc->osds_timeout_work);
3661 down_write(&osdc->lock);
3662 while (!RB_EMPTY_ROOT(&osdc->osds)) {
3663 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
3664 struct ceph_osd, o_node);
3665 close_osd(osd);
3666 }
3667 up_write(&osdc->lock);
3668 WARN_ON(atomic_read(&osdc->homeless_osd.o_ref) != 1);
3669 osd_cleanup(&osdc->homeless_osd);
3671 WARN_ON(!list_empty(&osdc->osd_lru));
3672 WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
3673 WARN_ON(atomic_read(&osdc->num_requests));
3674 WARN_ON(atomic_read(&osdc->num_homeless));
3676 ceph_osdmap_destroy(osdc->osdmap);
3677 mempool_destroy(osdc->req_mempool);
3678 ceph_msgpool_destroy(&osdc->msgpool_op);
3679 ceph_msgpool_destroy(&osdc->msgpool_op_reply);
3680 }
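/*
 * Editorial note: ceph_osdc_setup()/ceph_osdc_cleanup() below manage
 * the module-wide request slab and bracket everything else, while
 * ceph_osdc_init()/ceph_osdc_stop() bracket a single client instance:
 *
 *	ceph_osdc_setup();		// module init
 *	ceph_osdc_init(osdc, client);	// per client
 *	...
 *	ceph_osdc_stop(osdc);		// per client
 *	ceph_osdc_cleanup();		// module exit
 */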
3683 * Read some contiguous pages. If we cross a stripe boundary, shorten
3684 * *plen. Return number of bytes read, or error.
3686 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
3687 struct ceph_vino vino, struct ceph_file_layout *layout,
3688 u64 off, u64 *plen,
3689 u32 truncate_seq, u64 truncate_size,
3690 struct page **pages, int num_pages, int page_align)
3691 {
3692 struct ceph_osd_request *req;
3693 int rc = 0;
3695 dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
3696 vino.snap, off, *plen);
3697 req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
3698 CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
3699 NULL, truncate_seq, truncate_size,
3700 false);
3701 if (IS_ERR(req))
3702 return PTR_ERR(req);
3704 /* it may be a short read due to an object boundary */
3705 osd_req_op_extent_osd_data_pages(req, 0,
3706 pages, *plen, page_align, false, false);
3708 dout("readpages final extent is %llu~%llu (%llu bytes align %d)\n",
3709 off, *plen, *plen, page_align);
3711 rc = ceph_osdc_start_request(osdc, req, false);
3712 if (!rc)
3713 rc = ceph_osdc_wait_request(osdc, req);
3715 ceph_osdc_put_request(req);
3716 dout("readpages result %d\n", rc);
3719 EXPORT_SYMBOL(ceph_osdc_readpages);
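/*
 * Editorial example (hypothetical cephfs-style caller; the ci fields
 * are assumptions standing in for the inode's layout/truncate state):
 *
 *	u64 len = PAGE_SIZE;
 *	int err;
 *
 *	err = ceph_osdc_readpages(osdc, ceph_vino(inode), &ci->i_layout,
 *				  off, &len, ci->i_truncate_seq,
 *				  ci->i_truncate_size, &page, 1, 0);
 *	if (err < 0)
 *		return err;	// err is bytes read on success
 */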
3722 * do a synchronous write on N pages
3724 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
3725 struct ceph_file_layout *layout,
3726 struct ceph_snap_context *snapc,
3727 u64 off, u64 len,
3728 u32 truncate_seq, u64 truncate_size,
3729 struct timespec *mtime,
3730 struct page **pages, int num_pages)
3731 {
3732 struct ceph_osd_request *req;
3733 int rc = 0;
3734 int page_align = off & ~PAGE_MASK;
3736 req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
3737 CEPH_OSD_OP_WRITE,
3738 CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
3739 snapc, truncate_seq, truncate_size,
3740 true);
3741 if (IS_ERR(req))
3742 return PTR_ERR(req);
3744 /* it may be a short write due to an object boundary */
3745 osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
3746 false, false);
3747 dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
3749 req->r_mtime = *mtime;
3750 rc = ceph_osdc_start_request(osdc, req, true);
3751 if (!rc)
3752 rc = ceph_osdc_wait_request(osdc, req);
3754 ceph_osdc_put_request(req);
3755 if (rc == 0)
3756 rc = len;
3757 dout("writepages result %d\n", rc);
3758 return rc;
3759 }
3760 EXPORT_SYMBOL(ceph_osdc_writepages);
3762 int ceph_osdc_setup(void)
3764 size_t size = sizeof(struct ceph_osd_request) +
3765 CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
3767 BUG_ON(ceph_osd_request_cache);
3768 ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
3769 0, 0, NULL);
3771 return ceph_osd_request_cache ? 0 : -ENOMEM;
3772 }
3773 EXPORT_SYMBOL(ceph_osdc_setup);
3775 void ceph_osdc_cleanup(void)
3777 BUG_ON(!ceph_osd_request_cache);
3778 kmem_cache_destroy(ceph_osd_request_cache);
3779 ceph_osd_request_cache = NULL;
3781 EXPORT_SYMBOL(ceph_osdc_cleanup);
3784 * handle incoming message
3786 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
3788 struct ceph_osd *osd = con->private;
3789 struct ceph_osd_client *osdc = osd->o_osdc;
3790 int type = le16_to_cpu(msg->hdr.type);
3792 switch (type) {
3793 case CEPH_MSG_OSD_MAP:
3794 ceph_osdc_handle_map(osdc, msg);
3795 break;
3796 case CEPH_MSG_OSD_OPREPLY:
3797 handle_reply(osd, msg);
3798 break;
3799 case CEPH_MSG_WATCH_NOTIFY:
3800 handle_watch_notify(osdc, msg);
3801 break;
3803 default:
3804 pr_err("received unknown message type %d %s\n", type,
3805 ceph_msg_type_name(type));
3806 }
3808 ceph_msg_put(msg);
3809 }
3812 * Lookup and return message for incoming reply. Don't try to do
3813 * anything about a larger than preallocated data portion of the
3814 * message at the moment - for now, just skip the message.
3816 static struct ceph_msg *get_reply(struct ceph_connection *con,
3817 struct ceph_msg_header *hdr,
3818 int *skip)
3819 {
3820 struct ceph_osd *osd = con->private;
3821 struct ceph_osd_client *osdc = osd->o_osdc;
3822 struct ceph_msg *m = NULL;
3823 struct ceph_osd_request *req;
3824 int front_len = le32_to_cpu(hdr->front_len);
3825 int data_len = le32_to_cpu(hdr->data_len);
3826 u64 tid = le64_to_cpu(hdr->tid);
3828 down_read(&osdc->lock);
3829 if (!osd_registered(osd)) {
3830 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
3832 goto out_unlock_osdc;
3834 WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
3836 mutex_lock(&osd->lock);
3837 req = lookup_request(&osd->o_requests, tid);
3838 if (!req) {
3839 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
3840 osd->o_osd, tid);
3841 *skip = 1;
3842 goto out_unlock_session;
3843 }
3845 ceph_msg_revoke_incoming(req->r_reply);
3847 if (front_len > req->r_reply->front_alloc_len) {
3848 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
3849 __func__, osd->o_osd, req->r_tid, front_len,
3850 req->r_reply->front_alloc_len);
3851 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
3852 false);
3853 if (!m)
3854 goto out_unlock_session;
3855 ceph_msg_put(req->r_reply);
3856 req->r_reply = m;
3857 }
3859 if (data_len > req->r_reply->data_length) {
3860 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
3861 __func__, osd->o_osd, req->r_tid, data_len,
3862 req->r_reply->data_length);
3863 *skip = 1;
3864 m = NULL;
3865 goto out_unlock_session;
3866 }
3868 m = ceph_msg_get(req->r_reply);
3869 dout("get_reply tid %lld %p\n", tid, m);
3871 out_unlock_session:
3872 mutex_unlock(&osd->lock);
3873 out_unlock_osdc:
3874 up_read(&osdc->lock);
3875 return m;
3876 }
3879 * TODO: switch to a msg-owned pagelist
3881 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
3882 {
3883 struct ceph_msg *m;
3884 int type = le16_to_cpu(hdr->type);
3885 u32 front_len = le32_to_cpu(hdr->front_len);
3886 u32 data_len = le32_to_cpu(hdr->data_len);
3888 m = ceph_msg_new(type, front_len, GFP_NOIO, false);
3889 if (!m)
3890 return NULL;
3892 if (data_len) {
3893 struct page **pages;
3894 struct ceph_osd_data osd_data;
3896 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
3897 GFP_NOIO);
3898 if (!pages) {
3899 ceph_msg_put(m);
3900 return NULL;
3901 }
3903 ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
3904 false);
3905 ceph_osdc_msg_data_add(m, &osd_data);
3906 }
3908 return m;
3909 }
3911 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
3912 struct ceph_msg_header *hdr,
3913 int *skip)
3914 {
3915 struct ceph_osd *osd = con->private;
3916 int type = le16_to_cpu(hdr->type);
3918 *skip = 0;
3919 switch (type) {
3920 case CEPH_MSG_OSD_MAP:
3921 case CEPH_MSG_WATCH_NOTIFY:
3922 return alloc_msg_with_page_vector(hdr);
3923 case CEPH_MSG_OSD_OPREPLY:
3924 return get_reply(con, hdr, skip);
3925 default:
3926 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
3927 osd->o_osd, type);
3928 *skip = 1;
3929 return NULL;
3930 }
3931 }
3934 * Wrappers to refcount containing ceph_osd struct
3936 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
3937 {
3938 struct ceph_osd *osd = con->private;
3939 if (get_osd(osd))
3940 return con;
3941 return NULL;
3942 }
3944 static void put_osd_con(struct ceph_connection *con)
3945 {
3946 struct ceph_osd *osd = con->private;
3947 put_osd(osd);
3948 }
3954 * Note: returned pointer is the address of a structure that's
3955 * managed separately. Caller must *not* attempt to free it.
3957 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
3958 int *proto, int force_new)
3960 struct ceph_osd *o = con->private;
3961 struct ceph_osd_client *osdc = o->o_osdc;
3962 struct ceph_auth_client *ac = osdc->client->monc.auth;
3963 struct ceph_auth_handshake *auth = &o->o_auth;
3965 if (force_new && auth->authorizer) {
3966 ceph_auth_destroy_authorizer(auth->authorizer);
3967 auth->authorizer = NULL;
3969 if (!auth->authorizer) {
3970 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
3971 auth);
3972 if (ret)
3973 return ERR_PTR(ret);
3974 } else {
3975 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
3976 auth);
3977 if (ret)
3978 return ERR_PTR(ret);
3979 }
3980 *proto = ac->protocol;
3982 return auth;
3983 }
3986 static int verify_authorizer_reply(struct ceph_connection *con, int len)
3988 struct ceph_osd *o = con->private;
3989 struct ceph_osd_client *osdc = o->o_osdc;
3990 struct ceph_auth_client *ac = osdc->client->monc.auth;
3992 return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
3995 static int invalidate_authorizer(struct ceph_connection *con)
3997 struct ceph_osd *o = con->private;
3998 struct ceph_osd_client *osdc = o->o_osdc;
3999 struct ceph_auth_client *ac = osdc->client->monc.auth;
4001 ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
4002 return ceph_monc_validate_auth(&osdc->client->monc);
4005 static int osd_sign_message(struct ceph_msg *msg)
4007 struct ceph_osd *o = msg->con->private;
4008 struct ceph_auth_handshake *auth = &o->o_auth;
4010 return ceph_auth_sign_message(auth, msg);
4013 static int osd_check_message_signature(struct ceph_msg *msg)
4015 struct ceph_osd *o = msg->con->private;
4016 struct ceph_auth_handshake *auth = &o->o_auth;
4018 return ceph_auth_check_message_signature(auth, msg);
4021 static const struct ceph_connection_operations osd_con_ops = {
4022 .get = get_osd_con,
4023 .put = put_osd_con,
4024 .dispatch = dispatch,
4025 .get_authorizer = get_authorizer,
4026 .verify_authorizer_reply = verify_authorizer_reply,
4027 .invalidate_authorizer = invalidate_authorizer,
4028 .alloc_msg = alloc_msg,
4029 .sign_message = osd_sign_message,
4030 .check_message_signature = osd_check_message_signature,
4031 .fault = osd_fault,
4032 };