net/ceph/osd_client.c
1
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/module.h>
5 #include <linux/err.h>
6 #include <linux/highmem.h>
7 #include <linux/mm.h>
8 #include <linux/pagemap.h>
9 #include <linux/slab.h>
10 #include <linux/uaccess.h>
11 #ifdef CONFIG_BLOCK
12 #include <linux/bio.h>
13 #endif
14
15 #include <linux/ceph/libceph.h>
16 #include <linux/ceph/osd_client.h>
17 #include <linux/ceph/messenger.h>
18 #include <linux/ceph/decode.h>
19 #include <linux/ceph/auth.h>
20 #include <linux/ceph/pagelist.h>
21
22 #define OSD_OPREPLY_FRONT_LEN   512
23
24 static struct kmem_cache        *ceph_osd_request_cache;
25
26 static const struct ceph_connection_operations osd_con_ops;
27
28 /*
29  * Implement client access to distributed object storage cluster.
30  *
31  * All data objects are stored within a cluster/cloud of OSDs, or
32  * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
33  * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
34  * remote daemons serving up and coordinating consistent and safe
35  * access to storage.
36  *
37  * Cluster membership and the mapping of data objects onto storage devices
38  * are described by the osd map.
39  *
40  * We keep track of pending OSD requests (read, write), resubmit
41  * requests to different OSDs when the cluster topology/data layout
42  * changes, and retry the affected requests when the communications
43  * channel with an OSD is reset.
44  */
45
46 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
47 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
48 static void link_linger(struct ceph_osd *osd,
49                         struct ceph_osd_linger_request *lreq);
50 static void unlink_linger(struct ceph_osd *osd,
51                           struct ceph_osd_linger_request *lreq);
52
53 #if 1
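/*
 * Debug-only lock assertions.  rwsem_is_wrlocked() below is a
 * best-effort check, not an rwsem API guarantee: if down_read_trylock()
 * succeeds the semaphore clearly isn't write-locked and the read lock
 * is dropped again immediately; if it fails we assume the caller holds
 * it for write (a queued writer can produce the same result, so this is
 * only a heuristic for the WARN_ONs below).
 */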
54 static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
55 {
56         bool wrlocked = true;
57
58         if (unlikely(down_read_trylock(sem))) {
59                 wrlocked = false;
60                 up_read(sem);
61         }
62
63         return wrlocked;
64 }
65 static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
66 {
67         WARN_ON(!rwsem_is_locked(&osdc->lock));
68 }
69 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
70 {
71         WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
72 }
73 static inline void verify_osd_locked(struct ceph_osd *osd)
74 {
75         struct ceph_osd_client *osdc = osd->o_osdc;
76
77         WARN_ON(!(mutex_is_locked(&osd->lock) &&
78                   rwsem_is_locked(&osdc->lock)) &&
79                 !rwsem_is_wrlocked(&osdc->lock));
80 }
81 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
82 {
83         WARN_ON(!mutex_is_locked(&lreq->lock));
84 }
85 #else
86 static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
87 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
88 static inline void verify_osd_locked(struct ceph_osd *osd) { }
89 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
90 #endif
91
92 /*
93  * calculate the mapping of a file extent onto an object, and fill out the
94  * request accordingly.  shorten extent as necessary if it crosses an
95  * object boundary.
96  *
97  * fill osd op in request message.
98  */
99 static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
100                         u64 *objnum, u64 *objoff, u64 *objlen)
101 {
102         u64 orig_len = *plen;
103         int r;
104
105         /* object extent? */
106         r = ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
107                                           objoff, objlen);
108         if (r < 0)
109                 return r;
110         if (*objlen < orig_len) {
111                 *plen = *objlen;
112                 dout(" skipping last %llu, final file extent %llu~%llu\n",
113                      orig_len - *plen, off, *plen);
114         }
115
116         dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
117
118         return 0;
119 }
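
/*
 * Worked example (illustrative only, assuming the default layout where
 * stripe_unit == object_size == 4M):  off=0x3ff000, *plen=0x4000 maps
 * to objnum=0, objoff=0x3ff000, but only objlen=0x1000 fits before the
 * object boundary, so *plen is shortened to 0x1000 and the caller is
 * expected to issue a follow-up request for the remaining 0x3000 bytes.
 */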
120
121 static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
122 {
123         memset(osd_data, 0, sizeof (*osd_data));
124         osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
125 }
126
127 static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
128                         struct page **pages, u64 length, u32 alignment,
129                         bool pages_from_pool, bool own_pages)
130 {
131         osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
132         osd_data->pages = pages;
133         osd_data->length = length;
134         osd_data->alignment = alignment;
135         osd_data->pages_from_pool = pages_from_pool;
136         osd_data->own_pages = own_pages;
137 }
138
139 static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
140                         struct ceph_pagelist *pagelist)
141 {
142         osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
143         osd_data->pagelist = pagelist;
144 }
145
146 #ifdef CONFIG_BLOCK
147 static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
148                         struct bio *bio, size_t bio_length)
149 {
150         osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
151         osd_data->bio = bio;
152         osd_data->bio_length = bio_length;
153 }
154 #endif /* CONFIG_BLOCK */
155
156 #define osd_req_op_data(oreq, whch, typ, fld)                           \
157 ({                                                                      \
158         struct ceph_osd_request *__oreq = (oreq);                       \
159         unsigned int __whch = (whch);                                   \
160         BUG_ON(__whch >= __oreq->r_num_ops);                            \
161         &__oreq->r_ops[__whch].typ.fld;                                 \
162 })
163
164 static struct ceph_osd_data *
165 osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
166 {
167         BUG_ON(which >= osd_req->r_num_ops);
168
169         return &osd_req->r_ops[which].raw_data_in;
170 }
171
172 struct ceph_osd_data *
173 osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
174                         unsigned int which)
175 {
176         return osd_req_op_data(osd_req, which, extent, osd_data);
177 }
178 EXPORT_SYMBOL(osd_req_op_extent_osd_data);
179
180 void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
181                         unsigned int which, struct page **pages,
182                         u64 length, u32 alignment,
183                         bool pages_from_pool, bool own_pages)
184 {
185         struct ceph_osd_data *osd_data;
186
187         osd_data = osd_req_op_raw_data_in(osd_req, which);
188         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
189                                 pages_from_pool, own_pages);
190 }
191 EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);
192
193 void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
194                         unsigned int which, struct page **pages,
195                         u64 length, u32 alignment,
196                         bool pages_from_pool, bool own_pages)
197 {
198         struct ceph_osd_data *osd_data;
199
200         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
201         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
202                                 pages_from_pool, own_pages);
203 }
204 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
205
206 void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
207                         unsigned int which, struct ceph_pagelist *pagelist)
208 {
209         struct ceph_osd_data *osd_data;
210
211         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
212         ceph_osd_data_pagelist_init(osd_data, pagelist);
213 }
214 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
215
216 #ifdef CONFIG_BLOCK
217 void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
218                         unsigned int which, struct bio *bio, size_t bio_length)
219 {
220         struct ceph_osd_data *osd_data;
221
222         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
223         ceph_osd_data_bio_init(osd_data, bio, bio_length);
224 }
225 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
226 #endif /* CONFIG_BLOCK */
227
228 static void osd_req_op_cls_request_info_pagelist(
229                         struct ceph_osd_request *osd_req,
230                         unsigned int which, struct ceph_pagelist *pagelist)
231 {
232         struct ceph_osd_data *osd_data;
233
234         osd_data = osd_req_op_data(osd_req, which, cls, request_info);
235         ceph_osd_data_pagelist_init(osd_data, pagelist);
236 }
237
238 void osd_req_op_cls_request_data_pagelist(
239                         struct ceph_osd_request *osd_req,
240                         unsigned int which, struct ceph_pagelist *pagelist)
241 {
242         struct ceph_osd_data *osd_data;
243
244         osd_data = osd_req_op_data(osd_req, which, cls, request_data);
245         ceph_osd_data_pagelist_init(osd_data, pagelist);
246         osd_req->r_ops[which].cls.indata_len += pagelist->length;
247         osd_req->r_ops[which].indata_len += pagelist->length;
248 }
249 EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
250
251 void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
252                         unsigned int which, struct page **pages, u64 length,
253                         u32 alignment, bool pages_from_pool, bool own_pages)
254 {
255         struct ceph_osd_data *osd_data;
256
257         osd_data = osd_req_op_data(osd_req, which, cls, request_data);
258         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
259                                 pages_from_pool, own_pages);
260         osd_req->r_ops[which].cls.indata_len += length;
261         osd_req->r_ops[which].indata_len += length;
262 }
263 EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
264
265 void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
266                         unsigned int which, struct page **pages, u64 length,
267                         u32 alignment, bool pages_from_pool, bool own_pages)
268 {
269         struct ceph_osd_data *osd_data;
270
271         osd_data = osd_req_op_data(osd_req, which, cls, response_data);
272         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
273                                 pages_from_pool, own_pages);
274 }
275 EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
276
277 static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
278 {
279         switch (osd_data->type) {
280         case CEPH_OSD_DATA_TYPE_NONE:
281                 return 0;
282         case CEPH_OSD_DATA_TYPE_PAGES:
283                 return osd_data->length;
284         case CEPH_OSD_DATA_TYPE_PAGELIST:
285                 return (u64)osd_data->pagelist->length;
286 #ifdef CONFIG_BLOCK
287         case CEPH_OSD_DATA_TYPE_BIO:
288                 return (u64)osd_data->bio_length;
289 #endif /* CONFIG_BLOCK */
290         default:
291                 WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
292                 return 0;
293         }
294 }
295
296 static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
297 {
298         if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
299                 int num_pages;
300
301                 num_pages = calc_pages_for((u64)osd_data->alignment,
302                                                 (u64)osd_data->length);
303                 ceph_release_page_vector(osd_data->pages, num_pages);
304         }
305         ceph_osd_data_init(osd_data);
306 }
307
308 static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
309                         unsigned int which)
310 {
311         struct ceph_osd_req_op *op;
312
313         BUG_ON(which >= osd_req->r_num_ops);
314         op = &osd_req->r_ops[which];
315
316         switch (op->op) {
317         case CEPH_OSD_OP_READ:
318         case CEPH_OSD_OP_WRITE:
319         case CEPH_OSD_OP_WRITEFULL:
320                 ceph_osd_data_release(&op->extent.osd_data);
321                 break;
322         case CEPH_OSD_OP_CALL:
323                 ceph_osd_data_release(&op->cls.request_info);
324                 ceph_osd_data_release(&op->cls.request_data);
325                 ceph_osd_data_release(&op->cls.response_data);
326                 break;
327         case CEPH_OSD_OP_SETXATTR:
328         case CEPH_OSD_OP_CMPXATTR:
329                 ceph_osd_data_release(&op->xattr.osd_data);
330                 break;
331         case CEPH_OSD_OP_STAT:
332                 ceph_osd_data_release(&op->raw_data_in);
333                 break;
334         case CEPH_OSD_OP_NOTIFY_ACK:
335                 ceph_osd_data_release(&op->notify_ack.request_data);
336                 break;
337         case CEPH_OSD_OP_NOTIFY:
338                 ceph_osd_data_release(&op->notify.request_data);
339                 ceph_osd_data_release(&op->notify.response_data);
340                 break;
341         case CEPH_OSD_OP_LIST_WATCHERS:
342                 ceph_osd_data_release(&op->list_watchers.response_data);
343                 break;
344         default:
345                 break;
346         }
347 }
348
349 /*
350  * Assumes @t is zero-initialized.
351  */
352 static void target_init(struct ceph_osd_request_target *t)
353 {
354         ceph_oid_init(&t->base_oid);
355         ceph_oloc_init(&t->base_oloc);
356         ceph_oid_init(&t->target_oid);
357         ceph_oloc_init(&t->target_oloc);
358
359         ceph_osds_init(&t->acting);
360         ceph_osds_init(&t->up);
361         t->size = -1;
362         t->min_size = -1;
363
364         t->osd = CEPH_HOMELESS_OSD;
365 }
366
367 static void target_copy(struct ceph_osd_request_target *dest,
368                         const struct ceph_osd_request_target *src)
369 {
370         ceph_oid_copy(&dest->base_oid, &src->base_oid);
371         ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
372         ceph_oid_copy(&dest->target_oid, &src->target_oid);
373         ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);
374
375         dest->pgid = src->pgid; /* struct */
376         dest->pg_num = src->pg_num;
377         dest->pg_num_mask = src->pg_num_mask;
378         ceph_osds_copy(&dest->acting, &src->acting);
379         ceph_osds_copy(&dest->up, &src->up);
380         dest->size = src->size;
381         dest->min_size = src->min_size;
382         dest->sort_bitwise = src->sort_bitwise;
383
384         dest->flags = src->flags;
385         dest->paused = src->paused;
386
387         dest->osd = src->osd;
388 }
389
390 static void target_destroy(struct ceph_osd_request_target *t)
391 {
392         ceph_oid_destroy(&t->base_oid);
393         ceph_oloc_destroy(&t->base_oloc);
394         ceph_oid_destroy(&t->target_oid);
395         ceph_oloc_destroy(&t->target_oloc);
396 }
397
398 /*
399  * requests
400  */
401 static void request_release_checks(struct ceph_osd_request *req)
402 {
403         WARN_ON(!RB_EMPTY_NODE(&req->r_node));
404         WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
405         WARN_ON(!list_empty(&req->r_unsafe_item));
406         WARN_ON(req->r_osd);
407 }
408
409 static void ceph_osdc_release_request(struct kref *kref)
410 {
411         struct ceph_osd_request *req = container_of(kref,
412                                             struct ceph_osd_request, r_kref);
413         unsigned int which;
414
415         dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
416              req->r_request, req->r_reply);
417         request_release_checks(req);
418
419         if (req->r_request)
420                 ceph_msg_put(req->r_request);
421         if (req->r_reply)
422                 ceph_msg_put(req->r_reply);
423
424         for (which = 0; which < req->r_num_ops; which++)
425                 osd_req_op_data_release(req, which);
426
427         target_destroy(&req->r_t);
428         ceph_put_snap_context(req->r_snapc);
429
430         if (req->r_mempool)
431                 mempool_free(req, req->r_osdc->req_mempool);
432         else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
433                 kmem_cache_free(ceph_osd_request_cache, req);
434         else
435                 kfree(req);
436 }
437
438 void ceph_osdc_get_request(struct ceph_osd_request *req)
439 {
440         dout("%s %p (was %d)\n", __func__, req,
441              atomic_read(&req->r_kref.refcount));
442         kref_get(&req->r_kref);
443 }
444 EXPORT_SYMBOL(ceph_osdc_get_request);
445
446 void ceph_osdc_put_request(struct ceph_osd_request *req)
447 {
448         if (req) {
449                 dout("%s %p (was %d)\n", __func__, req,
450                      atomic_read(&req->r_kref.refcount));
451                 kref_put(&req->r_kref, ceph_osdc_release_request);
452         }
453 }
454 EXPORT_SYMBOL(ceph_osdc_put_request);
455
456 static void request_init(struct ceph_osd_request *req)
457 {
458         /* req only, each op is zeroed in _osd_req_op_init() */
459         memset(req, 0, sizeof(*req));
460
461         kref_init(&req->r_kref);
462         init_completion(&req->r_completion);
463         init_completion(&req->r_safe_completion);
464         RB_CLEAR_NODE(&req->r_node);
465         RB_CLEAR_NODE(&req->r_mc_node);
466         INIT_LIST_HEAD(&req->r_unsafe_item);
467
468         target_init(&req->r_t);
469 }
470
471 /*
472  * This is ugly, but it allows us to reuse linger registration and ping
473  * requests, keeping the structure of the code around send_linger{_ping}()
474  * reasonable.  Setting up a min_nr=2 mempool for each linger request
475  * and dealing with copying ops (this blasts req only, watch op remains
476  * intact) isn't any better.
477  */
478 static void request_reinit(struct ceph_osd_request *req)
479 {
480         struct ceph_osd_client *osdc = req->r_osdc;
481         bool mempool = req->r_mempool;
482         unsigned int num_ops = req->r_num_ops;
483         u64 snapid = req->r_snapid;
484         struct ceph_snap_context *snapc = req->r_snapc;
485         bool linger = req->r_linger;
486         struct ceph_msg *request_msg = req->r_request;
487         struct ceph_msg *reply_msg = req->r_reply;
488
489         dout("%s req %p\n", __func__, req);
490         WARN_ON(atomic_read(&req->r_kref.refcount) != 1);
491         request_release_checks(req);
492
493         WARN_ON(atomic_read(&request_msg->kref.refcount) != 1);
494         WARN_ON(atomic_read(&reply_msg->kref.refcount) != 1);
495         target_destroy(&req->r_t);
496
497         request_init(req);
498         req->r_osdc = osdc;
499         req->r_mempool = mempool;
500         req->r_num_ops = num_ops;
501         req->r_snapid = snapid;
502         req->r_snapc = snapc;
503         req->r_linger = linger;
504         req->r_request = request_msg;
505         req->r_reply = reply_msg;
506 }
507
508 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
509                                                struct ceph_snap_context *snapc,
510                                                unsigned int num_ops,
511                                                bool use_mempool,
512                                                gfp_t gfp_flags)
513 {
514         struct ceph_osd_request *req;
515
516         if (use_mempool) {
517                 BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
518                 req = mempool_alloc(osdc->req_mempool, gfp_flags);
519         } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
520                 req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
521         } else {
522                 BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
523                 req = kmalloc(sizeof(*req) + num_ops * sizeof(req->r_ops[0]),
524                               gfp_flags);
525         }
526         if (unlikely(!req))
527                 return NULL;
528
529         request_init(req);
530         req->r_osdc = osdc;
531         req->r_mempool = use_mempool;
532         req->r_num_ops = num_ops;
533         req->r_snapid = CEPH_NOSNAP;
534         req->r_snapc = ceph_get_snap_context(snapc);
535
536         dout("%s req %p\n", __func__, req);
537         return req;
538 }
539 EXPORT_SYMBOL(ceph_osdc_alloc_request);
540
541 static int ceph_oloc_encoding_size(struct ceph_object_locator *oloc)
542 {
543         return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
544 }
545
546 int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
547 {
548         struct ceph_osd_client *osdc = req->r_osdc;
549         struct ceph_msg *msg;
550         int msg_size;
551
552         WARN_ON(ceph_oid_empty(&req->r_base_oid));
553         WARN_ON(ceph_oloc_empty(&req->r_base_oloc));
554
555         /* create request message */
556         msg_size = 4 + 4 + 4; /* client_inc, osdmap_epoch, flags */
557         msg_size += 4 + 4 + 4 + 8; /* mtime, reassert_version */
558         msg_size += CEPH_ENCODING_START_BLK_LEN +
559                         ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
560         msg_size += 1 + 8 + 4 + 4; /* pgid */
561         msg_size += 4 + req->r_base_oid.name_len; /* oid */
562         msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
563         msg_size += 8; /* snapid */
564         msg_size += 8; /* snap_seq */
565         msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
566         msg_size += 4; /* retry_attempt */
567
568         if (req->r_mempool)
569                 msg = ceph_msgpool_get(&osdc->msgpool_op, 0);
570         else
571                 msg = ceph_msg_new(CEPH_MSG_OSD_OP, msg_size, gfp, true);
572         if (!msg)
573                 return -ENOMEM;
574
575         memset(msg->front.iov_base, 0, msg->front.iov_len);
576         req->r_request = msg;
577
578         /* create reply message */
579         msg_size = OSD_OPREPLY_FRONT_LEN;
580         msg_size += req->r_base_oid.name_len;
581         msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);
582
583         if (req->r_mempool)
584                 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, 0);
585         else
586                 msg = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, msg_size, gfp, true);
587         if (!msg)
588                 return -ENOMEM;
589
590         req->r_reply = msg;
591
592         return 0;
593 }
594 EXPORT_SYMBOL(ceph_osdc_alloc_messages);
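
/*
 * Rough usage sketch for the two allocation steps above (not lifted
 * from any particular caller; names and values are illustrative only):
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *	if (!req)
 *		return -ENOMEM;
 *	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, off, len, 0, 0);
 *	req->r_base_oloc.pool = pool_id;
 *	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", ino, objnum);
 *	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 *	if (ret)
 *		ceph_osdc_put_request(req);
 *
 * i.e. the base oid/oloc and the ops must be filled in before
 * ceph_osdc_alloc_messages() sizes and allocates the request and reply
 * messages.
 */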
595
596 static bool osd_req_opcode_valid(u16 opcode)
597 {
598         switch (opcode) {
599 #define GENERATE_CASE(op, opcode, str)  case CEPH_OSD_OP_##op: return true;
600 __CEPH_FORALL_OSD_OPS(GENERATE_CASE)
601 #undef GENERATE_CASE
602         default:
603                 return false;
604         }
605 }
606
607 /*
608  * This is an osd op init function for opcodes that have no data or
609  * other information associated with them.  It also serves as a
610  * common init routine for all the other init functions, below.
611  */
612 static struct ceph_osd_req_op *
613 _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
614                  u16 opcode, u32 flags)
615 {
616         struct ceph_osd_req_op *op;
617
618         BUG_ON(which >= osd_req->r_num_ops);
619         BUG_ON(!osd_req_opcode_valid(opcode));
620
621         op = &osd_req->r_ops[which];
622         memset(op, 0, sizeof (*op));
623         op->op = opcode;
624         op->flags = flags;
625
626         return op;
627 }
628
629 void osd_req_op_init(struct ceph_osd_request *osd_req,
630                      unsigned int which, u16 opcode, u32 flags)
631 {
632         (void)_osd_req_op_init(osd_req, which, opcode, flags);
633 }
634 EXPORT_SYMBOL(osd_req_op_init);
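
/*
 * For example, a stat op carries no request payload and needs no extra
 * fields (illustrative only):
 *
 *	osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);
 */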
635
636 void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
637                                 unsigned int which, u16 opcode,
638                                 u64 offset, u64 length,
639                                 u64 truncate_size, u32 truncate_seq)
640 {
641         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
642                                                       opcode, 0);
643         size_t payload_len = 0;
644
645         BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
646                opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
647                opcode != CEPH_OSD_OP_TRUNCATE);
648
649         op->extent.offset = offset;
650         op->extent.length = length;
651         op->extent.truncate_size = truncate_size;
652         op->extent.truncate_seq = truncate_seq;
653         if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
654                 payload_len += length;
655
656         op->indata_len = payload_len;
657 }
658 EXPORT_SYMBOL(osd_req_op_extent_init);
659
660 void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
661                                 unsigned int which, u64 length)
662 {
663         struct ceph_osd_req_op *op;
664         u64 previous;
665
666         BUG_ON(which >= osd_req->r_num_ops);
667         op = &osd_req->r_ops[which];
668         previous = op->extent.length;
669
670         if (length == previous)
671                 return;         /* Nothing to do */
672         BUG_ON(length > previous);
673
674         op->extent.length = length;
675         op->indata_len -= previous - length;
676 }
677 EXPORT_SYMBOL(osd_req_op_extent_update);
678
679 void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
680                                 unsigned int which, u64 offset_inc)
681 {
682         struct ceph_osd_req_op *op, *prev_op;
683
684         BUG_ON(which + 1 >= osd_req->r_num_ops);
685
686         prev_op = &osd_req->r_ops[which];
687         op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
688         /* dup previous one */
689         op->indata_len = prev_op->indata_len;
690         op->outdata_len = prev_op->outdata_len;
691         op->extent = prev_op->extent;
692         /* adjust offset */
693         op->extent.offset += offset_inc;
694         op->extent.length -= offset_inc;
695
696         if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
697                 op->indata_len -= offset_inc;
698 }
699 EXPORT_SYMBOL(osd_req_op_extent_dup_last);
700
701 void osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
702                         u16 opcode, const char *class, const char *method)
703 {
704         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
705                                                       opcode, 0);
706         struct ceph_pagelist *pagelist;
707         size_t payload_len = 0;
708         size_t size;
709
710         BUG_ON(opcode != CEPH_OSD_OP_CALL);
711
712         pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
713         BUG_ON(!pagelist);
714         ceph_pagelist_init(pagelist);
715
716         op->cls.class_name = class;
717         size = strlen(class);
718         BUG_ON(size > (size_t) U8_MAX);
719         op->cls.class_len = size;
720         ceph_pagelist_append(pagelist, class, size);
721         payload_len += size;
722
723         op->cls.method_name = method;
724         size = strlen(method);
725         BUG_ON(size > (size_t) U8_MAX);
726         op->cls.method_len = size;
727         ceph_pagelist_append(pagelist, method, size);
728         payload_len += size;
729
730         osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
731
732         op->indata_len = payload_len;
733 }
734 EXPORT_SYMBOL(osd_req_op_cls_init);
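
/*
 * A hypothetical class method call (class/method names made up for
 * illustration); method input and output, if any, are attached with the
 * helpers above:
 *
 *	osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, "foo", "bar");
 *	osd_req_op_cls_request_data_pagelist(req, 0, pagelist);
 *	osd_req_op_cls_response_data_pages(req, 0, pages, PAGE_SIZE, 0,
 *					   false, false);
 */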
735
736 int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
737                           u16 opcode, const char *name, const void *value,
738                           size_t size, u8 cmp_op, u8 cmp_mode)
739 {
740         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
741                                                       opcode, 0);
742         struct ceph_pagelist *pagelist;
743         size_t payload_len;
744
745         BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
746
747         pagelist = kmalloc(sizeof(*pagelist), GFP_NOFS);
748         if (!pagelist)
749                 return -ENOMEM;
750
751         ceph_pagelist_init(pagelist);
752
753         payload_len = strlen(name);
754         op->xattr.name_len = payload_len;
755         ceph_pagelist_append(pagelist, name, payload_len);
756
757         op->xattr.value_len = size;
758         ceph_pagelist_append(pagelist, value, size);
759         payload_len += size;
760
761         op->xattr.cmp_op = cmp_op;
762         op->xattr.cmp_mode = cmp_mode;
763
764         ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
765         op->indata_len = payload_len;
766         return 0;
767 }
768 EXPORT_SYMBOL(osd_req_op_xattr_init);
769
770 /*
771  * @watch_opcode: CEPH_OSD_WATCH_OP_*
772  */
773 static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
774                                   u64 cookie, u8 watch_opcode)
775 {
776         struct ceph_osd_req_op *op;
777
778         op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
779         op->watch.cookie = cookie;
780         op->watch.op = watch_opcode;
781         op->watch.gen = 0;
782 }
783
784 void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
785                                 unsigned int which,
786                                 u64 expected_object_size,
787                                 u64 expected_write_size)
788 {
789         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
790                                                       CEPH_OSD_OP_SETALLOCHINT,
791                                                       0);
792
793         op->alloc_hint.expected_object_size = expected_object_size;
794         op->alloc_hint.expected_write_size = expected_write_size;
795
796         /*
797          * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
798          * not worth a feature bit.  Set FAILOK per-op flag to make
799          * sure older osds don't trip over an unsupported opcode.
800          */
801         op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
802 }
803 EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
804
805 static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
806                                 struct ceph_osd_data *osd_data)
807 {
808         u64 length = ceph_osd_data_length(osd_data);
809
810         if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
811                 BUG_ON(length > (u64) SIZE_MAX);
812                 if (length)
813                         ceph_msg_data_add_pages(msg, osd_data->pages,
814                                         length, osd_data->alignment);
815         } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
816                 BUG_ON(!length);
817                 ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
818 #ifdef CONFIG_BLOCK
819         } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
820                 ceph_msg_data_add_bio(msg, osd_data->bio, length);
821 #endif
822         } else {
823                 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
824         }
825 }
826
827 static u32 osd_req_encode_op(struct ceph_osd_op *dst,
828                              const struct ceph_osd_req_op *src)
829 {
830         if (WARN_ON(!osd_req_opcode_valid(src->op))) {
831                 pr_err("unrecognized osd opcode %d\n", src->op);
832
833                 return 0;
834         }
835
836         switch (src->op) {
837         case CEPH_OSD_OP_STAT:
838                 break;
839         case CEPH_OSD_OP_READ:
840         case CEPH_OSD_OP_WRITE:
841         case CEPH_OSD_OP_WRITEFULL:
842         case CEPH_OSD_OP_ZERO:
843         case CEPH_OSD_OP_TRUNCATE:
844                 dst->extent.offset = cpu_to_le64(src->extent.offset);
845                 dst->extent.length = cpu_to_le64(src->extent.length);
846                 dst->extent.truncate_size =
847                         cpu_to_le64(src->extent.truncate_size);
848                 dst->extent.truncate_seq =
849                         cpu_to_le32(src->extent.truncate_seq);
850                 break;
851         case CEPH_OSD_OP_CALL:
852                 dst->cls.class_len = src->cls.class_len;
853                 dst->cls.method_len = src->cls.method_len;
854                 dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
855                 break;
856         case CEPH_OSD_OP_STARTSYNC:
857                 break;
858         case CEPH_OSD_OP_WATCH:
859                 dst->watch.cookie = cpu_to_le64(src->watch.cookie);
860                 dst->watch.ver = cpu_to_le64(0);
861                 dst->watch.op = src->watch.op;
862                 dst->watch.gen = cpu_to_le32(src->watch.gen);
863                 break;
864         case CEPH_OSD_OP_NOTIFY_ACK:
865                 break;
866         case CEPH_OSD_OP_NOTIFY:
867                 dst->notify.cookie = cpu_to_le64(src->notify.cookie);
868                 break;
869         case CEPH_OSD_OP_LIST_WATCHERS:
870                 break;
871         case CEPH_OSD_OP_SETALLOCHINT:
872                 dst->alloc_hint.expected_object_size =
873                     cpu_to_le64(src->alloc_hint.expected_object_size);
874                 dst->alloc_hint.expected_write_size =
875                     cpu_to_le64(src->alloc_hint.expected_write_size);
876                 break;
877         case CEPH_OSD_OP_SETXATTR:
878         case CEPH_OSD_OP_CMPXATTR:
879                 dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
880                 dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
881                 dst->xattr.cmp_op = src->xattr.cmp_op;
882                 dst->xattr.cmp_mode = src->xattr.cmp_mode;
883                 break;
884         case CEPH_OSD_OP_CREATE:
885         case CEPH_OSD_OP_DELETE:
886                 break;
887         default:
888                 pr_err("unsupported osd opcode %s\n",
889                         ceph_osd_op_name(src->op));
890                 WARN_ON(1);
891
892                 return 0;
893         }
894
895         dst->op = cpu_to_le16(src->op);
896         dst->flags = cpu_to_le32(src->flags);
897         dst->payload_len = cpu_to_le32(src->indata_len);
898
899         return src->indata_len;
900 }
901
902 /*
903  * build new request AND message, calculate layout, and adjust file
904  * extent as needed.
905  *
906  * if the file was recently truncated, we include information about its
907  * old and new size so that the object can be updated appropriately.  (we
908  * avoid synchronously deleting truncated objects because it's slow.)
912  */
913 struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
914                                                struct ceph_file_layout *layout,
915                                                struct ceph_vino vino,
916                                                u64 off, u64 *plen,
917                                                unsigned int which, int num_ops,
918                                                int opcode, int flags,
919                                                struct ceph_snap_context *snapc,
920                                                u32 truncate_seq,
921                                                u64 truncate_size,
922                                                bool use_mempool)
923 {
924         struct ceph_osd_request *req;
925         u64 objnum = 0;
926         u64 objoff = 0;
927         u64 objlen = 0;
928         int r;
929
930         BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
931                opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
932                opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
933
934         req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
935                                         GFP_NOFS);
936         if (!req) {
937                 r = -ENOMEM;
938                 goto fail;
939         }
940
941         /* calculate max write size */
942         r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
943         if (r)
944                 goto fail;
945
946         if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
947                 osd_req_op_init(req, which, opcode, 0);
948         } else {
949                 u32 object_size = layout->object_size;
950                 u32 object_base = off - objoff;
951                 if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
952                         if (truncate_size <= object_base) {
953                                 truncate_size = 0;
954                         } else {
955                                 truncate_size -= object_base;
956                                 if (truncate_size > object_size)
957                                         truncate_size = object_size;
958                         }
959                 }
960                 osd_req_op_extent_init(req, which, opcode, objoff, objlen,
961                                        truncate_size, truncate_seq);
962         }
963
964         req->r_flags = flags;
965         req->r_base_oloc.pool = layout->pool_id;
966         req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
967         ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
968
969         req->r_snapid = vino.snap;
970         if (flags & CEPH_OSD_FLAG_WRITE)
971                 req->r_data_offset = off;
972
973         r = ceph_osdc_alloc_messages(req, GFP_NOFS);
974         if (r)
975                 goto fail;
976
977         return req;
978
979 fail:
980         ceph_osdc_put_request(req);
981         return ERR_PTR(r);
982 }
983 EXPORT_SYMBOL(ceph_osdc_new_request);
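
/*
 * Typical single-object read, roughly (a sketch, not copied from a real
 * caller; ceph_osdc_start_request()/ceph_osdc_wait_request() are
 * defined further down in this file):
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, truncate_seq, truncate_size,
 *				    false);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
 *					 false, false);
 *	ret = ceph_osdc_start_request(osdc, req, false);
 *	if (!ret)
 *		ret = ceph_osdc_wait_request(osdc, req);
 *	ceph_osdc_put_request(req);
 */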
984
985 /*
986  * We keep osd requests in an rbtree, sorted by ->r_tid.
987  */
988 DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
989 DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
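/*
 * DEFINE_RB_FUNCS() (from linux/ceph/libceph.h) expands to the
 * lookup_request()/insert_request()/erase_request() helpers (and their
 * _mc counterparts) used below.
 */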
990
991 static bool osd_homeless(struct ceph_osd *osd)
992 {
993         return osd->o_osd == CEPH_HOMELESS_OSD;
994 }
995
996 static bool osd_registered(struct ceph_osd *osd)
997 {
998         verify_osdc_locked(osd->o_osdc);
999
1000         return !RB_EMPTY_NODE(&osd->o_node);
1001 }
1002
1003 /*
1004  * Assumes @osd is zero-initialized.
1005  */
1006 static void osd_init(struct ceph_osd *osd)
1007 {
1008         atomic_set(&osd->o_ref, 1);
1009         RB_CLEAR_NODE(&osd->o_node);
1010         osd->o_requests = RB_ROOT;
1011         osd->o_linger_requests = RB_ROOT;
1012         INIT_LIST_HEAD(&osd->o_osd_lru);
1013         INIT_LIST_HEAD(&osd->o_keepalive_item);
1014         osd->o_incarnation = 1;
1015         mutex_init(&osd->lock);
1016 }
1017
1018 static void osd_cleanup(struct ceph_osd *osd)
1019 {
1020         WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
1021         WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
1022         WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
1023         WARN_ON(!list_empty(&osd->o_osd_lru));
1024         WARN_ON(!list_empty(&osd->o_keepalive_item));
1025
1026         if (osd->o_auth.authorizer) {
1027                 WARN_ON(osd_homeless(osd));
1028                 ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
1029         }
1030 }
1031
1032 /*
1033  * Track open sessions with osds.
1034  */
1035 static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
1036 {
1037         struct ceph_osd *osd;
1038
1039         WARN_ON(onum == CEPH_HOMELESS_OSD);
1040
1041         osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
1042         osd_init(osd);
1043         osd->o_osdc = osdc;
1044         osd->o_osd = onum;
1045
1046         ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
1047
1048         return osd;
1049 }
1050
1051 static struct ceph_osd *get_osd(struct ceph_osd *osd)
1052 {
1053         if (atomic_inc_not_zero(&osd->o_ref)) {
1054                 dout("get_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref)-1,
1055                      atomic_read(&osd->o_ref));
1056                 return osd;
1057         } else {
1058                 dout("get_osd %p FAIL\n", osd);
1059                 return NULL;
1060         }
1061 }
1062
1063 static void put_osd(struct ceph_osd *osd)
1064 {
1065         dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
1066              atomic_read(&osd->o_ref) - 1);
1067         if (atomic_dec_and_test(&osd->o_ref)) {
1068                 osd_cleanup(osd);
1069                 kfree(osd);
1070         }
1071 }
1072
1073 DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)
1074
1075 static void __move_osd_to_lru(struct ceph_osd *osd)
1076 {
1077         struct ceph_osd_client *osdc = osd->o_osdc;
1078
1079         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1080         BUG_ON(!list_empty(&osd->o_osd_lru));
1081
1082         spin_lock(&osdc->osd_lru_lock);
1083         list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
1084         spin_unlock(&osdc->osd_lru_lock);
1085
1086         osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
1087 }
1088
1089 static void maybe_move_osd_to_lru(struct ceph_osd *osd)
1090 {
1091         if (RB_EMPTY_ROOT(&osd->o_requests) &&
1092             RB_EMPTY_ROOT(&osd->o_linger_requests))
1093                 __move_osd_to_lru(osd);
1094 }
1095
1096 static void __remove_osd_from_lru(struct ceph_osd *osd)
1097 {
1098         struct ceph_osd_client *osdc = osd->o_osdc;
1099
1100         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1101
1102         spin_lock(&osdc->osd_lru_lock);
1103         if (!list_empty(&osd->o_osd_lru))
1104                 list_del_init(&osd->o_osd_lru);
1105         spin_unlock(&osdc->osd_lru_lock);
1106 }
1107
1108 /*
1109  * Close the connection and assign any leftover requests to the
1110  * homeless session.
1111  */
1112 static void close_osd(struct ceph_osd *osd)
1113 {
1114         struct ceph_osd_client *osdc = osd->o_osdc;
1115         struct rb_node *n;
1116
1117         verify_osdc_wrlocked(osdc);
1118         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1119
1120         ceph_con_close(&osd->o_con);
1121
1122         for (n = rb_first(&osd->o_requests); n; ) {
1123                 struct ceph_osd_request *req =
1124                     rb_entry(n, struct ceph_osd_request, r_node);
1125
1126                 n = rb_next(n); /* unlink_request() */
1127
1128                 dout(" reassigning req %p tid %llu\n", req, req->r_tid);
1129                 unlink_request(osd, req);
1130                 link_request(&osdc->homeless_osd, req);
1131         }
1132         for (n = rb_first(&osd->o_linger_requests); n; ) {
1133                 struct ceph_osd_linger_request *lreq =
1134                     rb_entry(n, struct ceph_osd_linger_request, node);
1135
1136                 n = rb_next(n); /* unlink_linger() */
1137
1138                 dout(" reassigning lreq %p linger_id %llu\n", lreq,
1139                      lreq->linger_id);
1140                 unlink_linger(osd, lreq);
1141                 link_linger(&osdc->homeless_osd, lreq);
1142         }
1143
1144         __remove_osd_from_lru(osd);
1145         erase_osd(&osdc->osds, osd);
1146         put_osd(osd);
1147 }
1148
1149 /*
1150  * reset osd connect
1151  */
1152 static int reopen_osd(struct ceph_osd *osd)
1153 {
1154         struct ceph_entity_addr *peer_addr;
1155
1156         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1157
1158         if (RB_EMPTY_ROOT(&osd->o_requests) &&
1159             RB_EMPTY_ROOT(&osd->o_linger_requests)) {
1160                 close_osd(osd);
1161                 return -ENODEV;
1162         }
1163
1164         peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
1165         if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
1166                         !ceph_con_opened(&osd->o_con)) {
1167                 struct rb_node *n;
1168
1169                 dout("osd addr hasn't changed and connection never opened, "
1170                      "letting msgr retry\n");
1171                 /* touch each r_stamp for handle_timeout()'s benefit */
1172                 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
1173                         struct ceph_osd_request *req =
1174                             rb_entry(n, struct ceph_osd_request, r_node);
1175                         req->r_stamp = jiffies;
1176                 }
1177
1178                 return -EAGAIN;
1179         }
1180
1181         ceph_con_close(&osd->o_con);
1182         ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
1183         osd->o_incarnation++;
1184
1185         return 0;
1186 }
1187
1188 static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
1189                                           bool wrlocked)
1190 {
1191         struct ceph_osd *osd;
1192
1193         if (wrlocked)
1194                 verify_osdc_wrlocked(osdc);
1195         else
1196                 verify_osdc_locked(osdc);
1197
1198         if (o != CEPH_HOMELESS_OSD)
1199                 osd = lookup_osd(&osdc->osds, o);
1200         else
1201                 osd = &osdc->homeless_osd;
1202         if (!osd) {
1203                 if (!wrlocked)
1204                         return ERR_PTR(-EAGAIN);
1205
1206                 osd = create_osd(osdc, o);
1207                 insert_osd(&osdc->osds, osd);
1208                 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
1209                               &osdc->osdmap->osd_addr[osd->o_osd]);
1210         }
1211
1212         dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
1213         return osd;
1214 }
1215
1216 /*
1217  * Create request <-> OSD session relation.
1218  *
1219  * @req has to be assigned a tid, @osd may be homeless.
1220  */
1221 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1222 {
1223         verify_osd_locked(osd);
1224         WARN_ON(!req->r_tid || req->r_osd);
1225         dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1226              req, req->r_tid);
1227
1228         if (!osd_homeless(osd))
1229                 __remove_osd_from_lru(osd);
1230         else
1231                 atomic_inc(&osd->o_osdc->num_homeless);
1232
1233         get_osd(osd);
1234         insert_request(&osd->o_requests, req);
1235         req->r_osd = osd;
1236 }
1237
1238 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1239 {
1240         verify_osd_locked(osd);
1241         WARN_ON(req->r_osd != osd);
1242         dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1243              req, req->r_tid);
1244
1245         req->r_osd = NULL;
1246         erase_request(&osd->o_requests, req);
1247         put_osd(osd);
1248
1249         if (!osd_homeless(osd))
1250                 maybe_move_osd_to_lru(osd);
1251         else
1252                 atomic_dec(&osd->o_osdc->num_homeless);
1253 }
1254
1255 static bool __pool_full(struct ceph_pg_pool_info *pi)
1256 {
1257         return pi->flags & CEPH_POOL_FLAG_FULL;
1258 }
1259
1260 static bool have_pool_full(struct ceph_osd_client *osdc)
1261 {
1262         struct rb_node *n;
1263
1264         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
1265                 struct ceph_pg_pool_info *pi =
1266                     rb_entry(n, struct ceph_pg_pool_info, node);
1267
1268                 if (__pool_full(pi))
1269                         return true;
1270         }
1271
1272         return false;
1273 }
1274
1275 static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
1276 {
1277         struct ceph_pg_pool_info *pi;
1278
1279         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
1280         if (!pi)
1281                 return false;
1282
1283         return __pool_full(pi);
1284 }
1285
1286 /*
1287  * Returns whether a request should be blocked from being sent
1288  * based on the current osdmap and osd_client settings.
1289  */
1290 static bool target_should_be_paused(struct ceph_osd_client *osdc,
1291                                     const struct ceph_osd_request_target *t,
1292                                     struct ceph_pg_pool_info *pi)
1293 {
1294         bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
1295         bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
1296                        ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1297                        __pool_full(pi);
1298
1299         WARN_ON(pi->id != t->base_oloc.pool);
1300         return (t->flags & CEPH_OSD_FLAG_READ && pauserd) ||
1301                (t->flags & CEPH_OSD_FLAG_WRITE && pausewr);
1302 }
1303
1304 enum calc_target_result {
1305         CALC_TARGET_NO_ACTION = 0,
1306         CALC_TARGET_NEED_RESEND,
1307         CALC_TARGET_POOL_DNE,
1308 };
1309
1310 static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1311                                            struct ceph_osd_request_target *t,
1312                                            u32 *last_force_resend,
1313                                            bool any_change)
1314 {
1315         struct ceph_pg_pool_info *pi;
1316         struct ceph_pg pgid, last_pgid;
1317         struct ceph_osds up, acting;
1318         bool force_resend = false;
1319         bool need_check_tiering = false;
1320         bool need_resend = false;
1321         bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1322         enum calc_target_result ct_res;
1323         int ret;
1324
1325         pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
1326         if (!pi) {
1327                 t->osd = CEPH_HOMELESS_OSD;
1328                 ct_res = CALC_TARGET_POOL_DNE;
1329                 goto out;
1330         }
1331
1332         if (osdc->osdmap->epoch == pi->last_force_request_resend) {
1333                 if (last_force_resend &&
1334                     *last_force_resend < pi->last_force_request_resend) {
1335                         *last_force_resend = pi->last_force_request_resend;
1336                         force_resend = true;
1337                 } else if (!last_force_resend) {
1338                         force_resend = true;
1339                 }
1340         }
1341         if (ceph_oid_empty(&t->target_oid) || force_resend) {
1342                 ceph_oid_copy(&t->target_oid, &t->base_oid);
1343                 need_check_tiering = true;
1344         }
1345         if (ceph_oloc_empty(&t->target_oloc) || force_resend) {
1346                 ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
1347                 need_check_tiering = true;
1348         }
1349
1350         if (need_check_tiering &&
1351             (t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
1352                 if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
1353                         t->target_oloc.pool = pi->read_tier;
1354                 if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
1355                         t->target_oloc.pool = pi->write_tier;
1356         }
1357
1358         ret = ceph_object_locator_to_pg(osdc->osdmap, &t->target_oid,
1359                                         &t->target_oloc, &pgid);
1360         if (ret) {
1361                 WARN_ON(ret != -ENOENT);
1362                 t->osd = CEPH_HOMELESS_OSD;
1363                 ct_res = CALC_TARGET_POOL_DNE;
1364                 goto out;
1365         }
1366         last_pgid.pool = pgid.pool;
1367         last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
1368
1369         ceph_pg_to_up_acting_osds(osdc->osdmap, &pgid, &up, &acting);
1370         if (any_change &&
1371             ceph_is_new_interval(&t->acting,
1372                                  &acting,
1373                                  &t->up,
1374                                  &up,
1375                                  t->size,
1376                                  pi->size,
1377                                  t->min_size,
1378                                  pi->min_size,
1379                                  t->pg_num,
1380                                  pi->pg_num,
1381                                  t->sort_bitwise,
1382                                  sort_bitwise,
1383                                  &last_pgid))
1384                 force_resend = true;
1385
1386         if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1387                 t->paused = false;
1388                 need_resend = true;
1389         }
1390
1391         if (ceph_pg_compare(&t->pgid, &pgid) ||
1392             ceph_osds_changed(&t->acting, &acting, any_change) ||
1393             force_resend) {
1394                 t->pgid = pgid; /* struct */
1395                 ceph_osds_copy(&t->acting, &acting);
1396                 ceph_osds_copy(&t->up, &up);
1397                 t->size = pi->size;
1398                 t->min_size = pi->min_size;
1399                 t->pg_num = pi->pg_num;
1400                 t->pg_num_mask = pi->pg_num_mask;
1401                 t->sort_bitwise = sort_bitwise;
1402
1403                 t->osd = acting.primary;
1404                 need_resend = true;
1405         }
1406
1407         ct_res = need_resend ? CALC_TARGET_NEED_RESEND : CALC_TARGET_NO_ACTION;
1408 out:
1409         dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
1410         return ct_res;
1411 }
1412
1413 static void setup_request_data(struct ceph_osd_request *req,
1414                                struct ceph_msg *msg)
1415 {
1416         u32 data_len = 0;
1417         int i;
1418
1419         if (!list_empty(&msg->data))
1420                 return;
1421
1422         WARN_ON(msg->data_length);
1423         for (i = 0; i < req->r_num_ops; i++) {
1424                 struct ceph_osd_req_op *op = &req->r_ops[i];
1425
1426                 switch (op->op) {
1427                 /* request */
1428                 case CEPH_OSD_OP_WRITE:
1429                 case CEPH_OSD_OP_WRITEFULL:
1430                         WARN_ON(op->indata_len != op->extent.length);
1431                         ceph_osdc_msg_data_add(msg, &op->extent.osd_data);
1432                         break;
1433                 case CEPH_OSD_OP_SETXATTR:
1434                 case CEPH_OSD_OP_CMPXATTR:
1435                         WARN_ON(op->indata_len != op->xattr.name_len +
1436                                                   op->xattr.value_len);
1437                         ceph_osdc_msg_data_add(msg, &op->xattr.osd_data);
1438                         break;
1439                 case CEPH_OSD_OP_NOTIFY_ACK:
1440                         ceph_osdc_msg_data_add(msg,
1441                                                &op->notify_ack.request_data);
1442                         break;
1443
1444                 /* reply */
1445                 case CEPH_OSD_OP_STAT:
1446                         ceph_osdc_msg_data_add(req->r_reply,
1447                                                &op->raw_data_in);
1448                         break;
1449                 case CEPH_OSD_OP_READ:
1450                         ceph_osdc_msg_data_add(req->r_reply,
1451                                                &op->extent.osd_data);
1452                         break;
1453                 case CEPH_OSD_OP_LIST_WATCHERS:
1454                         ceph_osdc_msg_data_add(req->r_reply,
1455                                                &op->list_watchers.response_data);
1456                         break;
1457
1458                 /* both */
1459                 case CEPH_OSD_OP_CALL:
1460                         WARN_ON(op->indata_len != op->cls.class_len +
1461                                                   op->cls.method_len +
1462                                                   op->cls.indata_len);
1463                         ceph_osdc_msg_data_add(msg, &op->cls.request_info);
1464                         /* optional, can be NONE */
1465                         ceph_osdc_msg_data_add(msg, &op->cls.request_data);
1466                         /* optional, can be NONE */
1467                         ceph_osdc_msg_data_add(req->r_reply,
1468                                                &op->cls.response_data);
1469                         break;
1470                 case CEPH_OSD_OP_NOTIFY:
1471                         ceph_osdc_msg_data_add(msg,
1472                                                &op->notify.request_data);
1473                         ceph_osdc_msg_data_add(req->r_reply,
1474                                                &op->notify.response_data);
1475                         break;
1476                 }
1477
1478                 data_len += op->indata_len;
1479         }
1480
1481         WARN_ON(data_len != msg->data_length);
1482 }
1483
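     /*
      * Encode the front of an MOSDOp (v4) message for @req.  Roughly, the
      * layout produced below is:
      *
      *   client_inc | osdmap epoch | flags | mtime | reassert_version |
      *   object_locator | pgid | oid | ops[] | snapid | snap context |
      *   retry_attempt
      *
      * Op payloads are not encoded here; they are attached to the message
      * as data items by setup_request_data().
      */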
1484 static void encode_request(struct ceph_osd_request *req, struct ceph_msg *msg)
1485 {
1486         void *p = msg->front.iov_base;
1487         void *const end = p + msg->front_alloc_len;
1488         u32 data_len = 0;
1489         int i;
1490
1491         if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
1492                 /* snapshots aren't writeable */
1493                 WARN_ON(req->r_snapid != CEPH_NOSNAP);
1494         } else {
1495                 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
1496                         req->r_data_offset || req->r_snapc);
1497         }
1498
1499         setup_request_data(req, msg);
1500
1501         ceph_encode_32(&p, 1); /* client_inc, always 1 */
1502         ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
1503         ceph_encode_32(&p, req->r_flags);
1504         ceph_encode_timespec(p, &req->r_mtime);
1505         p += sizeof(struct ceph_timespec);
1506         /* aka reassert_version */
1507         memcpy(p, &req->r_replay_version, sizeof(req->r_replay_version));
1508         p += sizeof(req->r_replay_version);
1509
1510         /* oloc */
1511         ceph_start_encoding(&p, 5, 4,
1512                             ceph_oloc_encoding_size(&req->r_t.target_oloc));
1513         ceph_encode_64(&p, req->r_t.target_oloc.pool);
1514         ceph_encode_32(&p, -1); /* preferred */
1515         ceph_encode_32(&p, 0); /* key len */
1516         if (req->r_t.target_oloc.pool_ns)
1517                 ceph_encode_string(&p, end, req->r_t.target_oloc.pool_ns->str,
1518                                    req->r_t.target_oloc.pool_ns->len);
1519         else
1520                 ceph_encode_32(&p, 0);
1521
1522         /* pgid */
1523         ceph_encode_8(&p, 1);
1524         ceph_encode_64(&p, req->r_t.pgid.pool);
1525         ceph_encode_32(&p, req->r_t.pgid.seed);
1526         ceph_encode_32(&p, -1); /* preferred */
1527
1528         /* oid */
1529         ceph_encode_32(&p, req->r_t.target_oid.name_len);
1530         memcpy(p, req->r_t.target_oid.name, req->r_t.target_oid.name_len);
1531         p += req->r_t.target_oid.name_len;
1532
1533         /* ops, can imply data */
1534         ceph_encode_16(&p, req->r_num_ops);
1535         for (i = 0; i < req->r_num_ops; i++) {
1536                 data_len += osd_req_encode_op(p, &req->r_ops[i]);
1537                 p += sizeof(struct ceph_osd_op);
1538         }
1539
1540         ceph_encode_64(&p, req->r_snapid); /* snapid */
1541         if (req->r_snapc) {
1542                 ceph_encode_64(&p, req->r_snapc->seq);
1543                 ceph_encode_32(&p, req->r_snapc->num_snaps);
1544                 for (i = 0; i < req->r_snapc->num_snaps; i++)
1545                         ceph_encode_64(&p, req->r_snapc->snaps[i]);
1546         } else {
1547                 ceph_encode_64(&p, 0); /* snap_seq */
1548                 ceph_encode_32(&p, 0); /* snaps len */
1549         }
1550
1551         ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
1552
1553         BUG_ON(p > end);
1554         msg->front.iov_len = p - msg->front.iov_base;
1555         msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
1556         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1557         msg->hdr.data_len = cpu_to_le32(data_len);
1558         /*
1559          * The header "data_off" is a hint to the receiver allowing it
1560          * to align received data into its buffers such that there's no
1561          * need to re-copy it before writing it to disk (direct I/O).
1562          */
1563         msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
1564
1565         dout("%s req %p oid %s oid_len %d front %zu data %u\n", __func__,
1566              req, req->r_t.target_oid.name, req->r_t.target_oid.name_len,
1567              msg->front.iov_len, data_len);
1568 }
1569
1570 /*
1571  * @req has to be assigned a tid and registered.
1572  */
1573 static void send_request(struct ceph_osd_request *req)
1574 {
1575         struct ceph_osd *osd = req->r_osd;
1576
1577         verify_osd_locked(osd);
1578         WARN_ON(osd->o_osd != req->r_t.osd);
1579
1580         /*
1581          * We may have a previously queued request message hanging
1582          * around.  Cancel it to avoid corrupting the msgr.
1583          */
1584         if (req->r_sent)
1585                 ceph_msg_revoke(req->r_request);
1586
1587         req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
1588         if (req->r_attempts)
1589                 req->r_flags |= CEPH_OSD_FLAG_RETRY;
1590         else
1591                 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
1592
1593         encode_request(req, req->r_request);
1594
1595         dout("%s req %p tid %llu to pg %llu.%x osd%d flags 0x%x attempt %d\n",
1596              __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
1597              req->r_t.osd, req->r_flags, req->r_attempts);
1598
1599         req->r_t.paused = false;
1600         req->r_stamp = jiffies;
1601         req->r_attempts++;
1602
1603         req->r_sent = osd->o_incarnation;
1604         req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
1605         ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
1606 }
1607
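     /*
      * Subscribe to the next osdmap epoch from the monitor.  While the
      * cluster is FULL or reads/writes are paused we subscribe
      * continuously, otherwise a one-shot subscription is enough.
      */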
1608 static void maybe_request_map(struct ceph_osd_client *osdc)
1609 {
1610         bool continuous = false;
1611
1612         verify_osdc_locked(osdc);
1613         WARN_ON(!osdc->osdmap->epoch);
1614
1615         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1616             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
1617             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
1618                 dout("%s osdc %p continuous\n", __func__, osdc);
1619                 continuous = true;
1620         } else {
1621                 dout("%s osdc %p onetime\n", __func__, osdc);
1622         }
1623
1624         if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
1625                                osdc->osdmap->epoch + 1, continuous))
1626                 ceph_monc_renew_subs(&osdc->client->monc);
1627 }
1628
1629 static void send_map_check(struct ceph_osd_request *req);
1630
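     /*
      * Map @req to an OSD session and send it if possible.  Normally
      * called with osdc->lock held for read; if the target pool doesn't
      * exist or a new OSD session must be created, the read lock is
      * promoted to a write lock and the target calculation is redone.
      */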
1631 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
1632 {
1633         struct ceph_osd_client *osdc = req->r_osdc;
1634         struct ceph_osd *osd;
1635         enum calc_target_result ct_res;
1636         bool need_send = false;
1637         bool promoted = false;
1638
1639         WARN_ON(req->r_tid || req->r_got_reply);
1640         dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
1641
1642 again:
1643         ct_res = calc_target(osdc, &req->r_t, &req->r_last_force_resend, false);
1644         if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
1645                 goto promote;
1646
1647         osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
1648         if (IS_ERR(osd)) {
1649                 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
1650                 goto promote;
1651         }
1652
1653         if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
1654             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
1655                 dout("req %p pausewr\n", req);
1656                 req->r_t.paused = true;
1657                 maybe_request_map(osdc);
1658         } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
1659                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
1660                 dout("req %p pauserd\n", req);
1661                 req->r_t.paused = true;
1662                 maybe_request_map(osdc);
1663         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
1664                    !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
1665                                      CEPH_OSD_FLAG_FULL_FORCE)) &&
1666                    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1667                     pool_full(osdc, req->r_t.base_oloc.pool))) {
1668                 dout("req %p full/pool_full\n", req);
1669                 pr_warn_ratelimited("FULL or reached pool quota\n");
1670                 req->r_t.paused = true;
1671                 maybe_request_map(osdc);
1672         } else if (!osd_homeless(osd)) {
1673                 need_send = true;
1674         } else {
1675                 maybe_request_map(osdc);
1676         }
1677
1678         mutex_lock(&osd->lock);
1679         /*
1680          * Assign the tid atomically with send_request() to protect
1681          * multiple writes to the same object from racing with each
1682          * other, resulting in out of order ops on the OSDs.
1683          */
1684         req->r_tid = atomic64_inc_return(&osdc->last_tid);
1685         link_request(osd, req);
1686         if (need_send)
1687                 send_request(req);
1688         mutex_unlock(&osd->lock);
1689
1690         if (ct_res == CALC_TARGET_POOL_DNE)
1691                 send_map_check(req);
1692
1693         if (promoted)
1694                 downgrade_write(&osdc->lock);
1695         return;
1696
1697 promote:
1698         up_read(&osdc->lock);
1699         down_write(&osdc->lock);
1700         wrlocked = true;
1701         promoted = true;
1702         goto again;
1703 }
1704
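     /*
      * Reads must not request ACK/ONDISK themselves -- ACK is set here.
      * Writes must request ACK and/or ONDISK, and an unsafe callback only
      * makes sense when both were requested.
      */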
1705 static void account_request(struct ceph_osd_request *req)
1706 {
1707         unsigned int mask = CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK;
1708
1709         if (req->r_flags & CEPH_OSD_FLAG_READ) {
1710                 WARN_ON(req->r_flags & mask);
1711                 req->r_flags |= CEPH_OSD_FLAG_ACK;
1712         } else if (req->r_flags & CEPH_OSD_FLAG_WRITE)
1713                 WARN_ON(!(req->r_flags & mask));
1714         else
1715                 WARN_ON(1);
1716
1717         WARN_ON(req->r_unsafe_callback && (req->r_flags & mask) != mask);
1718         atomic_inc(&req->r_osdc->num_requests);
1719 }
1720
1721 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
1722 {
1723         ceph_osdc_get_request(req);
1724         account_request(req);
1725         __submit_request(req, wrlocked);
1726 }
1727
1728 static void __finish_request(struct ceph_osd_request *req)
1729 {
1730         struct ceph_osd_client *osdc = req->r_osdc;
1731         struct ceph_osd *osd = req->r_osd;
1732
1733         verify_osd_locked(osd);
1734         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
1735
1736         WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
1737         unlink_request(osd, req);
1738         atomic_dec(&osdc->num_requests);
1739
1740         /*
1741          * If an OSD has failed or returned and a request has been sent
1742          * twice, it's possible to get a reply and end up here while the
1743          * request message is queued for delivery.  We will ignore the
1744          * reply, so not a big deal, but better to try and catch it.
1745          */
1746         ceph_msg_revoke(req->r_request);
1747         ceph_msg_revoke_incoming(req->r_reply);
1748 }
1749
1750 static void finish_request(struct ceph_osd_request *req)
1751 {
1752         __finish_request(req);
1753         ceph_osdc_put_request(req);
1754 }
1755
1756 static void __complete_request(struct ceph_osd_request *req)
1757 {
1758         if (req->r_callback)
1759                 req->r_callback(req);
1760         else
1761                 complete_all(&req->r_completion);
1762 }
1763
1764 /*
1765  * Note that this is open-coded in handle_reply(), which has to deal
1766  * with ack vs commit, dup acks, etc.
1767  */
1768 static void complete_request(struct ceph_osd_request *req, int err)
1769 {
1770         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
1771
1772         req->r_result = err;
1773         __finish_request(req);
1774         __complete_request(req);
1775         complete_all(&req->r_safe_completion);
1776         ceph_osdc_put_request(req);
1777 }
1778
1779 static void cancel_map_check(struct ceph_osd_request *req)
1780 {
1781         struct ceph_osd_client *osdc = req->r_osdc;
1782         struct ceph_osd_request *lookup_req;
1783
1784         verify_osdc_wrlocked(osdc);
1785
1786         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
1787         if (!lookup_req)
1788                 return;
1789
1790         WARN_ON(lookup_req != req);
1791         erase_request_mc(&osdc->map_checks, req);
1792         ceph_osdc_put_request(req);
1793 }
1794
1795 static void cancel_request(struct ceph_osd_request *req)
1796 {
1797         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
1798
1799         cancel_map_check(req);
1800         finish_request(req);
1801 }
1802
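     /*
      * Pool-existence ("map check") handling.  When calc_target() reports
      * CALC_TARGET_POOL_DNE, the request is stashed in osdc->map_checks
      * and the monitor is asked for the newest osdmap version
      * (send_map_check()).  Once we hold a map at least that new and the
      * pool is still missing, the request is completed with -ENOENT.
      */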
1803 static void check_pool_dne(struct ceph_osd_request *req)
1804 {
1805         struct ceph_osd_client *osdc = req->r_osdc;
1806         struct ceph_osdmap *map = osdc->osdmap;
1807
1808         verify_osdc_wrlocked(osdc);
1809         WARN_ON(!map->epoch);
1810
1811         if (req->r_attempts) {
1812                 /*
1813                  * We sent a request earlier, which means that
1814                  * previously the pool existed, and now it does not
1815                  * (i.e., it was deleted).
1816                  */
1817                 req->r_map_dne_bound = map->epoch;
1818                 dout("%s req %p tid %llu pool disappeared\n", __func__, req,
1819                      req->r_tid);
1820         } else {
1821                 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
1822                      req, req->r_tid, req->r_map_dne_bound, map->epoch);
1823         }
1824
1825         if (req->r_map_dne_bound) {
1826                 if (map->epoch >= req->r_map_dne_bound) {
1827                         /* we had a new enough map */
1828                         pr_info_ratelimited("tid %llu pool does not exist\n",
1829                                             req->r_tid);
1830                         complete_request(req, -ENOENT);
1831                 }
1832         } else {
1833                 send_map_check(req);
1834         }
1835 }
1836
1837 static void map_check_cb(struct ceph_mon_generic_request *greq)
1838 {
1839         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
1840         struct ceph_osd_request *req;
1841         u64 tid = greq->private_data;
1842
1843         WARN_ON(greq->result || !greq->u.newest);
1844
1845         down_write(&osdc->lock);
1846         req = lookup_request_mc(&osdc->map_checks, tid);
1847         if (!req) {
1848                 dout("%s tid %llu dne\n", __func__, tid);
1849                 goto out_unlock;
1850         }
1851
1852         dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
1853              req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
1854         if (!req->r_map_dne_bound)
1855                 req->r_map_dne_bound = greq->u.newest;
1856         erase_request_mc(&osdc->map_checks, req);
1857         check_pool_dne(req);
1858
1859         ceph_osdc_put_request(req);
1860 out_unlock:
1861         up_write(&osdc->lock);
1862 }
1863
1864 static void send_map_check(struct ceph_osd_request *req)
1865 {
1866         struct ceph_osd_client *osdc = req->r_osdc;
1867         struct ceph_osd_request *lookup_req;
1868         int ret;
1869
1870         verify_osdc_wrlocked(osdc);
1871
1872         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
1873         if (lookup_req) {
1874                 WARN_ON(lookup_req != req);
1875                 return;
1876         }
1877
1878         ceph_osdc_get_request(req);
1879         insert_request_mc(&osdc->map_checks, req);
1880         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
1881                                           map_check_cb, req->r_tid);
1882         WARN_ON(ret);
1883 }
1884
1885 /*
1886  * lingering requests, watch/notify v2 infrastructure
1887  */
1888 static void linger_release(struct kref *kref)
1889 {
1890         struct ceph_osd_linger_request *lreq =
1891             container_of(kref, struct ceph_osd_linger_request, kref);
1892
1893         dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
1894              lreq->reg_req, lreq->ping_req);
1895         WARN_ON(!RB_EMPTY_NODE(&lreq->node));
1896         WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
1897         WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
1898         WARN_ON(!list_empty(&lreq->scan_item));
1899         WARN_ON(!list_empty(&lreq->pending_lworks));
1900         WARN_ON(lreq->osd);
1901
1902         if (lreq->reg_req)
1903                 ceph_osdc_put_request(lreq->reg_req);
1904         if (lreq->ping_req)
1905                 ceph_osdc_put_request(lreq->ping_req);
1906         target_destroy(&lreq->t);
1907         kfree(lreq);
1908 }
1909
1910 static void linger_put(struct ceph_osd_linger_request *lreq)
1911 {
1912         if (lreq)
1913                 kref_put(&lreq->kref, linger_release);
1914 }
1915
1916 static struct ceph_osd_linger_request *
1917 linger_get(struct ceph_osd_linger_request *lreq)
1918 {
1919         kref_get(&lreq->kref);
1920         return lreq;
1921 }
1922
1923 static struct ceph_osd_linger_request *
1924 linger_alloc(struct ceph_osd_client *osdc)
1925 {
1926         struct ceph_osd_linger_request *lreq;
1927
1928         lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
1929         if (!lreq)
1930                 return NULL;
1931
1932         kref_init(&lreq->kref);
1933         mutex_init(&lreq->lock);
1934         RB_CLEAR_NODE(&lreq->node);
1935         RB_CLEAR_NODE(&lreq->osdc_node);
1936         RB_CLEAR_NODE(&lreq->mc_node);
1937         INIT_LIST_HEAD(&lreq->scan_item);
1938         INIT_LIST_HEAD(&lreq->pending_lworks);
1939         init_completion(&lreq->reg_commit_wait);
1940         init_completion(&lreq->notify_finish_wait);
1941
1942         lreq->osdc = osdc;
1943         target_init(&lreq->t);
1944
1945         dout("%s lreq %p\n", __func__, lreq);
1946         return lreq;
1947 }
1948
1949 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
1950 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
1951 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
1952
1953 /*
1954  * Create linger request <-> OSD session relation.
1955  *
1956  * @lreq has to be registered, @osd may be homeless.
1957  */
1958 static void link_linger(struct ceph_osd *osd,
1959                         struct ceph_osd_linger_request *lreq)
1960 {
1961         verify_osd_locked(osd);
1962         WARN_ON(!lreq->linger_id || lreq->osd);
1963         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
1964              osd->o_osd, lreq, lreq->linger_id);
1965
1966         if (!osd_homeless(osd))
1967                 __remove_osd_from_lru(osd);
1968         else
1969                 atomic_inc(&osd->o_osdc->num_homeless);
1970
1971         get_osd(osd);
1972         insert_linger(&osd->o_linger_requests, lreq);
1973         lreq->osd = osd;
1974 }
1975
1976 static void unlink_linger(struct ceph_osd *osd,
1977                           struct ceph_osd_linger_request *lreq)
1978 {
1979         verify_osd_locked(osd);
1980         WARN_ON(lreq->osd != osd);
1981         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
1982              osd->o_osd, lreq, lreq->linger_id);
1983
1984         lreq->osd = NULL;
1985         erase_linger(&osd->o_linger_requests, lreq);
1986         put_osd(osd);
1987
1988         if (!osd_homeless(osd))
1989                 maybe_move_osd_to_lru(osd);
1990         else
1991                 atomic_dec(&osd->o_osdc->num_homeless);
1992 }
1993
1994 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
1995 {
1996         verify_osdc_locked(lreq->osdc);
1997
1998         return !RB_EMPTY_NODE(&lreq->osdc_node);
1999 }
2000
2001 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2002 {
2003         struct ceph_osd_client *osdc = lreq->osdc;
2004         bool registered;
2005
2006         down_read(&osdc->lock);
2007         registered = __linger_registered(lreq);
2008         up_read(&osdc->lock);
2009
2010         return registered;
2011 }
2012
2013 static void linger_register(struct ceph_osd_linger_request *lreq)
2014 {
2015         struct ceph_osd_client *osdc = lreq->osdc;
2016
2017         verify_osdc_wrlocked(osdc);
2018         WARN_ON(lreq->linger_id);
2019
2020         linger_get(lreq);
2021         lreq->linger_id = ++osdc->last_linger_id;
2022         insert_linger_osdc(&osdc->linger_requests, lreq);
2023 }
2024
2025 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2026 {
2027         struct ceph_osd_client *osdc = lreq->osdc;
2028
2029         verify_osdc_wrlocked(osdc);
2030
2031         erase_linger_osdc(&osdc->linger_requests, lreq);
2032         linger_put(lreq);
2033 }
2034
2035 static void cancel_linger_request(struct ceph_osd_request *req)
2036 {
2037         struct ceph_osd_linger_request *lreq = req->r_priv;
2038
2039         WARN_ON(!req->r_linger);
2040         cancel_request(req);
2041         linger_put(lreq);
2042 }
2043
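     /*
      * A linger_work item delivers either a watch notification
      * (do_watch_notify()) or a watch error (do_watch_error()) to the
      * user callback from osdc->notify_wq, i.e. outside of osd and lreq
      * locks.
      */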
2044 struct linger_work {
2045         struct work_struct work;
2046         struct ceph_osd_linger_request *lreq;
2047         struct list_head pending_item;
2048         unsigned long queued_stamp;
2049
2050         union {
2051                 struct {
2052                         u64 notify_id;
2053                         u64 notifier_id;
2054                         void *payload; /* points into @msg front */
2055                         size_t payload_len;
2056
2057                         struct ceph_msg *msg; /* for ceph_msg_put() */
2058                 } notify;
2059                 struct {
2060                         int err;
2061                 } error;
2062         };
2063 };
2064
2065 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2066                                        work_func_t workfn)
2067 {
2068         struct linger_work *lwork;
2069
2070         lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2071         if (!lwork)
2072                 return NULL;
2073
2074         INIT_WORK(&lwork->work, workfn);
2075         INIT_LIST_HEAD(&lwork->pending_item);
2076         lwork->lreq = linger_get(lreq);
2077
2078         return lwork;
2079 }
2080
2081 static void lwork_free(struct linger_work *lwork)
2082 {
2083         struct ceph_osd_linger_request *lreq = lwork->lreq;
2084
2085         mutex_lock(&lreq->lock);
2086         list_del(&lwork->pending_item);
2087         mutex_unlock(&lreq->lock);
2088
2089         linger_put(lreq);
2090         kfree(lwork);
2091 }
2092
2093 static void lwork_queue(struct linger_work *lwork)
2094 {
2095         struct ceph_osd_linger_request *lreq = lwork->lreq;
2096         struct ceph_osd_client *osdc = lreq->osdc;
2097
2098         verify_lreq_locked(lreq);
2099         WARN_ON(!list_empty(&lwork->pending_item));
2100
2101         lwork->queued_stamp = jiffies;
2102         list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2103         queue_work(osdc->notify_wq, &lwork->work);
2104 }
2105
2106 static void do_watch_notify(struct work_struct *w)
2107 {
2108         struct linger_work *lwork = container_of(w, struct linger_work, work);
2109         struct ceph_osd_linger_request *lreq = lwork->lreq;
2110
2111         if (!linger_registered(lreq)) {
2112                 dout("%s lreq %p not registered\n", __func__, lreq);
2113                 goto out;
2114         }
2115
2116         WARN_ON(!lreq->is_watch);
2117         dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2118              __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2119              lwork->notify.payload_len);
2120         lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2121                   lwork->notify.notifier_id, lwork->notify.payload,
2122                   lwork->notify.payload_len);
2123
2124 out:
2125         ceph_msg_put(lwork->notify.msg);
2126         lwork_free(lwork);
2127 }
2128
2129 static void do_watch_error(struct work_struct *w)
2130 {
2131         struct linger_work *lwork = container_of(w, struct linger_work, work);
2132         struct ceph_osd_linger_request *lreq = lwork->lreq;
2133
2134         if (!linger_registered(lreq)) {
2135                 dout("%s lreq %p not registered\n", __func__, lreq);
2136                 goto out;
2137         }
2138
2139         dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2140         lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2141
2142 out:
2143         lwork_free(lwork);
2144 }
2145
2146 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2147 {
2148         struct linger_work *lwork;
2149
2150         lwork = lwork_alloc(lreq, do_watch_error);
2151         if (!lwork) {
2152                 pr_err("failed to allocate error-lwork\n");
2153                 return;
2154         }
2155
2156         lwork->error.err = lreq->last_error;
2157         lwork_queue(lwork);
2158 }
2159
2160 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2161                                        int result)
2162 {
2163         if (!completion_done(&lreq->reg_commit_wait)) {
2164                 lreq->reg_commit_error = (result <= 0 ? result : 0);
2165                 complete_all(&lreq->reg_commit_wait);
2166         }
2167 }
2168
2169 static void linger_commit_cb(struct ceph_osd_request *req)
2170 {
2171         struct ceph_osd_linger_request *lreq = req->r_priv;
2172
2173         mutex_lock(&lreq->lock);
2174         dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2175              lreq->linger_id, req->r_result);
2176         WARN_ON(!__linger_registered(lreq));
2177         linger_reg_commit_complete(lreq, req->r_result);
2178         lreq->committed = true;
2179
2180         if (!lreq->is_watch) {
2181                 struct ceph_osd_data *osd_data =
2182                     osd_req_op_data(req, 0, notify, response_data);
2183                 void *p = page_address(osd_data->pages[0]);
2184
2185                 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2186                         osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2187
2188                 /* make note of the notify_id */
2189                 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2190                         lreq->notify_id = ceph_decode_64(&p);
2191                         dout("lreq %p notify_id %llu\n", lreq,
2192                              lreq->notify_id);
2193                 } else {
2194                         dout("lreq %p no notify_id\n", lreq);
2195                 }
2196         }
2197
2198         mutex_unlock(&lreq->lock);
2199         linger_put(lreq);
2200 }
2201
2202 static int normalize_watch_error(int err)
2203 {
2204         /*
2205          * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2206          * notification and a failure to reconnect because we raced with
2207          * the delete appear the same to the user.
2208          */
2209         if (err == -ENOENT)
2210                 err = -ENOTCONN;
2211
2212         return err;
2213 }
2214
2215 static void linger_reconnect_cb(struct ceph_osd_request *req)
2216 {
2217         struct ceph_osd_linger_request *lreq = req->r_priv;
2218
2219         mutex_lock(&lreq->lock);
2220         dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2221              lreq, lreq->linger_id, req->r_result, lreq->last_error);
2222         if (req->r_result < 0) {
2223                 if (!lreq->last_error) {
2224                         lreq->last_error = normalize_watch_error(req->r_result);
2225                         queue_watch_error(lreq);
2226                 }
2227         }
2228
2229         mutex_unlock(&lreq->lock);
2230         linger_put(lreq);
2231 }
2232
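     /*
      * (Re)send the registration request for @lreq.  The same reg_req is
      * reused: the initial send registers the watch (or sends the notify),
      * while resends after the initial commit turn into
      * CEPH_OSD_WATCH_OP_RECONNECT with a bumped register_gen.
      */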
2233 static void send_linger(struct ceph_osd_linger_request *lreq)
2234 {
2235         struct ceph_osd_request *req = lreq->reg_req;
2236         struct ceph_osd_req_op *op = &req->r_ops[0];
2237
2238         verify_osdc_wrlocked(req->r_osdc);
2239         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2240
2241         if (req->r_osd)
2242                 cancel_linger_request(req);
2243
2244         request_reinit(req);
2245         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2246         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2247         req->r_flags = lreq->t.flags;
2248         req->r_mtime = lreq->mtime;
2249
2250         mutex_lock(&lreq->lock);
2251         if (lreq->is_watch && lreq->committed) {
2252                 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2253                         op->watch.cookie != lreq->linger_id);
2254                 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2255                 op->watch.gen = ++lreq->register_gen;
2256                 dout("lreq %p reconnect register_gen %u\n", lreq,
2257                      op->watch.gen);
2258                 req->r_callback = linger_reconnect_cb;
2259         } else {
2260                 if (!lreq->is_watch)
2261                         lreq->notify_id = 0;
2262                 else
2263                         WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
2264                 dout("lreq %p register\n", lreq);
2265                 req->r_callback = linger_commit_cb;
2266         }
2267         mutex_unlock(&lreq->lock);
2268
2269         req->r_priv = linger_get(lreq);
2270         req->r_linger = true;
2271
2272         submit_request(req, true);
2273 }
2274
2275 static void linger_ping_cb(struct ceph_osd_request *req)
2276 {
2277         struct ceph_osd_linger_request *lreq = req->r_priv;
2278
2279         mutex_lock(&lreq->lock);
2280         dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
2281              __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
2282              lreq->last_error);
2283         if (lreq->register_gen == req->r_ops[0].watch.gen) {
2284                 if (!req->r_result) {
2285                         lreq->watch_valid_thru = lreq->ping_sent;
2286                 } else if (!lreq->last_error) {
2287                         lreq->last_error = normalize_watch_error(req->r_result);
2288                         queue_watch_error(lreq);
2289                 }
2290         } else {
2291                 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
2292                      lreq->register_gen, req->r_ops[0].watch.gen);
2293         }
2294
2295         mutex_unlock(&lreq->lock);
2296         linger_put(lreq);
2297 }
2298
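     /*
      * Ping the watch to verify liveness.  The pong is matched against
      * the current register_gen in linger_ping_cb(): a stale pong is
      * ignored, a good one advances watch_valid_thru, and an error is
      * reported via queue_watch_error().
      */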
2299 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
2300 {
2301         struct ceph_osd_client *osdc = lreq->osdc;
2302         struct ceph_osd_request *req = lreq->ping_req;
2303         struct ceph_osd_req_op *op = &req->r_ops[0];
2304
2305         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2306                 dout("%s PAUSERD\n", __func__);
2307                 return;
2308         }
2309
2310         lreq->ping_sent = jiffies;
2311         dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
2312              __func__, lreq, lreq->linger_id, lreq->ping_sent,
2313              lreq->register_gen);
2314
2315         if (req->r_osd)
2316                 cancel_linger_request(req);
2317
2318         request_reinit(req);
2319         target_copy(&req->r_t, &lreq->t);
2320
2321         WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2322                 op->watch.cookie != lreq->linger_id ||
2323                 op->watch.op != CEPH_OSD_WATCH_OP_PING);
2324         op->watch.gen = lreq->register_gen;
2325         req->r_callback = linger_ping_cb;
2326         req->r_priv = linger_get(lreq);
2327         req->r_linger = true;
2328
2329         ceph_osdc_get_request(req);
2330         account_request(req);
2331         req->r_tid = atomic64_inc_return(&osdc->last_tid);
2332         link_request(lreq->osd, req);
2333         send_request(req);
2334 }
2335
2336 static void linger_submit(struct ceph_osd_linger_request *lreq)
2337 {
2338         struct ceph_osd_client *osdc = lreq->osdc;
2339         struct ceph_osd *osd;
2340
2341         calc_target(osdc, &lreq->t, &lreq->last_force_resend, false);
2342         osd = lookup_create_osd(osdc, lreq->t.osd, true);
2343         link_linger(osd, lreq);
2344
2345         send_linger(lreq);
2346 }
2347
2348 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
2349 {
2350         struct ceph_osd_client *osdc = lreq->osdc;
2351         struct ceph_osd_linger_request *lookup_lreq;
2352
2353         verify_osdc_wrlocked(osdc);
2354
2355         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2356                                        lreq->linger_id);
2357         if (!lookup_lreq)
2358                 return;
2359
2360         WARN_ON(lookup_lreq != lreq);
2361         erase_linger_mc(&osdc->linger_map_checks, lreq);
2362         linger_put(lreq);
2363 }
2364
2365 /*
2366  * @lreq has to be both registered and linked.
2367  */
2368 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
2369 {
2370         if (lreq->is_watch && lreq->ping_req->r_osd)
2371                 cancel_linger_request(lreq->ping_req);
2372         if (lreq->reg_req->r_osd)
2373                 cancel_linger_request(lreq->reg_req);
2374         cancel_linger_map_check(lreq);
2375         unlink_linger(lreq->osd, lreq);
2376         linger_unregister(lreq);
2377 }
2378
2379 static void linger_cancel(struct ceph_osd_linger_request *lreq)
2380 {
2381         struct ceph_osd_client *osdc = lreq->osdc;
2382
2383         down_write(&osdc->lock);
2384         if (__linger_registered(lreq))
2385                 __linger_cancel(lreq);
2386         up_write(&osdc->lock);
2387 }
2388
2389 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
2390
2391 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
2392 {
2393         struct ceph_osd_client *osdc = lreq->osdc;
2394         struct ceph_osdmap *map = osdc->osdmap;
2395
2396         verify_osdc_wrlocked(osdc);
2397         WARN_ON(!map->epoch);
2398
2399         if (lreq->register_gen) {
2400                 lreq->map_dne_bound = map->epoch;
2401                 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
2402                      lreq, lreq->linger_id);
2403         } else {
2404                 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
2405                      __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2406                      map->epoch);
2407         }
2408
2409         if (lreq->map_dne_bound) {
2410                 if (map->epoch >= lreq->map_dne_bound) {
2411                         /* we had a new enough map */
2412                         pr_info("linger_id %llu pool does not exist\n",
2413                                 lreq->linger_id);
2414                         linger_reg_commit_complete(lreq, -ENOENT);
2415                         __linger_cancel(lreq);
2416                 }
2417         } else {
2418                 send_linger_map_check(lreq);
2419         }
2420 }
2421
2422 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
2423 {
2424         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2425         struct ceph_osd_linger_request *lreq;
2426         u64 linger_id = greq->private_data;
2427
2428         WARN_ON(greq->result || !greq->u.newest);
2429
2430         down_write(&osdc->lock);
2431         lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
2432         if (!lreq) {
2433                 dout("%s linger_id %llu dne\n", __func__, linger_id);
2434                 goto out_unlock;
2435         }
2436
2437         dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
2438              __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
2439              greq->u.newest);
2440         if (!lreq->map_dne_bound)
2441                 lreq->map_dne_bound = greq->u.newest;
2442         erase_linger_mc(&osdc->linger_map_checks, lreq);
2443         check_linger_pool_dne(lreq);
2444
2445         linger_put(lreq);
2446 out_unlock:
2447         up_write(&osdc->lock);
2448 }
2449
2450 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
2451 {
2452         struct ceph_osd_client *osdc = lreq->osdc;
2453         struct ceph_osd_linger_request *lookup_lreq;
2454         int ret;
2455
2456         verify_osdc_wrlocked(osdc);
2457
2458         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
2459                                        lreq->linger_id);
2460         if (lookup_lreq) {
2461                 WARN_ON(lookup_lreq != lreq);
2462                 return;
2463         }
2464
2465         linger_get(lreq);
2466         insert_linger_mc(&osdc->linger_map_checks, lreq);
2467         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2468                                           linger_map_check_cb, lreq->linger_id);
2469         WARN_ON(ret);
2470 }
2471
2472 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
2473 {
2474         int ret;
2475
2476         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2477         ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
2478         return ret ?: lreq->reg_commit_error;
2479 }
2480
2481 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
2482 {
2483         int ret;
2484
2485         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2486         ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
2487         return ret ?: lreq->notify_finish_error;
2488 }
2489
2490 /*
2491  * Timeout callback, called every N seconds.  When one or more OSD
2492  * requests have been active for more than N seconds, we send a keepalive
2493  * (tag + timestamp) to their OSDs to ensure any communications channel
2494  * reset is detected.
2495  */
2496 static void handle_timeout(struct work_struct *work)
2497 {
2498         struct ceph_osd_client *osdc =
2499                 container_of(work, struct ceph_osd_client, timeout_work.work);
2500         struct ceph_options *opts = osdc->client->options;
2501         unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
2502         LIST_HEAD(slow_osds);
2503         struct rb_node *n, *p;
2504
2505         dout("%s osdc %p\n", __func__, osdc);
2506         down_write(&osdc->lock);
2507
2508         /*
2509          * Ping OSDs that are a bit slow.  This ensures that if there
2510          * is a break in the TCP connection we will notice and reopen
2511          * a connection with that OSD (from the fault callback).
2512          */
2513         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
2514                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
2515                 bool found = false;
2516
2517                 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
2518                         struct ceph_osd_request *req =
2519                             rb_entry(p, struct ceph_osd_request, r_node);
2520
2521                         if (time_before(req->r_stamp, cutoff)) {
2522                                 dout(" req %p tid %llu on osd%d is laggy\n",
2523                                      req, req->r_tid, osd->o_osd);
2524                                 found = true;
2525                         }
2526                 }
2527                 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
2528                         struct ceph_osd_linger_request *lreq =
2529                             rb_entry(p, struct ceph_osd_linger_request, node);
2530
2531                         dout(" lreq %p linger_id %llu is served by osd%d\n",
2532                              lreq, lreq->linger_id, osd->o_osd);
2533                         found = true;
2534
2535                         mutex_lock(&lreq->lock);
2536                         if (lreq->is_watch && lreq->committed && !lreq->last_error)
2537                                 send_linger_ping(lreq);
2538                         mutex_unlock(&lreq->lock);
2539                 }
2540
2541                 if (found)
2542                         list_move_tail(&osd->o_keepalive_item, &slow_osds);
2543         }
2544
2545         if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
2546                 maybe_request_map(osdc);
2547
2548         while (!list_empty(&slow_osds)) {
2549                 struct ceph_osd *osd = list_first_entry(&slow_osds,
2550                                                         struct ceph_osd,
2551                                                         o_keepalive_item);
2552                 list_del_init(&osd->o_keepalive_item);
2553                 ceph_con_keepalive(&osd->o_con);
2554         }
2555
2556         up_write(&osdc->lock);
2557         schedule_delayed_work(&osdc->timeout_work,
2558                               osdc->client->options->osd_keepalive_timeout);
2559 }
2560
2561 static void handle_osds_timeout(struct work_struct *work)
2562 {
2563         struct ceph_osd_client *osdc =
2564                 container_of(work, struct ceph_osd_client,
2565                              osds_timeout_work.work);
2566         unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
2567         struct ceph_osd *osd, *nosd;
2568
2569         dout("%s osdc %p\n", __func__, osdc);
2570         down_write(&osdc->lock);
2571         list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
2572                 if (time_before(jiffies, osd->lru_ttl))
2573                         break;
2574
2575                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
2576                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
2577                 close_osd(osd);
2578         }
2579
2580         up_write(&osdc->lock);
2581         schedule_delayed_work(&osdc->osds_timeout_work,
2582                               round_jiffies_relative(delay));
2583 }
2584
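     /*
      * Decode a ceph_object_locator (encoding versions 3..6).  We reject
      * anything we can't act on: a non-empty key, a namespace change on
      * redirect, or an explicit hash.
      */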
2585 static int ceph_oloc_decode(void **p, void *end,
2586                             struct ceph_object_locator *oloc)
2587 {
2588         u8 struct_v, struct_cv;
2589         u32 len;
2590         void *struct_end;
2591         int ret = 0;
2592
2593         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2594         struct_v = ceph_decode_8(p);
2595         struct_cv = ceph_decode_8(p);
2596         if (struct_v < 3) {
2597                 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
2598                         struct_v, struct_cv);
2599                 goto e_inval;
2600         }
2601         if (struct_cv > 6) {
2602                 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
2603                         struct_v, struct_cv);
2604                 goto e_inval;
2605         }
2606         len = ceph_decode_32(p);
2607         ceph_decode_need(p, end, len, e_inval);
2608         struct_end = *p + len;
2609
2610         oloc->pool = ceph_decode_64(p);
2611         *p += 4; /* skip preferred */
2612
2613         len = ceph_decode_32(p);
2614         if (len > 0) {
2615                 pr_warn("ceph_object_locator::key is set\n");
2616                 goto e_inval;
2617         }
2618
2619         if (struct_v >= 5) {
2620                 bool changed = false;
2621
2622                 len = ceph_decode_32(p);
2623                 if (len > 0) {
2624                         ceph_decode_need(p, end, len, e_inval);
2625                         if (!oloc->pool_ns ||
2626                             ceph_compare_string(oloc->pool_ns, *p, len))
2627                                 changed = true;
2628                         *p += len;
2629                 } else {
2630                         if (oloc->pool_ns)
2631                                 changed = true;
2632                 }
2633                 if (changed) {
2634                         /* redirect changes namespace */
2635                         pr_warn("ceph_object_locator::nspace is changed\n");
2636                         goto e_inval;
2637                 }
2638         }
2639
2640         if (struct_v >= 6) {
2641                 s64 hash = ceph_decode_64(p);
2642                 if (hash != -1) {
2643                         pr_warn("ceph_object_locator::hash is set\n");
2644                         goto e_inval;
2645                 }
2646         }
2647
2648         /* skip the rest */
2649         *p = struct_end;
2650 out:
2651         return ret;
2652
2653 e_inval:
2654         ret = -EINVAL;
2655         goto out;
2656 }
2657
2658 static int ceph_redirect_decode(void **p, void *end,
2659                                 struct ceph_request_redirect *redir)
2660 {
2661         u8 struct_v, struct_cv;
2662         u32 len;
2663         void *struct_end;
2664         int ret;
2665
2666         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
2667         struct_v = ceph_decode_8(p);
2668         struct_cv = ceph_decode_8(p);
2669         if (struct_cv > 1) {
2670                 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
2671                         struct_v, struct_cv);
2672                 goto e_inval;
2673         }
2674         len = ceph_decode_32(p);
2675         ceph_decode_need(p, end, len, e_inval);
2676         struct_end = *p + len;
2677
2678         ret = ceph_oloc_decode(p, end, &redir->oloc);
2679         if (ret)
2680                 goto out;
2681
2682         len = ceph_decode_32(p);
2683         if (len > 0) {
2684                 pr_warn("ceph_request_redirect::object_name is set\n");
2685                 goto e_inval;
2686         }
2687
2688         len = ceph_decode_32(p);
2689         *p += len; /* skip osd_instructions */
2690
2691         /* skip the rest */
2692         *p = struct_end;
2693 out:
2694         return ret;
2695
2696 e_inval:
2697         ret = -EINVAL;
2698         goto out;
2699 }
2700
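     /*
      * Decoded MOSDOpReply.  replay_version and user_version are only
      * encoded in v5+ messages, the redirect in v6+ (with an explicit
      * presence byte from v7 on) -- see decode_MOSDOpReply().
      */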
2701 struct MOSDOpReply {
2702         struct ceph_pg pgid;
2703         u64 flags;
2704         int result;
2705         u32 epoch;
2706         int num_ops;
2707         u32 outdata_len[CEPH_OSD_MAX_OPS];
2708         s32 rval[CEPH_OSD_MAX_OPS];
2709         int retry_attempt;
2710         struct ceph_eversion replay_version;
2711         u64 user_version;
2712         struct ceph_request_redirect redirect;
2713 };
2714
2715 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
2716 {
2717         void *p = msg->front.iov_base;
2718         void *const end = p + msg->front.iov_len;
2719         u16 version = le16_to_cpu(msg->hdr.version);
2720         struct ceph_eversion bad_replay_version;
2721         u8 decode_redir;
2722         u32 len;
2723         int ret;
2724         int i;
2725
2726         ceph_decode_32_safe(&p, end, len, e_inval);
2727         ceph_decode_need(&p, end, len, e_inval);
2728         p += len; /* skip oid */
2729
2730         ret = ceph_decode_pgid(&p, end, &m->pgid);
2731         if (ret)
2732                 return ret;
2733
2734         ceph_decode_64_safe(&p, end, m->flags, e_inval);
2735         ceph_decode_32_safe(&p, end, m->result, e_inval);
2736         ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
2737         memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
2738         p += sizeof(bad_replay_version);
2739         ceph_decode_32_safe(&p, end, m->epoch, e_inval);
2740
2741         ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
2742         if (m->num_ops > ARRAY_SIZE(m->outdata_len))
2743                 goto e_inval;
2744
2745         ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
2746                          e_inval);
2747         for (i = 0; i < m->num_ops; i++) {
2748                 struct ceph_osd_op *op = p;
2749
2750                 m->outdata_len[i] = le32_to_cpu(op->payload_len);
2751                 p += sizeof(*op);
2752         }
2753
2754         ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
2755         for (i = 0; i < m->num_ops; i++)
2756                 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
2757
2758         if (version >= 5) {
2759                 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
2760                 memcpy(&m->replay_version, p, sizeof(m->replay_version));
2761                 p += sizeof(m->replay_version);
2762                 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
2763         } else {
2764                 m->replay_version = bad_replay_version; /* struct */
2765                 m->user_version = le64_to_cpu(m->replay_version.version);
2766         }
2767
2768         if (version >= 6) {
2769                 if (version >= 7)
2770                         ceph_decode_8_safe(&p, end, decode_redir, e_inval);
2771                 else
2772                         decode_redir = 1;
2773         } else {
2774                 decode_redir = 0;
2775         }
2776
2777         if (decode_redir) {
2778                 ret = ceph_redirect_decode(&p, end, &m->redirect);
2779                 if (ret)
2780                         return ret;
2781         } else {
2782                 ceph_oloc_init(&m->redirect.oloc);
2783         }
2784
2785         return 0;
2786
2787 e_inval:
2788         return -EINVAL;
2789 }
2790
2791 /*
2792  * We are done with @req if
2793  *   - @m is a safe reply, or
2794  *   - @m is an unsafe reply and we didn't want a safe one
2795  */
2796 static bool done_request(const struct ceph_osd_request *req,
2797                          const struct MOSDOpReply *m)
2798 {
2799         return (m->result < 0 ||
2800                 (m->flags & CEPH_OSD_FLAG_ONDISK) ||
2801                 !(req->r_flags & CEPH_OSD_FLAG_ONDISK));
2802 }
2803
2804 /*
2805  * Handle an OSD op reply: either call the callback if one is specified,
2806  * or complete r_completion to wake up the waiting thread.
2807  *
2808  * ->r_unsafe_callback is set?  yes                     no
2809  *
2810  * first reply is OK (needed    r_cb/r_completion,      r_cb/r_completion,
2811  * any or needed/got safe)      r_safe_completion       r_safe_completion
2812  *
2813  * first reply is unsafe        r_unsafe_cb(true)       (nothing)
2814  *
2815  * when we get the safe reply   r_unsafe_cb(false),     r_cb/r_completion,
2816  *                              r_safe_completion       r_safe_completion
2817  */
2818 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
2819 {
2820         struct ceph_osd_client *osdc = osd->o_osdc;
2821         struct ceph_osd_request *req;
2822         struct MOSDOpReply m;
2823         u64 tid = le64_to_cpu(msg->hdr.tid);
2824         u32 data_len = 0;
2825         bool already_acked;
2826         int ret;
2827         int i;
2828
2829         dout("%s msg %p tid %llu\n", __func__, msg, tid);
2830
2831         down_read(&osdc->lock);
2832         if (!osd_registered(osd)) {
2833                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
2834                 goto out_unlock_osdc;
2835         }
2836         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
2837
2838         mutex_lock(&osd->lock);
2839         req = lookup_request(&osd->o_requests, tid);
2840         if (!req) {
2841                 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
2842                 goto out_unlock_session;
2843         }
2844
2845         m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
2846         ret = decode_MOSDOpReply(msg, &m);
2847         m.redirect.oloc.pool_ns = NULL;
2848         if (ret) {
2849                 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
2850                        req->r_tid, ret);
2851                 ceph_msg_dump(msg);
2852                 goto fail_request;
2853         }
2854         dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
2855              __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
2856              m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
2857              le64_to_cpu(m.replay_version.version), m.user_version);
2858
2859         if (m.retry_attempt >= 0) {
2860                 if (m.retry_attempt != req->r_attempts - 1) {
2861                         dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
2862                              req, req->r_tid, m.retry_attempt,
2863                              req->r_attempts - 1);
2864                         goto out_unlock_session;
2865                 }
2866         } else {
2867                 WARN_ON(1); /* MOSDOpReply v4 is assumed */
2868         }
2869
2870         if (!ceph_oloc_empty(&m.redirect.oloc)) {
2871                 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
2872                      m.redirect.oloc.pool);
2873                 unlink_request(osd, req);
2874                 mutex_unlock(&osd->lock);
2875
2876                 /*
2877                  * Not ceph_oloc_copy() - changing pool_ns is not
2878                  * supported.
2879                  */
2880                 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
2881                 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
2882                 req->r_tid = 0;
2883                 __submit_request(req, false);
2884                 goto out_unlock_osdc;
2885         }
2886
2887         if (m.num_ops != req->r_num_ops) {
2888                 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
2889                        req->r_num_ops, req->r_tid);
2890                 goto fail_request;
2891         }
2892         for (i = 0; i < req->r_num_ops; i++) {
2893                 dout(" req %p tid %llu op %d rval %d len %u\n", req,
2894                      req->r_tid, i, m.rval[i], m.outdata_len[i]);
2895                 req->r_ops[i].rval = m.rval[i];
2896                 req->r_ops[i].outdata_len = m.outdata_len[i];
2897                 data_len += m.outdata_len[i];
2898         }
2899         if (data_len != le32_to_cpu(msg->hdr.data_len)) {
2900                 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
2901                        le32_to_cpu(msg->hdr.data_len), req->r_tid);
2902                 goto fail_request;
2903         }
2904         dout("%s req %p tid %llu acked %d result %d data_len %u\n", __func__,
2905              req, req->r_tid, req->r_got_reply, m.result, data_len);
2906
2907         already_acked = req->r_got_reply;
2908         if (!already_acked) {
2909                 req->r_result = m.result ?: data_len;
2910                 req->r_replay_version = m.replay_version; /* struct */
2911                 req->r_got_reply = true;
2912         } else if (!(m.flags & CEPH_OSD_FLAG_ONDISK)) {
2913                 dout("req %p tid %llu dup ack\n", req, req->r_tid);
2914                 goto out_unlock_session;
2915         }
2916
2917         if (done_request(req, &m)) {
2918                 __finish_request(req);
2919                 if (req->r_linger) {
2920                         WARN_ON(req->r_unsafe_callback);
2921                         dout("req %p tid %llu cb (locked)\n", req, req->r_tid);
2922                         __complete_request(req);
2923                 }
2924         }
2925
2926         mutex_unlock(&osd->lock);
2927         up_read(&osdc->lock);
2928
2929         if (done_request(req, &m)) {
2930                 if (already_acked && req->r_unsafe_callback) {
2931                         dout("req %p tid %llu safe-cb\n", req, req->r_tid);
2932                         req->r_unsafe_callback(req, false);
2933                 } else if (!req->r_linger) {
2934                         dout("req %p tid %llu cb\n", req, req->r_tid);
2935                         __complete_request(req);
2936                 }
2937                 if (m.flags & CEPH_OSD_FLAG_ONDISK)
2938                         complete_all(&req->r_safe_completion);
2939                 ceph_osdc_put_request(req);
2940         } else {
2941                 if (req->r_unsafe_callback) {
2942                         dout("req %p tid %llu unsafe-cb\n", req, req->r_tid);
2943                         req->r_unsafe_callback(req, true);
2944                 } else {
2945                         WARN_ON(1);
2946                 }
2947         }
2948
2949         return;
2950
2951 fail_request:
2952         complete_request(req, -EIO);
2953 out_unlock_session:
2954         mutex_unlock(&osd->lock);
2955 out_unlock_osdc:
2956         up_read(&osdc->lock);
2957 }
2958
2959 static void set_pool_was_full(struct ceph_osd_client *osdc)
2960 {
2961         struct rb_node *n;
2962
2963         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
2964                 struct ceph_pg_pool_info *pi =
2965                     rb_entry(n, struct ceph_pg_pool_info, node);
2966
2967                 pi->was_full = __pool_full(pi);
2968         }
2969 }
2970
2971 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
2972 {
2973         struct ceph_pg_pool_info *pi;
2974
2975         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
2976         if (!pi)
2977                 return false;
2978
2979         return pi->was_full && !__pool_full(pi);
2980 }
2981
2982 static enum calc_target_result
2983 recalc_linger_target(struct ceph_osd_linger_request *lreq)
2984 {
2985         struct ceph_osd_client *osdc = lreq->osdc;
2986         enum calc_target_result ct_res;
2987
2988         ct_res = calc_target(osdc, &lreq->t, &lreq->last_force_resend, true);
2989         if (ct_res == CALC_TARGET_NEED_RESEND) {
2990                 struct ceph_osd *osd;
2991
2992                 osd = lookup_create_osd(osdc, lreq->t.osd, true);
2993                 if (osd != lreq->osd) {
2994                         unlink_linger(lreq->osd, lreq);
2995                         link_linger(osd, lreq);
2996                 }
2997         }
2998
2999         return ct_res;
3000 }
3001
3002 /*
3003  * Requeue requests whose mapping to an OSD has changed.
3004  */
3005 static void scan_requests(struct ceph_osd *osd,
3006                           bool force_resend,
3007                           bool cleared_full,
3008                           bool check_pool_cleared_full,
3009                           struct rb_root *need_resend,
3010                           struct list_head *need_resend_linger)
3011 {
3012         struct ceph_osd_client *osdc = osd->o_osdc;
3013         struct rb_node *n;
3014         bool force_resend_writes;
3015
3016         for (n = rb_first(&osd->o_linger_requests); n; ) {
3017                 struct ceph_osd_linger_request *lreq =
3018                     rb_entry(n, struct ceph_osd_linger_request, node);
3019                 enum calc_target_result ct_res;
3020
3021                 n = rb_next(n); /* recalc_linger_target() */
3022
3023                 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3024                      lreq->linger_id);
3025                 ct_res = recalc_linger_target(lreq);
3026                 switch (ct_res) {
3027                 case CALC_TARGET_NO_ACTION:
3028                         force_resend_writes = cleared_full ||
3029                             (check_pool_cleared_full &&
3030                              pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3031                         if (!force_resend && !force_resend_writes)
3032                                 break;
3033
3034                         /* fall through */
3035                 case CALC_TARGET_NEED_RESEND:
3036                         cancel_linger_map_check(lreq);
3037                         /*
3038                          * scan_requests() for the previous epoch(s)
3039                          * may have already added it to the list, since
3040                          * it's not unlinked here.
3041                          */
3042                         if (list_empty(&lreq->scan_item))
3043                                 list_add_tail(&lreq->scan_item, need_resend_linger);
3044                         break;
3045                 case CALC_TARGET_POOL_DNE:
3046                         check_linger_pool_dne(lreq);
3047                         break;
3048                 }
3049         }
3050
3051         for (n = rb_first(&osd->o_requests); n; ) {
3052                 struct ceph_osd_request *req =
3053                     rb_entry(n, struct ceph_osd_request, r_node);
3054                 enum calc_target_result ct_res;
3055
3056                 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3057
3058                 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3059                 ct_res = calc_target(osdc, &req->r_t,
3060                                      &req->r_last_force_resend, false);
3061                 switch (ct_res) {
3062                 case CALC_TARGET_NO_ACTION:
3063                         force_resend_writes = cleared_full ||
3064                             (check_pool_cleared_full &&
3065                              pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3066                         if (!force_resend &&
3067                             (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3068                              !force_resend_writes))
3069                                 break;
3070
3071                         /* fall through */
3072                 case CALC_TARGET_NEED_RESEND:
3073                         cancel_map_check(req);
3074                         unlink_request(osd, req);
3075                         insert_request(need_resend, req);
3076                         break;
3077                 case CALC_TARGET_POOL_DNE:
3078                         check_pool_dne(req);
3079                         break;
3080                 }
3081         }
3082 }
3083
3084 static int handle_one_map(struct ceph_osd_client *osdc,
3085                           void *p, void *end, bool incremental,
3086                           struct rb_root *need_resend,
3087                           struct list_head *need_resend_linger)
3088 {
3089         struct ceph_osdmap *newmap;
3090         struct rb_node *n;
3091         bool skipped_map = false;
3092         bool was_full;
3093
3094         was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3095         set_pool_was_full(osdc);
3096
3097         if (incremental)
3098                 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3099         else
3100                 newmap = ceph_osdmap_decode(&p, end);
3101         if (IS_ERR(newmap))
3102                 return PTR_ERR(newmap);
3103
3104         if (newmap != osdc->osdmap) {
3105                 /*
3106                  * Preserve ->was_full before destroying the old map.
3107                  * For pools that weren't in the old map, ->was_full
3108                  * should be false.
3109                  */
3110                 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3111                         struct ceph_pg_pool_info *pi =
3112                             rb_entry(n, struct ceph_pg_pool_info, node);
3113                         struct ceph_pg_pool_info *old_pi;
3114
3115                         old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3116                         if (old_pi)
3117                                 pi->was_full = old_pi->was_full;
3118                         else
3119                                 WARN_ON(pi->was_full);
3120                 }
3121
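                /*
                 * A full map whose epoch is more than one ahead of ours means
                 * we missed intermediate updates; flag it so that all requests
                 * are force-resent below (see scan_requests()).
                 */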
3122                 if (osdc->osdmap->epoch &&
3123                     osdc->osdmap->epoch + 1 < newmap->epoch) {
3124                         WARN_ON(incremental);
3125                         skipped_map = true;
3126                 }
3127
3128                 ceph_osdmap_destroy(osdc->osdmap);
3129                 osdc->osdmap = newmap;
3130         }
3131
3132         was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3133         scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3134                       need_resend, need_resend_linger);
3135
3136         for (n = rb_first(&osdc->osds); n; ) {
3137                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3138
3139                 n = rb_next(n); /* close_osd() */
3140
3141                 scan_requests(osd, skipped_map, was_full, true, need_resend,
3142                               need_resend_linger);
3143                 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3144                     memcmp(&osd->o_con.peer_addr,
3145                            ceph_osd_addr(osdc->osdmap, osd->o_osd),
3146                            sizeof(struct ceph_entity_addr)))
3147                         close_osd(osd);
3148         }
3149
3150         return 0;
3151 }
3152
3153 static void kick_requests(struct ceph_osd_client *osdc,
3154                           struct rb_root *need_resend,
3155                           struct list_head *need_resend_linger)
3156 {
3157         struct ceph_osd_linger_request *lreq, *nlreq;
3158         struct rb_node *n;
3159
3160         for (n = rb_first(need_resend); n; ) {
3161                 struct ceph_osd_request *req =
3162                     rb_entry(n, struct ceph_osd_request, r_node);
3163                 struct ceph_osd *osd;
3164
3165                 n = rb_next(n);
3166                 erase_request(need_resend, req); /* before link_request() */
3167
3168                 WARN_ON(req->r_osd);
3169                 calc_target(osdc, &req->r_t, NULL, false);
3170                 osd = lookup_create_osd(osdc, req->r_t.osd, true);
3171                 link_request(osd, req);
3172                 if (!req->r_linger) {
3173                         if (!osd_homeless(osd) && !req->r_t.paused)
3174                                 send_request(req);
3175                 } else {
3176                         cancel_linger_request(req);
3177                 }
3178         }
3179
3180         list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3181                 if (!osd_homeless(lreq->osd))
3182                         send_linger(lreq);
3183
3184                 list_del_init(&lreq->scan_item);
3185         }
3186 }
3187
3188 /*
3189  * Process updated osd map.
3190  *
3191  * The message contains any number of incremental and full maps, normally
3192  * indicating some sort of topology change in the cluster.  Kick requests
3193  * off to different OSDs as needed.
3194  */
3195 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3196 {
3197         void *p = msg->front.iov_base;
3198         void *const end = p + msg->front.iov_len;
3199         u32 nr_maps, maplen;
3200         u32 epoch;
3201         struct ceph_fsid fsid;
3202         struct rb_root need_resend = RB_ROOT;
3203         LIST_HEAD(need_resend_linger);
3204         bool handled_incremental = false;
3205         bool was_pauserd, was_pausewr;
3206         bool pauserd, pausewr;
3207         int err;
3208
3209         dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3210         down_write(&osdc->lock);
3211
3212         /* verify fsid */
3213         ceph_decode_need(&p, end, sizeof(fsid), bad);
3214         ceph_decode_copy(&p, &fsid, sizeof(fsid));
3215         if (ceph_check_fsid(osdc->client, &fsid) < 0)
3216                 goto bad;
3217
3218         was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3219         was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3220                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3221                       have_pool_full(osdc);
3222
3223         /* incremental maps */
3224         ceph_decode_32_safe(&p, end, nr_maps, bad);
3225         dout(" %d inc maps\n", nr_maps);
3226         while (nr_maps > 0) {
3227                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3228                 epoch = ceph_decode_32(&p);
3229                 maplen = ceph_decode_32(&p);
3230                 ceph_decode_need(&p, end, maplen, bad);
3231                 if (osdc->osdmap->epoch &&
3232                     osdc->osdmap->epoch + 1 == epoch) {
3233                         dout("applying incremental map %u len %d\n",
3234                              epoch, maplen);
3235                         err = handle_one_map(osdc, p, p + maplen, true,
3236                                              &need_resend, &need_resend_linger);
3237                         if (err)
3238                                 goto bad;
3239                         handled_incremental = true;
3240                 } else {
3241                         dout("ignoring incremental map %u len %d\n",
3242                              epoch, maplen);
3243                 }
3244                 p += maplen;
3245                 nr_maps--;
3246         }
3247         if (handled_incremental)
3248                 goto done;
3249
3250         /* full maps */
3251         ceph_decode_32_safe(&p, end, nr_maps, bad);
3252         dout(" %d full maps\n", nr_maps);
3253         while (nr_maps) {
3254                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3255                 epoch = ceph_decode_32(&p);
3256                 maplen = ceph_decode_32(&p);
3257                 ceph_decode_need(&p, end, maplen, bad);
3258                 if (nr_maps > 1) {
3259                         dout("skipping non-latest full map %u len %d\n",
3260                              epoch, maplen);
3261                 } else if (osdc->osdmap->epoch >= epoch) {
3262                         dout("skipping full map %u len %d, "
3263                              "older than our %u\n", epoch, maplen,
3264                              osdc->osdmap->epoch);
3265                 } else {
3266                         dout("taking full map %u len %d\n", epoch, maplen);
3267                         err = handle_one_map(osdc, p, p + maplen, false,
3268                                              &need_resend, &need_resend_linger);
3269                         if (err)
3270                                 goto bad;
3271                 }
3272                 p += maplen;
3273                 nr_maps--;
3274         }
3275
3276 done:
3277         /*
3278          * subscribe to subsequent osdmap updates if reads or writes are
3279          * paused, or the cluster or a pool is full, so we find out when
3280          * we can resume and stop returning ENOSPC.
3281          */
3282         pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3283         pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3284                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3285                   have_pool_full(osdc);
3286         if (was_pauserd || was_pausewr || pauserd || pausewr)
3287                 maybe_request_map(osdc);
3288
3289         kick_requests(osdc, &need_resend, &need_resend_linger);
3290
3291         ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
3292                           osdc->osdmap->epoch);
3293         up_write(&osdc->lock);
3294         wake_up_all(&osdc->client->auth_wq);
3295         return;
3296
3297 bad:
3298         pr_err("osdc handle_map corrupt msg\n");
3299         ceph_msg_dump(msg);
3300         up_write(&osdc->lock);
3301 }
3302
3303 /*
3304  * Resubmit requests pending on the given osd.
3305  */
3306 static void kick_osd_requests(struct ceph_osd *osd)
3307 {
3308         struct rb_node *n;
3309
3310         for (n = rb_first(&osd->o_requests); n; ) {
3311                 struct ceph_osd_request *req =
3312                     rb_entry(n, struct ceph_osd_request, r_node);
3313
3314                 n = rb_next(n); /* cancel_linger_request() */
3315
3316                 if (!req->r_linger) {
3317                         if (!req->r_t.paused)
3318                                 send_request(req);
3319                 } else {
3320                         cancel_linger_request(req);
3321                 }
3322         }
3323         for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
3324                 struct ceph_osd_linger_request *lreq =
3325                     rb_entry(n, struct ceph_osd_linger_request, node);
3326
3327                 send_linger(lreq);
3328         }
3329 }
3330
3331 /*
3332  * If the osd connection drops, we need to resubmit all requests.
3333  */
3334 static void osd_fault(struct ceph_connection *con)
3335 {
3336         struct ceph_osd *osd = con->private;
3337         struct ceph_osd_client *osdc = osd->o_osdc;
3338
3339         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
3340
3341         down_write(&osdc->lock);
3342         if (!osd_registered(osd)) {
3343                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3344                 goto out_unlock;
3345         }
3346
3347         if (!reopen_osd(osd))
3348                 kick_osd_requests(osd);
3349         maybe_request_map(osdc);
3350
3351 out_unlock:
3352         up_write(&osdc->lock);
3353 }
3354
3355 /*
3356  * Process osd watch notifications
3357  */
3358 static void handle_watch_notify(struct ceph_osd_client *osdc,
3359                                 struct ceph_msg *msg)
3360 {
3361         void *p = msg->front.iov_base;
3362         void *const end = p + msg->front.iov_len;
3363         struct ceph_osd_linger_request *lreq;
3364         struct linger_work *lwork;
3365         u8 proto_ver, opcode;
3366         u64 cookie, notify_id;
3367         u64 notifier_id = 0;
3368         s32 return_code = 0;
3369         void *payload = NULL;
3370         u32 payload_len = 0;
3371
3372         ceph_decode_8_safe(&p, end, proto_ver, bad);
3373         ceph_decode_8_safe(&p, end, opcode, bad);
3374         ceph_decode_64_safe(&p, end, cookie, bad);
3375         p += 8; /* skip ver */
3376         ceph_decode_64_safe(&p, end, notify_id, bad);
3377
3378         if (proto_ver >= 1) {
3379                 ceph_decode_32_safe(&p, end, payload_len, bad);
3380                 ceph_decode_need(&p, end, payload_len, bad);
3381                 payload = p;
3382                 p += payload_len;
3383         }
3384
3385         if (le16_to_cpu(msg->hdr.version) >= 2)
3386                 ceph_decode_32_safe(&p, end, return_code, bad);
3387
3388         if (le16_to_cpu(msg->hdr.version) >= 3)
3389                 ceph_decode_64_safe(&p, end, notifier_id, bad);
3390
3391         down_read(&osdc->lock);
3392         lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
3393         if (!lreq) {
3394                 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
3395                      cookie);
3396                 goto out_unlock_osdc;
3397         }
3398
3399         mutex_lock(&lreq->lock);
3400         dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
3401              opcode, cookie, lreq, lreq->is_watch);
3402         if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
3403                 if (!lreq->last_error) {
3404                         lreq->last_error = -ENOTCONN;
3405                         queue_watch_error(lreq);
3406                 }
3407         } else if (!lreq->is_watch) {
3408                 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
3409                 if (lreq->notify_id && lreq->notify_id != notify_id) {
3410                         dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
3411                              lreq->notify_id, notify_id);
3412                 } else if (!completion_done(&lreq->notify_finish_wait)) {
3413                         struct ceph_msg_data *data =
3414                             list_first_entry_or_null(&msg->data,
3415                                                      struct ceph_msg_data,
3416                                                      links);
3417
3418                         if (data) {
3419                                 if (lreq->preply_pages) {
3420                                         WARN_ON(data->type !=
3421                                                         CEPH_MSG_DATA_PAGES);
3422                                         *lreq->preply_pages = data->pages;
3423                                         *lreq->preply_len = data->length;
3424                                 } else {
3425                                         ceph_release_page_vector(data->pages,
3426                                                calc_pages_for(0, data->length));
3427                                 }
3428                         }
3429                         lreq->notify_finish_error = return_code;
3430                         complete_all(&lreq->notify_finish_wait);
3431                 }
3432         } else {
3433                 /* CEPH_WATCH_EVENT_NOTIFY */
3434                 lwork = lwork_alloc(lreq, do_watch_notify);
3435                 if (!lwork) {
3436                         pr_err("failed to allocate notify-lwork\n");
3437                         goto out_unlock_lreq;
3438                 }
3439
3440                 lwork->notify.notify_id = notify_id;
3441                 lwork->notify.notifier_id = notifier_id;
3442                 lwork->notify.payload = payload;
3443                 lwork->notify.payload_len = payload_len;
3444                 lwork->notify.msg = ceph_msg_get(msg);
3445                 lwork_queue(lwork);
3446         }
3447
3448 out_unlock_lreq:
3449         mutex_unlock(&lreq->lock);
3450 out_unlock_osdc:
3451         up_read(&osdc->lock);
3452         return;
3453
3454 bad:
3455         pr_err("osdc handle_watch_notify corrupt msg\n");
3456 }
3457
3458 /*
3459  * Register request, send initial attempt.
3460  */
3461 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
3462                             struct ceph_osd_request *req,
3463                             bool nofail)
3464 {
3465         down_read(&osdc->lock);
3466         submit_request(req, false);
3467         up_read(&osdc->lock);
3468
3469         return 0;
3470 }
3471 EXPORT_SYMBOL(ceph_osdc_start_request);
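
/*
 * Illustrative usage sketch (not part of this file's call graph): the
 * common start / wait / put pattern, assuming "req" was set up with
 * ceph_osdc_new_request() or ceph_osdc_alloc_request():
 *
 *      ret = ceph_osdc_start_request(osdc, req, false);
 *      if (!ret)
 *              ret = ceph_osdc_wait_request(osdc, req);
 *      ceph_osdc_put_request(req);
 */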
3472
3473 /*
3474  * Unregister a registered request.  The request is not completed (i.e.
3475  * no callbacks or wakeups) - higher layers are supposed to know what
3476  * they are canceling.
3477  */
3478 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
3479 {
3480         struct ceph_osd_client *osdc = req->r_osdc;
3481
3482         down_write(&osdc->lock);
3483         if (req->r_osd)
3484                 cancel_request(req);
3485         up_write(&osdc->lock);
3486 }
3487 EXPORT_SYMBOL(ceph_osdc_cancel_request);
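
/*
 * Note: since cancellation does not complete the request, a caller that
 * may have waiters must wake them itself - see the r_safe_completion
 * kludge in wait_request_timeout() below.
 */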
3488
3489 /*
3490  * @timeout: in jiffies, 0 means "wait forever"
3491  */
3492 static int wait_request_timeout(struct ceph_osd_request *req,
3493                                 unsigned long timeout)
3494 {
3495         long left;
3496
3497         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3498         left = wait_for_completion_killable_timeout(&req->r_completion,
3499                                                 ceph_timeout_jiffies(timeout));
3500         if (left <= 0) {
3501                 left = left ?: -ETIMEDOUT;
3502                 ceph_osdc_cancel_request(req);
3503
3504                 /* kludge - need to wake ceph_osdc_sync() */
3505                 complete_all(&req->r_safe_completion);
3506         } else {
3507                 left = req->r_result; /* completed */
3508         }
3509
3510         return left;
3511 }
3512
3513 /*
3514  * wait for a request to complete
3515  */
3516 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
3517                            struct ceph_osd_request *req)
3518 {
3519         return wait_request_timeout(req, 0);
3520 }
3521 EXPORT_SYMBOL(ceph_osdc_wait_request);
3522
3523 /*
3524  * sync - wait for all in-flight writes to commit to disk.  last_tid is
 * sampled up front so that requests submitted after this call cannot
 * starve us.
3525  */
3526 void ceph_osdc_sync(struct ceph_osd_client *osdc)
3527 {
3528         struct rb_node *n, *p;
3529         u64 last_tid = atomic64_read(&osdc->last_tid);
3530
3531 again:
3532         down_read(&osdc->lock);
3533         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3534                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3535
3536                 mutex_lock(&osd->lock);
3537                 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
3538                         struct ceph_osd_request *req =
3539                             rb_entry(p, struct ceph_osd_request, r_node);
3540
3541                         if (req->r_tid > last_tid)
3542                                 break;
3543
3544                         if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
3545                                 continue;
3546
3547                         ceph_osdc_get_request(req);
3548                         mutex_unlock(&osd->lock);
3549                         up_read(&osdc->lock);
3550                         dout("%s waiting on req %p tid %llu last_tid %llu\n",
3551                              __func__, req, req->r_tid, last_tid);
3552                         wait_for_completion(&req->r_safe_completion);
3553                         ceph_osdc_put_request(req);
3554                         goto again;
3555                 }
3556
3557                 mutex_unlock(&osd->lock);
3558         }
3559
3560         up_read(&osdc->lock);
3561         dout("%s done last_tid %llu\n", __func__, last_tid);
3562 }
3563 EXPORT_SYMBOL(ceph_osdc_sync);
3564
3565 static struct ceph_osd_request *
3566 alloc_linger_request(struct ceph_osd_linger_request *lreq)
3567 {
3568         struct ceph_osd_request *req;
3569
3570         req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
3571         if (!req)
3572                 return NULL;
3573
3574         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3575         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3576
3577         if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
3578                 ceph_osdc_put_request(req);
3579                 return NULL;
3580         }
3581
3582         return req;
3583 }
3584
3585 /*
3586  * Returns a handle, caller owns a ref.
3587  */
3588 struct ceph_osd_linger_request *
3589 ceph_osdc_watch(struct ceph_osd_client *osdc,
3590                 struct ceph_object_id *oid,
3591                 struct ceph_object_locator *oloc,
3592                 rados_watchcb2_t wcb,
3593                 rados_watcherrcb_t errcb,
3594                 void *data)
3595 {
3596         struct ceph_osd_linger_request *lreq;
3597         int ret;
3598
3599         lreq = linger_alloc(osdc);
3600         if (!lreq)
3601                 return ERR_PTR(-ENOMEM);
3602
3603         lreq->is_watch = true;
3604         lreq->wcb = wcb;
3605         lreq->errcb = errcb;
3606         lreq->data = data;
3607         lreq->watch_valid_thru = jiffies;
3608
3609         ceph_oid_copy(&lreq->t.base_oid, oid);
3610         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3611         lreq->t.flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
3612         lreq->mtime = CURRENT_TIME;
3613
3614         lreq->reg_req = alloc_linger_request(lreq);
3615         if (!lreq->reg_req) {
3616                 ret = -ENOMEM;
3617                 goto err_put_lreq;
3618         }
3619
3620         lreq->ping_req = alloc_linger_request(lreq);
3621         if (!lreq->ping_req) {
3622                 ret = -ENOMEM;
3623                 goto err_put_lreq;
3624         }
3625
3626         down_write(&osdc->lock);
3627         linger_register(lreq); /* before osd_req_op_* */
3628         osd_req_op_watch_init(lreq->reg_req, 0, lreq->linger_id,
3629                               CEPH_OSD_WATCH_OP_WATCH);
3630         osd_req_op_watch_init(lreq->ping_req, 0, lreq->linger_id,
3631                               CEPH_OSD_WATCH_OP_PING);
3632         linger_submit(lreq);
3633         up_write(&osdc->lock);
3634
3635         ret = linger_reg_commit_wait(lreq);
3636         if (ret) {
3637                 linger_cancel(lreq);
3638                 goto err_put_lreq;
3639         }
3640
3641         return lreq;
3642
3643 err_put_lreq:
3644         linger_put(lreq);
3645         return ERR_PTR(ret);
3646 }
3647 EXPORT_SYMBOL(ceph_osdc_watch);
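
/*
 * Illustrative sketch of pairing ceph_osdc_watch() with
 * ceph_osdc_unwatch(); "oid", "oloc", "wcb", "errcb" and "data" are
 * caller-provided and assumed here:
 *
 *      handle = ceph_osdc_watch(osdc, oid, oloc, wcb, errcb, data);
 *      if (IS_ERR(handle))
 *              return PTR_ERR(handle);
 *      ...
 *      ret = ceph_osdc_unwatch(osdc, handle);
 */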
3648
3649 /*
3650  * Releases a ref.
3651  *
3652  * Times out after mount_timeout to preserve rbd unmap behaviour
3653  * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
3654  * with mount_timeout").
3655  */
3656 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
3657                       struct ceph_osd_linger_request *lreq)
3658 {
3659         struct ceph_options *opts = osdc->client->options;
3660         struct ceph_osd_request *req;
3661         int ret;
3662
3663         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3664         if (!req)
3665                 return -ENOMEM;
3666
3667         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3668         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3669         req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
3670         req->r_mtime = CURRENT_TIME;
3671         osd_req_op_watch_init(req, 0, lreq->linger_id,
3672                               CEPH_OSD_WATCH_OP_UNWATCH);
3673
3674         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3675         if (ret)
3676                 goto out_put_req;
3677
3678         ceph_osdc_start_request(osdc, req, false);
3679         linger_cancel(lreq);
3680         linger_put(lreq);
3681         ret = wait_request_timeout(req, opts->mount_timeout);
3682
3683 out_put_req:
3684         ceph_osdc_put_request(req);
3685         return ret;
3686 }
3687 EXPORT_SYMBOL(ceph_osdc_unwatch);
3688
3689 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
3690                                       u64 notify_id, u64 cookie, void *payload,
3691                                       size_t payload_len)
3692 {
3693         struct ceph_osd_req_op *op;
3694         struct ceph_pagelist *pl;
3695         int ret;
3696
3697         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
3698
3699         pl = kmalloc(sizeof(*pl), GFP_NOIO);
3700         if (!pl)
3701                 return -ENOMEM;
3702
3703         ceph_pagelist_init(pl);
3704         ret = ceph_pagelist_encode_64(pl, notify_id);
3705         ret |= ceph_pagelist_encode_64(pl, cookie);
3706         if (payload) {
3707                 ret |= ceph_pagelist_encode_32(pl, payload_len);
3708                 ret |= ceph_pagelist_append(pl, payload, payload_len);
3709         } else {
3710                 ret |= ceph_pagelist_encode_32(pl, 0);
3711         }
3712         if (ret) {
3713                 ceph_pagelist_release(pl);
3714                 return -ENOMEM;
3715         }
3716
3717         ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
3718         op->indata_len = pl->length;
3719         return 0;
3720 }
3721
3722 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
3723                          struct ceph_object_id *oid,
3724                          struct ceph_object_locator *oloc,
3725                          u64 notify_id,
3726                          u64 cookie,
3727                          void *payload,
3728                          size_t payload_len)
3729 {
3730         struct ceph_osd_request *req;
3731         int ret;
3732
3733         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3734         if (!req)
3735                 return -ENOMEM;
3736
3737         ceph_oid_copy(&req->r_base_oid, oid);
3738         ceph_oloc_copy(&req->r_base_oloc, oloc);
3739         req->r_flags = CEPH_OSD_FLAG_READ;
3740
3741         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3742         if (ret)
3743                 goto out_put_req;
3744
3745         ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
3746                                          payload_len);
3747         if (ret)
3748                 goto out_put_req;
3749
3750         ceph_osdc_start_request(osdc, req, false);
3751         ret = ceph_osdc_wait_request(osdc, req);
3752
3753 out_put_req:
3754         ceph_osdc_put_request(req);
3755         return ret;
3756 }
3757 EXPORT_SYMBOL(ceph_osdc_notify_ack);
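
/*
 * ceph_osdc_notify_ack() is typically invoked from a watch callback to
 * acknowledge a notification; notify_id and cookie are the values
 * delivered with that notification (see handle_watch_notify() above).
 * payload may be NULL.
 */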
3758
3759 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
3760                                   u64 cookie, u32 prot_ver, u32 timeout,
3761                                   void *payload, size_t payload_len)
3762 {
3763         struct ceph_osd_req_op *op;
3764         struct ceph_pagelist *pl;
3765         int ret;
3766
3767         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
3768         op->notify.cookie = cookie;
3769
3770         pl = kmalloc(sizeof(*pl), GFP_NOIO);
3771         if (!pl)
3772                 return -ENOMEM;
3773
3774         ceph_pagelist_init(pl);
3775         ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
3776         ret |= ceph_pagelist_encode_32(pl, timeout);
3777         ret |= ceph_pagelist_encode_32(pl, payload_len);
3778         ret |= ceph_pagelist_append(pl, payload, payload_len);
3779         if (ret) {
3780                 ceph_pagelist_release(pl);
3781                 return -ENOMEM;
3782         }
3783
3784         ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
3785         op->indata_len = pl->length;
3786         return 0;
3787 }
3788
3789 /*
3790  * @timeout: in seconds
3791  *
3792  * @preply_{pages,len} are initialized both on success and error.
3793  * The caller is responsible for:
3794  *
3795  *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
3796  */
3797 int ceph_osdc_notify(struct ceph_osd_client *osdc,
3798                      struct ceph_object_id *oid,
3799                      struct ceph_object_locator *oloc,
3800                      void *payload,
3801                      size_t payload_len,
3802                      u32 timeout,
3803                      struct page ***preply_pages,
3804                      size_t *preply_len)
3805 {
3806         struct ceph_osd_linger_request *lreq;
3807         struct page **pages;
3808         int ret;
3809
3810         WARN_ON(!timeout);
3811         if (preply_pages) {
3812                 *preply_pages = NULL;
3813                 *preply_len = 0;
3814         }
3815
3816         lreq = linger_alloc(osdc);
3817         if (!lreq)
3818                 return -ENOMEM;
3819
3820         lreq->preply_pages = preply_pages;
3821         lreq->preply_len = preply_len;
3822
3823         ceph_oid_copy(&lreq->t.base_oid, oid);
3824         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
3825         lreq->t.flags = CEPH_OSD_FLAG_READ;
3826
3827         lreq->reg_req = alloc_linger_request(lreq);
3828         if (!lreq->reg_req) {
3829                 ret = -ENOMEM;
3830                 goto out_put_lreq;
3831         }
3832
3833         /* for notify_id */
3834         pages = ceph_alloc_page_vector(1, GFP_NOIO);
3835         if (IS_ERR(pages)) {
3836                 ret = PTR_ERR(pages);
3837                 goto out_put_lreq;
3838         }
3839
3840         down_write(&osdc->lock);
3841         linger_register(lreq); /* before osd_req_op_* */
3842         ret = osd_req_op_notify_init(lreq->reg_req, 0, lreq->linger_id, 1,
3843                                      timeout, payload, payload_len);
3844         if (ret) {
3845                 linger_unregister(lreq);
3846                 up_write(&osdc->lock);
3847                 ceph_release_page_vector(pages, 1);
3848                 goto out_put_lreq;
3849         }
3850         ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
3851                                                  response_data),
3852                                  pages, PAGE_SIZE, 0, false, true);
3853         linger_submit(lreq);
3854         up_write(&osdc->lock);
3855
3856         ret = linger_reg_commit_wait(lreq);
3857         if (!ret)
3858                 ret = linger_notify_finish_wait(lreq);
3859         else
3860                 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
3861
3862         linger_cancel(lreq);
3863 out_put_lreq:
3864         linger_put(lreq);
3865         return ret;
3866 }
3867 EXPORT_SYMBOL(ceph_osdc_notify);
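
/*
 * Illustrative sketch of the caller-side cleanup required by the comment
 * above; "oid", "oloc", "buf" and the 10 second timeout are assumptions:
 *
 *      struct page **reply_pages;
 *      size_t reply_len;
 *
 *      ret = ceph_osdc_notify(osdc, oid, oloc, buf, buf_len, 10,
 *                             &reply_pages, &reply_len);
 *      ...
 *      ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
 */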
3868
3869 /*
3870  * Return the number of milliseconds since the watch was last
3871  * confirmed, or an error.  If there is an error, the watch is no
3872  * longer valid, and should be destroyed with ceph_osdc_unwatch().
3873  */
3874 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
3875                           struct ceph_osd_linger_request *lreq)
3876 {
3877         unsigned long stamp, age;
3878         int ret;
3879
3880         down_read(&osdc->lock);
3881         mutex_lock(&lreq->lock);
3882         stamp = lreq->watch_valid_thru;
3883         if (!list_empty(&lreq->pending_lworks)) {
3884                 struct linger_work *lwork =
3885                     list_first_entry(&lreq->pending_lworks,
3886                                      struct linger_work,
3887                                      pending_item);
3888
3889                 if (time_before(lwork->queued_stamp, stamp))
3890                         stamp = lwork->queued_stamp;
3891         }
3892         age = jiffies - stamp;
3893         dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
3894              lreq, lreq->linger_id, age, lreq->last_error);
3895         /* we are truncating to msecs, so return a safe upper bound */
3896         ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
3897
3898         mutex_unlock(&lreq->lock);
3899         up_read(&osdc->lock);
3900         return ret;
3901 }
3902
3903 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
3904 {
3905         u8 struct_v;
3906         u32 struct_len;
3907         int ret;
3908
3909         ret = ceph_start_decoding(p, end, 2, "watch_item_t",
3910                                   &struct_v, &struct_len);
3911         if (ret)
3912                 return ret;
3913
3914         ceph_decode_copy(p, &item->name, sizeof(item->name));
3915         item->cookie = ceph_decode_64(p);
3916         *p += 4; /* skip timeout_seconds */
3917         if (struct_v >= 2) {
3918                 ceph_decode_copy(p, &item->addr, sizeof(item->addr));
3919                 ceph_decode_addr(&item->addr);
3920         }
3921
3922         dout("%s %s%llu cookie %llu addr %s\n", __func__,
3923              ENTITY_NAME(item->name), item->cookie,
3924              ceph_pr_addr(&item->addr.in_addr));
3925         return 0;
3926 }
3927
3928 static int decode_watchers(void **p, void *end,
3929                            struct ceph_watch_item **watchers,
3930                            u32 *num_watchers)
3931 {
3932         u8 struct_v;
3933         u32 struct_len;
3934         int i;
3935         int ret;
3936
3937         ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
3938                                   &struct_v, &struct_len);
3939         if (ret)
3940                 return ret;
3941
3942         *num_watchers = ceph_decode_32(p);
3943         *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
3944         if (!*watchers)
3945                 return -ENOMEM;
3946
3947         for (i = 0; i < *num_watchers; i++) {
3948                 ret = decode_watcher(p, end, *watchers + i);
3949                 if (ret) {
3950                         kfree(*watchers);
3951                         return ret;
3952                 }
3953         }
3954
3955         return 0;
3956 }
3957
3958 /*
3959  * On success, the caller is responsible for:
3960  *
3961  *     kfree(watchers);
3962  */
3963 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
3964                             struct ceph_object_id *oid,
3965                             struct ceph_object_locator *oloc,
3966                             struct ceph_watch_item **watchers,
3967                             u32 *num_watchers)
3968 {
3969         struct ceph_osd_request *req;
3970         struct page **pages;
3971         int ret;
3972
3973         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
3974         if (!req)
3975                 return -ENOMEM;
3976
3977         ceph_oid_copy(&req->r_base_oid, oid);
3978         ceph_oloc_copy(&req->r_base_oloc, oloc);
3979         req->r_flags = CEPH_OSD_FLAG_READ;
3980
3981         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
3982         if (ret)
3983                 goto out_put_req;
3984
3985         pages = ceph_alloc_page_vector(1, GFP_NOIO);
3986         if (IS_ERR(pages)) {
3987                 ret = PTR_ERR(pages);
3988                 goto out_put_req;
3989         }
3990
3991         osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
3992         ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
3993                                                  response_data),
3994                                  pages, PAGE_SIZE, 0, false, true);
3995
3996         ceph_osdc_start_request(osdc, req, false);
3997         ret = ceph_osdc_wait_request(osdc, req);
3998         if (ret >= 0) {
3999                 void *p = page_address(pages[0]);
4000                 void *const end = p + req->r_ops[0].outdata_len;
4001
4002                 ret = decode_watchers(&p, end, watchers, num_watchers);
4003         }
4004
4005 out_put_req:
4006         ceph_osdc_put_request(req);
4007         return ret;
4008 }
4009 EXPORT_SYMBOL(ceph_osdc_list_watchers);
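
/*
 * Illustrative sketch; on success the caller owns the array and must
 * kfree() it, per the comment above ("oid"/"oloc" assumed):
 *
 *      ret = ceph_osdc_list_watchers(osdc, oid, oloc, &watchers,
 *                                    &num_watchers);
 *      if (!ret) {
 *              for (i = 0; i < num_watchers; i++)
 *                      pr_info("watcher %s%llu cookie %llu\n",
 *                              ENTITY_NAME(watchers[i].name),
 *                              watchers[i].cookie);
 *              kfree(watchers);
 *      }
 */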
4010
4011 /*
4012  * Call all pending notify callbacks - for use after a watch is
4013  * unregistered, to make sure no more callbacks for it will be invoked
4014  */
4015 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
4016 {
4017         dout("%s osdc %p\n", __func__, osdc);
4018         flush_workqueue(osdc->notify_wq);
4019 }
4020 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
4021
4022 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
4023 {
4024         down_read(&osdc->lock);
4025         maybe_request_map(osdc);
4026         up_read(&osdc->lock);
4027 }
4028 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
4029
4030 /*
4031  * Execute an OSD class method on an object.
4032  *
4033  * @flags: CEPH_OSD_FLAG_*
4034  * @resp_len: out param for reply length
4035  */
4036 int ceph_osdc_call(struct ceph_osd_client *osdc,
4037                    struct ceph_object_id *oid,
4038                    struct ceph_object_locator *oloc,
4039                    const char *class, const char *method,
4040                    unsigned int flags,
4041                    struct page *req_page, size_t req_len,
4042                    struct page *resp_page, size_t *resp_len)
4043 {
4044         struct ceph_osd_request *req;
4045         int ret;
4046
4047         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4048         if (!req)
4049                 return -ENOMEM;
4050
4051         ceph_oid_copy(&req->r_base_oid, oid);
4052         ceph_oloc_copy(&req->r_base_oloc, oloc);
4053         req->r_flags = flags;
4054
4055         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4056         if (ret)
4057                 goto out_put_req;
4058
4059         osd_req_op_cls_init(req, 0, CEPH_OSD_OP_CALL, class, method);
4060         if (req_page)
4061                 osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
4062                                                   0, false, false);
4063         if (resp_page)
4064                 osd_req_op_cls_response_data_pages(req, 0, &resp_page,
4065                                                    PAGE_SIZE, 0, false, false);
4066
4067         ceph_osdc_start_request(osdc, req, false);
4068         ret = ceph_osdc_wait_request(osdc, req);
4069         if (ret >= 0) {
4070                 ret = req->r_ops[0].rval;
4071                 if (resp_page)
4072                         *resp_len = req->r_ops[0].outdata_len;
4073         }
4074
4075 out_put_req:
4076         ceph_osdc_put_request(req);
4077         return ret;
4078 }
4079 EXPORT_SYMBOL(ceph_osdc_call);
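
/*
 * Illustrative sketch of invoking a class method with one request page
 * and one reply page; the class/method names and pages here are
 * assumptions, not anything this file defines:
 *
 *      ret = ceph_osdc_call(osdc, oid, oloc, "lock", "get_info",
 *                           CEPH_OSD_FLAG_READ, req_page, req_len,
 *                           reply_page, &reply_len);
 */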
4080
4081 /*
4082  * init, shutdown
4083  */
4084 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
4085 {
4086         int err;
4087
4088         dout("init\n");
4089         osdc->client = client;
4090         init_rwsem(&osdc->lock);
4091         osdc->osds = RB_ROOT;
4092         INIT_LIST_HEAD(&osdc->osd_lru);
4093         spin_lock_init(&osdc->osd_lru_lock);
4094         osd_init(&osdc->homeless_osd);
4095         osdc->homeless_osd.o_osdc = osdc;
4096         osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
4097         osdc->linger_requests = RB_ROOT;
4098         osdc->map_checks = RB_ROOT;
4099         osdc->linger_map_checks = RB_ROOT;
4100         INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
4101         INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
4102
4103         err = -ENOMEM;
4104         osdc->osdmap = ceph_osdmap_alloc();
4105         if (!osdc->osdmap)
4106                 goto out;
4107
4108         osdc->req_mempool = mempool_create_slab_pool(10,
4109                                                      ceph_osd_request_cache);
4110         if (!osdc->req_mempool)
4111                 goto out_map;
4112
4113         err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
4114                                 PAGE_SIZE, 10, true, "osd_op");
4115         if (err < 0)
4116                 goto out_mempool;
4117         err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
4118                                 PAGE_SIZE, 10, true, "osd_op_reply");
4119         if (err < 0)
4120                 goto out_msgpool;
4121
4122         err = -ENOMEM;
4123         osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
4124         if (!osdc->notify_wq)
4125                 goto out_msgpool_reply;
4126
4127         schedule_delayed_work(&osdc->timeout_work,
4128                               osdc->client->options->osd_keepalive_timeout);
4129         schedule_delayed_work(&osdc->osds_timeout_work,
4130             round_jiffies_relative(osdc->client->options->osd_idle_ttl));
4131
4132         return 0;
4133
4134 out_msgpool_reply:
4135         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4136 out_msgpool:
4137         ceph_msgpool_destroy(&osdc->msgpool_op);
4138 out_mempool:
4139         mempool_destroy(osdc->req_mempool);
4140 out_map:
4141         ceph_osdmap_destroy(osdc->osdmap);
4142 out:
4143         return err;
4144 }
4145
4146 void ceph_osdc_stop(struct ceph_osd_client *osdc)
4147 {
4148         flush_workqueue(osdc->notify_wq);
4149         destroy_workqueue(osdc->notify_wq);
4150         cancel_delayed_work_sync(&osdc->timeout_work);
4151         cancel_delayed_work_sync(&osdc->osds_timeout_work);
4152
4153         down_write(&osdc->lock);
4154         while (!RB_EMPTY_ROOT(&osdc->osds)) {
4155                 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
4156                                                 struct ceph_osd, o_node);
4157                 close_osd(osd);
4158         }
4159         up_write(&osdc->lock);
4160         WARN_ON(atomic_read(&osdc->homeless_osd.o_ref) != 1);
4161         osd_cleanup(&osdc->homeless_osd);
4162
4163         WARN_ON(!list_empty(&osdc->osd_lru));
4164         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
4165         WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
4166         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
4167         WARN_ON(atomic_read(&osdc->num_requests));
4168         WARN_ON(atomic_read(&osdc->num_homeless));
4169
4170         ceph_osdmap_destroy(osdc->osdmap);
4171         mempool_destroy(osdc->req_mempool);
4172         ceph_msgpool_destroy(&osdc->msgpool_op);
4173         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
4174 }
4175
4176 /*
4177  * Read some contiguous pages.  If we cross a stripe boundary, shorten
4178  * *plen.  Return number of bytes read, or error.
4179  */
4180 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
4181                         struct ceph_vino vino, struct ceph_file_layout *layout,
4182                         u64 off, u64 *plen,
4183                         u32 truncate_seq, u64 truncate_size,
4184                         struct page **pages, int num_pages, int page_align)
4185 {
4186         struct ceph_osd_request *req;
4187         int rc = 0;
4188
4189         dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
4190              vino.snap, off, *plen);
4191         req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
4192                                     CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
4193                                     NULL, truncate_seq, truncate_size,
4194                                     false);
4195         if (IS_ERR(req))
4196                 return PTR_ERR(req);
4197
4198         /* it may be a short read due to an object boundary */
4199         osd_req_op_extent_osd_data_pages(req, 0,
4200                                 pages, *plen, page_align, false, false);
4201
4202         dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
4203              off, *plen, *plen, page_align);
4204
4205         rc = ceph_osdc_start_request(osdc, req, false);
4206         if (!rc)
4207                 rc = ceph_osdc_wait_request(osdc, req);
4208
4209         ceph_osdc_put_request(req);
4210         dout("readpages result %d\n", rc);
4211         return rc;
4212 }
4213 EXPORT_SYMBOL(ceph_osdc_readpages);
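
/*
 * Illustrative sketch ("vino", "layout", "off", "len" assumed): allocate
 * a page vector, read into it, then release it:
 *
 *      num_pages = calc_pages_for(0, len);
 *      pages = ceph_alloc_page_vector(num_pages, GFP_NOFS);
 *      rc = ceph_osdc_readpages(osdc, vino, layout, off, &len,
 *                               truncate_seq, truncate_size,
 *                               pages, num_pages, 0);
 *      ceph_release_page_vector(pages, num_pages);
 */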
4214
4215 /*
4216  * do a synchronous write on N pages
4217  */
4218 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
4219                          struct ceph_file_layout *layout,
4220                          struct ceph_snap_context *snapc,
4221                          u64 off, u64 len,
4222                          u32 truncate_seq, u64 truncate_size,
4223                          struct timespec *mtime,
4224                          struct page **pages, int num_pages)
4225 {
4226         struct ceph_osd_request *req;
4227         int rc = 0;
4228         int page_align = off & ~PAGE_MASK;
4229
4230         req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
4231                                     CEPH_OSD_OP_WRITE,
4232                                     CEPH_OSD_FLAG_ONDISK | CEPH_OSD_FLAG_WRITE,
4233                                     snapc, truncate_seq, truncate_size,
4234                                     true);
4235         if (IS_ERR(req))
4236                 return PTR_ERR(req);
4237
4238         /* it may be a short write due to an object boundary */
4239         osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
4240                                 false, false);
4241         dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
4242
4243         req->r_mtime = *mtime;
4244         rc = ceph_osdc_start_request(osdc, req, true);
4245         if (!rc)
4246                 rc = ceph_osdc_wait_request(osdc, req);
4247
4248         ceph_osdc_put_request(req);
4249         if (rc == 0)
4250                 rc = len;
4251         dout("writepages result %d\n", rc);
4252         return rc;
4253 }
4254 EXPORT_SYMBOL(ceph_osdc_writepages);
4255
4256 int ceph_osdc_setup(void)
4257 {
4258         size_t size = sizeof(struct ceph_osd_request) +
4259             CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
4260
4261         BUG_ON(ceph_osd_request_cache);
4262         ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
4263                                                    0, 0, NULL);
4264
4265         return ceph_osd_request_cache ? 0 : -ENOMEM;
4266 }
4267 EXPORT_SYMBOL(ceph_osdc_setup);
4268
4269 void ceph_osdc_cleanup(void)
4270 {
4271         BUG_ON(!ceph_osd_request_cache);
4272         kmem_cache_destroy(ceph_osd_request_cache);
4273         ceph_osd_request_cache = NULL;
4274 }
4275 EXPORT_SYMBOL(ceph_osdc_cleanup);
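
/*
 * ceph_osdc_setup() and ceph_osdc_cleanup() manage the shared request
 * slab and are intended to be called once each, from libceph module
 * init and exit.
 */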
4276
4277 /*
4278  * handle incoming message
4279  */
4280 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
4281 {
4282         struct ceph_osd *osd = con->private;
4283         struct ceph_osd_client *osdc = osd->o_osdc;
4284         int type = le16_to_cpu(msg->hdr.type);
4285
4286         switch (type) {
4287         case CEPH_MSG_OSD_MAP:
4288                 ceph_osdc_handle_map(osdc, msg);
4289                 break;
4290         case CEPH_MSG_OSD_OPREPLY:
4291                 handle_reply(osd, msg);
4292                 break;
4293         case CEPH_MSG_WATCH_NOTIFY:
4294                 handle_watch_notify(osdc, msg);
4295                 break;
4296
4297         default:
4298                 pr_err("received unknown message type %d %s\n", type,
4299                        ceph_msg_type_name(type));
4300         }
4301
4302         ceph_msg_put(msg);
4303 }
4304
4305 /*
4306  * Lookup and return message for incoming reply.  If the data portion of
4307  * the message is larger than what we preallocated, don't try to handle
4308  * it - just skip the message for now.
4309  */
4310 static struct ceph_msg *get_reply(struct ceph_connection *con,
4311                                   struct ceph_msg_header *hdr,
4312                                   int *skip)
4313 {
4314         struct ceph_osd *osd = con->private;
4315         struct ceph_osd_client *osdc = osd->o_osdc;
4316         struct ceph_msg *m = NULL;
4317         struct ceph_osd_request *req;
4318         int front_len = le32_to_cpu(hdr->front_len);
4319         int data_len = le32_to_cpu(hdr->data_len);
4320         u64 tid = le64_to_cpu(hdr->tid);
4321
4322         down_read(&osdc->lock);
4323         if (!osd_registered(osd)) {
4324                 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
4325                 *skip = 1;
4326                 goto out_unlock_osdc;
4327         }
4328         WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
4329
4330         mutex_lock(&osd->lock);
4331         req = lookup_request(&osd->o_requests, tid);
4332         if (!req) {
4333                 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
4334                      osd->o_osd, tid);
4335                 *skip = 1;
4336                 goto out_unlock_session;
4337         }
4338
4339         ceph_msg_revoke_incoming(req->r_reply);
4340
4341         if (front_len > req->r_reply->front_alloc_len) {
4342                 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
4343                         __func__, osd->o_osd, req->r_tid, front_len,
4344                         req->r_reply->front_alloc_len);
4345                 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
4346                                  false);
4347                 if (!m)
4348                         goto out_unlock_session;
4349                 ceph_msg_put(req->r_reply);
4350                 req->r_reply = m;
4351         }
4352
4353         if (data_len > req->r_reply->data_length) {
4354                 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
4355                         __func__, osd->o_osd, req->r_tid, data_len,
4356                         req->r_reply->data_length);
4357                 m = NULL;
4358                 *skip = 1;
4359                 goto out_unlock_session;
4360         }
4361
4362         m = ceph_msg_get(req->r_reply);
4363         dout("get_reply tid %lld %p\n", tid, m);
4364
4365 out_unlock_session:
4366         mutex_unlock(&osd->lock);
4367 out_unlock_osdc:
4368         up_read(&osdc->lock);
4369         return m;
4370 }
4371
4372 /*
4373  * TODO: switch to a msg-owned pagelist
4374  */
4375 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
4376 {
4377         struct ceph_msg *m;
4378         int type = le16_to_cpu(hdr->type);
4379         u32 front_len = le32_to_cpu(hdr->front_len);
4380         u32 data_len = le32_to_cpu(hdr->data_len);
4381
4382         m = ceph_msg_new(type, front_len, GFP_NOIO, false);
4383         if (!m)
4384                 return NULL;
4385
4386         if (data_len) {
4387                 struct page **pages;
4388                 struct ceph_osd_data osd_data;
4389
4390                 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
4391                                                GFP_NOIO);
4392                 if (IS_ERR(pages)) {
4393                         ceph_msg_put(m);
4394                         return NULL;
4395                 }
4396
4397                 ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
4398                                          false);
4399                 ceph_osdc_msg_data_add(m, &osd_data);
4400         }
4401
4402         return m;
4403 }
4404
4405 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
4406                                   struct ceph_msg_header *hdr,
4407                                   int *skip)
4408 {
4409         struct ceph_osd *osd = con->private;
4410         int type = le16_to_cpu(hdr->type);
4411
4412         *skip = 0;
4413         switch (type) {
4414         case CEPH_MSG_OSD_MAP:
4415         case CEPH_MSG_WATCH_NOTIFY:
4416                 return alloc_msg_with_page_vector(hdr);
4417         case CEPH_MSG_OSD_OPREPLY:
4418                 return get_reply(con, hdr, skip);
4419         default:
4420                 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
4421                         osd->o_osd, type);
4422                 *skip = 1;
4423                 return NULL;
4424         }
4425 }
4426
4427 /*
4428  * Wrappers to reference-count the containing ceph_osd struct
4429  */
4430 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
4431 {
4432         struct ceph_osd *osd = con->private;

4433         if (get_osd(osd))
4434                 return con;
4435         return NULL;
4436 }
4437
4438 static void put_osd_con(struct ceph_connection *con)
4439 {
4440         struct ceph_osd *osd = con->private;

4441         put_osd(osd);
4442 }
4443
4444 /*
4445  * authentication
4446  */
4447 /*
4448  * Note: returned pointer is the address of a structure that's
4449  * managed separately.  Caller must *not* attempt to free it.
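 *
 * If @force_new is set, any existing authorizer is destroyed and rebuilt;
 * otherwise an existing one is refreshed in place.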
4450  */
4451 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
4452                                         int *proto, int force_new)
4453 {
4454         struct ceph_osd *o = con->private;
4455         struct ceph_osd_client *osdc = o->o_osdc;
4456         struct ceph_auth_client *ac = osdc->client->monc.auth;
4457         struct ceph_auth_handshake *auth = &o->o_auth;
4458
4459         if (force_new && auth->authorizer) {
4460                 ceph_auth_destroy_authorizer(auth->authorizer);
4461                 auth->authorizer = NULL;
4462         }
4463         if (!auth->authorizer) {
4464                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
4465                                                       auth);
4466                 if (ret)
4467                         return ERR_PTR(ret);
4468         } else {
4469                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
4470                                                      auth);
4471                 if (ret)
4472                         return ERR_PTR(ret);
4473         }
4474         *proto = ac->protocol;
4475
4476         return auth;
4477 }
4478
4479
4480 static int verify_authorizer_reply(struct ceph_connection *con, int len)
4481 {
4482         struct ceph_osd *o = con->private;
4483         struct ceph_osd_client *osdc = o->o_osdc;
4484         struct ceph_auth_client *ac = osdc->client->monc.auth;
4485
4486         return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer, len);
4487 }
4488
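/*
 * The OSD rejected our authorizer: drop the cached OSD ticket and make
 * sure the monitor session is still authenticated so a fresh authorizer
 * can be built on reconnect.
 */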
4489 static int invalidate_authorizer(struct ceph_connection *con)
4490 {
4491         struct ceph_osd *o = con->private;
4492         struct ceph_osd_client *osdc = o->o_osdc;
4493         struct ceph_auth_client *ac = osdc->client->monc.auth;
4494
4495         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
4496         return ceph_monc_validate_auth(&osdc->client->monc);
4497 }
4498
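/*
 * Message signing hooks, used when the session's auth handshake provides
 * sign/check methods (e.g. cephx with message signing enabled).
 */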
4499 static int osd_sign_message(struct ceph_msg *msg)
4500 {
4501         struct ceph_osd *o = msg->con->private;
4502         struct ceph_auth_handshake *auth = &o->o_auth;
4503
4504         return ceph_auth_sign_message(auth, msg);
4505 }
4506
4507 static int osd_check_message_signature(struct ceph_msg *msg)
4508 {
4509         struct ceph_osd *o = msg->con->private;
4510         struct ceph_auth_handshake *auth = &o->o_auth;
4511
4512         return ceph_auth_check_message_signature(auth, msg);
4513 }
4514
4515 static const struct ceph_connection_operations osd_con_ops = {
4516         .get = get_osd_con,
4517         .put = put_osd_con,
4518         .dispatch = dispatch,
4519         .get_authorizer = get_authorizer,
4520         .verify_authorizer_reply = verify_authorizer_reply,
4521         .invalidate_authorizer = invalidate_authorizer,
4522         .alloc_msg = alloc_msg,
4523         .sign_message = osd_sign_message,
4524         .check_message_signature = osd_check_message_signature,
4525         .fault = osd_fault,
4526 };
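
/*
 * For reference: a per-OSD connection is bound to the table above when the
 * osd session is created, roughly as in this sketch (assuming the
 * messenger's ceph_con_init() interface):
 *
 *      ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
 *
 * The private pointer passed there is what each callback above recovers
 * via con->private.
 */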