/*
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */
/*
 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2010, 2015, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ldlm/ldlm_lockd.c
 *
 * Author: Peter Braam <braam@clusterfs.com>
 * Author: Phil Schwan <phil@clusterfs.com>
 */
#define DEBUG_SUBSYSTEM S_LDLM

#include "../../include/linux/libcfs/libcfs.h"
#include "../include/lustre_dlm.h"
#include "../include/obd_class.h"
#include <linux/list.h>
#include "ldlm_internal.h"
static int ldlm_num_threads;
module_param(ldlm_num_threads, int, 0444);
MODULE_PARM_DESC(ldlm_num_threads, "number of DLM service threads to start");

static char *ldlm_cpts;
module_param(ldlm_cpts, charp, 0444);
MODULE_PARM_DESC(ldlm_cpts, "CPU partitions ldlm threads should run on");

static struct mutex ldlm_ref_mutex;
static int ldlm_refcount;

static struct kobject *ldlm_kobj;
struct kset *ldlm_ns_kset;
static struct kset *ldlm_svc_kset;
struct ldlm_cb_async_args {
	struct ldlm_cb_set_arg	*ca_set_arg;
	struct ldlm_lock	*ca_lock;
};

static struct ldlm_state *ldlm_state;

#define ELT_TERMINATE 2
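
/*
 * A pool of threads that handle incoming blocking ASTs and asynchronous
 * LRU cancellations queued via ldlm_bl_to_thread().
 */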
struct ldlm_bl_pool {
	spinlock_t		blp_lock;

	/*
	 * blp_prio_list is used for callbacks that should be handled
	 * as a priority. It is used for LDLM_FL_DISCARD_DATA requests.
	 */
	struct list_head	blp_prio_list;

	/*
	 * blp_list is used for all other callbacks which are likely
	 * to take longer to process.
	 */
	struct list_head	blp_list;

	wait_queue_head_t	blp_waitq;
	struct completion	blp_comp;
	atomic_t		blp_num_threads;
	atomic_t		blp_busy_threads;
	int			blp_min_threads;
	int			blp_max_threads;
};
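
/*
 * One unit of work for the blocking threads: either a single lock whose
 * blocking AST should be run (blwi_lock), or a list of blwi_count locks
 * to be cancelled (blwi_head).
 */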
struct ldlm_bl_work_item {
	struct list_head	blwi_entry;
	struct ldlm_namespace	*blwi_ns;
	struct ldlm_lock_desc	blwi_ld;
	struct ldlm_lock	*blwi_lock;
	struct list_head	blwi_head;
	int			blwi_count;
	struct completion	blwi_comp;
	enum ldlm_cancel_flags	blwi_flags;
	int			blwi_mem_pressure;
};
/**
 * Callback handler for receiving incoming blocking ASTs.
 *
 * This can only happen on client side.
 */
void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
			     struct ldlm_lock_desc *ld, struct ldlm_lock *lock)
{
	int do_ast;

	LDLM_DEBUG(lock, "client blocking AST callback handler");

	lock_res_and_lock(lock);
	ldlm_set_cbpending(lock);

	if (ldlm_is_cancel_on_block(lock))
		ldlm_set_cancel(lock);

	do_ast = !lock->l_readers && !lock->l_writers;
	unlock_res_and_lock(lock);

	if (do_ast) {
		CDEBUG(D_DLMTRACE,
		       "Lock %p already unused, calling callback (%p)\n", lock,
		       lock->l_blocking_ast);
		if (lock->l_blocking_ast)
			lock->l_blocking_ast(lock, ld, lock->l_ast_data,
					     LDLM_CB_BLOCKING);
	} else {
		CDEBUG(D_DLMTRACE,
		       "Lock %p is referenced, will be cancelled later\n",
		       lock);
	}

	LDLM_DEBUG(lock, "client blocking callback handler END");
	LDLM_LOCK_RELEASE(lock);
}
/**
 * Callback handler for receiving incoming completion ASTs.
 *
 * This can only happen on client side.
 */
static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
				    struct ldlm_namespace *ns,
				    struct ldlm_request *dlm_req,
				    struct ldlm_lock *lock)
{
	LIST_HEAD(ast_list);
	int lvb_len;
	int rc = 0;

	LDLM_DEBUG(lock, "client completion callback handler START");

	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE)) {
		int to = cfs_time_seconds(1);

		while (to > 0) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule_timeout(to);
			if (lock->l_granted_mode == lock->l_req_mode ||
			    ldlm_is_destroyed(lock))
				break;
		}
	}

	lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT);
	if (lvb_len < 0) {
		LDLM_ERROR(lock, "Fail to get lvb_len, rc = %d", lvb_len);
		rc = lvb_len;
		goto out;
	} else if (lvb_len > 0) {
		if (lock->l_lvb_len > 0) {
			/* for extent lock, lvb contains ost_lvb{}. */
			LASSERT(lock->l_lvb_data);

			if (unlikely(lock->l_lvb_len < lvb_len)) {
				LDLM_ERROR(lock, "Replied LVB is larger than expectation, expected = %d, replied = %d",
					   lock->l_lvb_len, lvb_len);
				rc = -EINVAL;
				goto out;
			}
		} else if (ldlm_has_layout(lock)) { /* for layout lock, lvb has
						     * variable length
						     */
			void *lvb_data;

			lvb_data = kzalloc(lvb_len, GFP_NOFS);
			if (!lvb_data) {
				LDLM_ERROR(lock, "No memory: %d.\n", lvb_len);
				rc = -ENOMEM;
				goto out;
			}

			lock_res_and_lock(lock);
			LASSERT(!lock->l_lvb_data);
			lock->l_lvb_type = LVB_T_LAYOUT;
			lock->l_lvb_data = lvb_data;
			lock->l_lvb_len = lvb_len;
			unlock_res_and_lock(lock);
		}
	}

	lock_res_and_lock(lock);
	if (ldlm_is_destroyed(lock) ||
	    lock->l_granted_mode == lock->l_req_mode) {
		/* bug 11300: the lock has already been granted */
		unlock_res_and_lock(lock);
		LDLM_DEBUG(lock, "Double grant race happened");
		goto out;
	}

	/* If we receive the completion AST before the actual enqueue returned,
	 * then we might need to switch lock modes, resources, or extents.
	 */
	if (dlm_req->lock_desc.l_granted_mode != lock->l_req_mode) {
		lock->l_req_mode = dlm_req->lock_desc.l_granted_mode;
		LDLM_DEBUG(lock, "completion AST, new lock mode");
	}

	if (lock->l_resource->lr_type != LDLM_PLAIN) {
		ldlm_convert_policy_to_local(req->rq_export,
					     dlm_req->lock_desc.l_resource.lr_type,
					     &dlm_req->lock_desc.l_policy_data,
					     &lock->l_policy_data);
		LDLM_DEBUG(lock, "completion AST, new policy data");
	}

	ldlm_resource_unlink_lock(lock);
	if (memcmp(&dlm_req->lock_desc.l_resource.lr_name,
		   &lock->l_resource->lr_name,
		   sizeof(lock->l_resource->lr_name)) != 0) {
		unlock_res_and_lock(lock);
		rc = ldlm_lock_change_resource(ns, lock,
					       &dlm_req->lock_desc.l_resource.lr_name);
		if (rc < 0) {
			LDLM_ERROR(lock, "Failed to allocate resource");
			goto out;
		}
		LDLM_DEBUG(lock, "completion AST, new resource");
		CERROR("change resource!\n");
		lock_res_and_lock(lock);
	}

	if (dlm_req->lock_flags & LDLM_FL_AST_SENT) {
		/* BL_AST locks are not needed in LRU.
		 * Let ldlm_cancel_lru() be fast.
		 */
		ldlm_lock_remove_from_lru(lock);
		lock->l_flags |= LDLM_FL_CBPENDING | LDLM_FL_BL_AST;
		LDLM_DEBUG(lock, "completion AST includes blocking AST");
	}

	if (lock->l_lvb_len > 0) {
		rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_CLIENT,
				   lock->l_lvb_data, lvb_len);
		if (rc < 0) {
			unlock_res_and_lock(lock);
			goto out;
		}
	}

	ldlm_grant_lock(lock, &ast_list);
	unlock_res_and_lock(lock);

	LDLM_DEBUG(lock, "callback handler finished, about to run_ast_work");

	/* Let the enqueue path call osc_lock_upcall() and initialize
	 * l_ast_data
	 */
	OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 2);

	ldlm_run_ast_work(ns, &ast_list, LDLM_WORK_CP_AST);

	LDLM_DEBUG_NOLOCK("client completion callback handler END (lock %p)",
			  lock);

out:
	if (rc < 0) {
		lock_res_and_lock(lock);
		ldlm_set_failed(lock);
		unlock_res_and_lock(lock);
		wake_up(&lock->l_waitq);
	}
	LDLM_LOCK_RELEASE(lock);
}
/**
 * Callback handler for receiving incoming glimpse ASTs.
 *
 * This can only happen on client side. After handling the glimpse AST
 * we also consider dropping the lock here if it is unused locally for a
 * long time.
 */
static void ldlm_handle_gl_callback(struct ptlrpc_request *req,
				    struct ldlm_namespace *ns,
				    struct ldlm_request *dlm_req,
				    struct ldlm_lock *lock)
{
	int rc = -ENOSYS;

	LDLM_DEBUG(lock, "client glimpse AST callback handler");

	if (lock->l_glimpse_ast)
		rc = lock->l_glimpse_ast(lock, req);

	if (req->rq_repmsg) {
		ptlrpc_reply(req);
	} else {
		req->rq_status = rc;
		ptlrpc_error(req);
	}

	lock_res_and_lock(lock);
	if (lock->l_granted_mode == LCK_PW &&
	    !lock->l_readers && !lock->l_writers &&
	    cfs_time_after(cfs_time_current(),
			   cfs_time_add(lock->l_last_used,
					cfs_time_seconds(10)))) {
		unlock_res_and_lock(lock);
		if (ldlm_bl_to_thread_lock(ns, NULL, lock))
			ldlm_handle_bl_callback(ns, NULL, lock);

		return;
	}
	unlock_res_and_lock(lock);
	LDLM_LOCK_RELEASE(lock);
}
static int ldlm_callback_reply(struct ptlrpc_request *req, int rc)
{
	if (req->rq_no_reply)
		return 0;

	req->rq_status = rc;
	if (!req->rq_packed_final) {
		rc = lustre_pack_reply(req, 1, NULL, NULL);
		if (rc)
			return rc;
	}
	return ptlrpc_reply(req);
}
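
/*
 * Queue a prepared work item for the blocking threads: DISCARD_DATA
 * cancellations go to the priority list, everything else to the regular
 * list. For synchronous requests, wait here until a thread has finished
 * the work.
 */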
static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
			       enum ldlm_cancel_flags cancel_flags)
{
	struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;

	spin_lock(&blp->blp_lock);
	if (blwi->blwi_lock && ldlm_is_discard_data(blwi->blwi_lock)) {
		/* add LDLM_FL_DISCARD_DATA requests to the priority list */
		list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
	} else {
		/* other blocking callbacks are added to the regular list */
		list_add_tail(&blwi->blwi_entry, &blp->blp_list);
	}
	spin_unlock(&blp->blp_lock);

	wake_up(&blp->blp_waitq);

	/* can not check blwi->blwi_flags as blwi could be already freed in
	 * LCF_ASYNC mode
	 */
	if (!(cancel_flags & LCF_ASYNC))
		wait_for_completion(&blwi->blwi_comp);

	return 0;
}
static inline void init_blwi(struct ldlm_bl_work_item *blwi,
			     struct ldlm_namespace *ns,
			     struct ldlm_lock_desc *ld,
			     struct list_head *cancels, int count,
			     struct ldlm_lock *lock,
			     enum ldlm_cancel_flags cancel_flags)
{
	init_completion(&blwi->blwi_comp);
	INIT_LIST_HEAD(&blwi->blwi_head);

	if (memory_pressure_get())
		blwi->blwi_mem_pressure = 1;

	blwi->blwi_ns = ns;
	blwi->blwi_flags = cancel_flags;
	if (ld)
		blwi->blwi_ld = *ld;
	if (count) {
		list_add(&blwi->blwi_head, cancels);
		list_del_init(cancels);
		blwi->blwi_count = count;
	} else {
		blwi->blwi_lock = lock;
	}
}
/**
 * Queues a list of locks \a cancels containing \a count locks
 * for later processing by a blocking thread. If \a count is zero,
 * then the lock referenced as \a lock is queued instead.
 *
 * The blocking thread would then call ->l_blocking_ast callback in the lock.
 * If list addition fails an error is returned and caller is supposed to
 * call ->l_blocking_ast itself.
 */
static int ldlm_bl_to_thread(struct ldlm_namespace *ns,
			     struct ldlm_lock_desc *ld,
			     struct ldlm_lock *lock,
			     struct list_head *cancels, int count,
			     enum ldlm_cancel_flags cancel_flags)
{
	if (cancels && count == 0)
		return 0;

	if (cancel_flags & LCF_ASYNC) {
		struct ldlm_bl_work_item *blwi;

		blwi = kzalloc(sizeof(*blwi), GFP_NOFS);
		if (!blwi)
			return -ENOMEM;
		init_blwi(blwi, ns, ld, cancels, count, lock, cancel_flags);

		return __ldlm_bl_to_thread(blwi, cancel_flags);
	} else {
		/* if it is synchronous call do minimum mem alloc, as it could
		 * be triggered from kernel shrinker
		 */
		struct ldlm_bl_work_item blwi;

		memset(&blwi, 0, sizeof(blwi));
		init_blwi(&blwi, ns, ld, cancels, count, lock, cancel_flags);
		return __ldlm_bl_to_thread(&blwi, cancel_flags);
	}
}
int ldlm_bl_to_thread_lock(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
			   struct ldlm_lock *lock)
{
	return ldlm_bl_to_thread(ns, ld, lock, NULL, 0, LCF_ASYNC);
}
int ldlm_bl_to_thread_list(struct ldlm_namespace *ns, struct ldlm_lock_desc *ld,
			   struct list_head *cancels, int count,
			   enum ldlm_cancel_flags cancel_flags)
{
	return ldlm_bl_to_thread(ns, ld, NULL, cancels, count, cancel_flags);
}
/* Setinfo coming from Server (eg MDT) to Client (eg MDC)! */
static int ldlm_handle_setinfo(struct ptlrpc_request *req)
{
	struct obd_device *obd = req->rq_export->exp_obd;
	char *key;
	void *val;
	int keylen, vallen;
	int rc = -ENOSYS;

	DEBUG_REQ(D_HSM, req, "%s: handle setinfo\n", obd->obd_name);

	req_capsule_set(&req->rq_pill, &RQF_OBD_SET_INFO);

	key = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY);
	if (!key) {
		DEBUG_REQ(D_IOCTL, req, "no set_info key");
		return -EFAULT;
	}
	keylen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_KEY,
				      RCL_CLIENT);
	val = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_VAL);
	if (!val) {
		DEBUG_REQ(D_IOCTL, req, "no set_info val");
		return -EFAULT;
	}
	vallen = req_capsule_get_size(&req->rq_pill, &RMF_SETINFO_VAL,
				      RCL_CLIENT);

	/* We are responsible for swabbing contents of val */

	if (KEY_IS(KEY_HSM_COPYTOOL_SEND))
		/* Pass it on to mdc (the "export" in this case) */
		rc = obd_set_info_async(req->rq_svc_thread->t_env,
					req->rq_export,
					sizeof(KEY_HSM_COPYTOOL_SEND),
					KEY_HSM_COPYTOOL_SEND,
					vallen, val, NULL);
	else
		DEBUG_REQ(D_WARNING, req, "ignoring unknown key %s", key);

	return rc;
}
static inline void ldlm_callback_errmsg(struct ptlrpc_request *req,
					const char *msg, int rc,
					struct lustre_handle *handle)
{
	DEBUG_REQ((req->rq_no_reply || rc) ? D_WARNING : D_DLMTRACE, req,
		  "%s: [nid %s] [rc %d] [lock %#llx]",
		  msg, libcfs_id2str(req->rq_peer), rc,
		  handle ? handle->cookie : 0);
	if (req->rq_no_reply)
		CWARN("No reply was sent, maybe cause bug 21636.\n");
	else if (rc)
		CWARN("Send reply failed, maybe cause bug 21636.\n");
}
static int ldlm_handle_qc_callback(struct ptlrpc_request *req)
{
	struct obd_quotactl *oqctl;
	struct client_obd *cli = &req->rq_export->exp_obd->u.cli;

	oqctl = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
	if (!oqctl) {
		CERROR("Can't unpack obd_quotactl\n");
		return -EPROTO;
	}

	oqctl->qc_stat = ptlrpc_status_ntoh(oqctl->qc_stat);

	cli->cl_qchk_stat = oqctl->qc_stat;
	return 0;
}
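
/*
 * Main request handler for the "ldlm_cbd" service: dispatches the
 * blocking, completion, glimpse, set_info and quota-check callbacks
 * that servers send to this client.
 */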
/* TODO: handle requests in a similar way as MDT: see mdt_handle_common() */
static int ldlm_callback_handler(struct ptlrpc_request *req)
{
	struct ldlm_namespace *ns;
	struct ldlm_request *dlm_req;
	struct ldlm_lock *lock;
	int rc;

	/* Requests arrive in sender's byte order. The ptlrpc service
	 * handler has already checked and, if necessary, byte-swapped the
	 * incoming request message body, but I am responsible for the
	 * message buffers.
	 */

	/* do nothing for sec context finalize */
	if (lustre_msg_get_opc(req->rq_reqmsg) == SEC_CTX_FINI)
		return 0;

	req_capsule_init(&req->rq_pill, req, RCL_SERVER);

	if (!req->rq_export) {
		rc = ldlm_callback_reply(req, -ENOTCONN);
		ldlm_callback_errmsg(req, "Operate on unconnected server",
				     rc, NULL);
		return 0;
	}

	LASSERT(req->rq_export->exp_obd);
	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case LDLM_BL_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_BL_CALLBACK_NET))
			return 0;
		break;
	case LDLM_CP_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CP_CALLBACK_NET))
			return 0;
		break;
	case LDLM_GL_CALLBACK:
		if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_GL_CALLBACK_NET))
			return 0;
		break;
	case LDLM_SET_INFO:
		rc = ldlm_handle_setinfo(req);
		ldlm_callback_reply(req, rc);
		return 0;
	case OBD_QC_CALLBACK:
		req_capsule_set(&req->rq_pill, &RQF_QC_CALLBACK);
		if (OBD_FAIL_CHECK(OBD_FAIL_OBD_QC_CALLBACK_NET))
			return 0;
		rc = ldlm_handle_qc_callback(req);
		ldlm_callback_reply(req, rc);
		return 0;
	default:
		CERROR("unknown opcode %u\n",
		       lustre_msg_get_opc(req->rq_reqmsg));
		ldlm_callback_reply(req, -EPROTO);
		return 0;
	}
	ns = req->rq_export->exp_obd->obd_namespace;
	LASSERT(ns);

	req_capsule_set(&req->rq_pill, &RQF_LDLM_CALLBACK);

	dlm_req = req_capsule_client_get(&req->rq_pill, &RMF_DLM_REQ);
	if (!dlm_req) {
		rc = ldlm_callback_reply(req, -EPROTO);
		ldlm_callback_errmsg(req, "Operate without parameter", rc,
				     NULL);
		return 0;
	}

	/* Force a known safe race, send a cancel to the server for a lock
	 * which the server has already started a blocking callback on.
	 */
	if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CANCEL_BL_CB_RACE) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
		rc = ldlm_cli_cancel(&dlm_req->lock_handle[0], 0);
		if (rc < 0)
			CERROR("ldlm_cli_cancel: %d\n", rc);
	}

	lock = ldlm_handle2lock_long(&dlm_req->lock_handle[0], 0);
	if (!lock) {
		CDEBUG(D_DLMTRACE, "callback on lock %#llx - lock disappeared\n",
		       dlm_req->lock_handle[0].cookie);
		rc = ldlm_callback_reply(req, -EINVAL);
		ldlm_callback_errmsg(req, "Operate with invalid parameter", rc,
				     &dlm_req->lock_handle[0]);
		return 0;
	}

	if (ldlm_is_fail_loc(lock) &&
	    lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
		OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
	/* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
	lock_res_and_lock(lock);
	lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
					      LDLM_FL_AST_MASK);
	if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
		/* If somebody cancels lock and cache is already dropped,
		 * or lock is failed before cp_ast received on client,
		 * we can tell the server we have no lock. Otherwise, we
		 * should send cancel after dropping the cache.
		 */
		if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
		    ldlm_is_failed(lock)) {
			LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n",
				   dlm_req->lock_handle[0].cookie);
			unlock_res_and_lock(lock);
			LDLM_LOCK_RELEASE(lock);
			rc = ldlm_callback_reply(req, -EINVAL);
			ldlm_callback_errmsg(req, "Operate on stale lock", rc,
					     &dlm_req->lock_handle[0]);
			return 0;
		}
		/* BL_AST locks are not needed in LRU.
		 * Let ldlm_cancel_lru() be fast.
		 */
		ldlm_lock_remove_from_lru(lock);
		ldlm_set_bl_ast(lock);
	}
	unlock_res_and_lock(lock);
	/* We want the ost thread to get this reply so that it can respond
	 * to ost requests (write cache writeback) that might be triggered
	 * in the lock.
	 *
	 * But we'd also like to be able to indicate in the reply that we're
	 * cancelling right now, because it's unused, or have an intent result
	 * in the reply, so we might have to push the responsibility for sending
	 * the reply down into the AST handlers, alas.
	 */

	switch (lustre_msg_get_opc(req->rq_reqmsg)) {
	case LDLM_BL_CALLBACK:
		CDEBUG(D_INODE, "blocking ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
		if (!ldlm_is_cancel_on_block(lock)) {
			rc = ldlm_callback_reply(req, 0);
			if (req->rq_no_reply || rc)
				ldlm_callback_errmsg(req, "Normal process", rc,
						     &dlm_req->lock_handle[0]);
		}
		if (ldlm_bl_to_thread_lock(ns, &dlm_req->lock_desc, lock))
			ldlm_handle_bl_callback(ns, &dlm_req->lock_desc, lock);
		break;
	case LDLM_CP_CALLBACK:
		CDEBUG(D_INODE, "completion ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_CP_CALLBACK);
		ldlm_callback_reply(req, 0);
		ldlm_handle_cp_callback(req, ns, dlm_req, lock);
		break;
	case LDLM_GL_CALLBACK:
		CDEBUG(D_INODE, "glimpse ast\n");
		req_capsule_extend(&req->rq_pill, &RQF_LDLM_GL_CALLBACK);
		ldlm_handle_gl_callback(req, ns, dlm_req, lock);
		break;
	default:
		LBUG();	/* checked above */
	}

	return 0;
}
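
/*
 * Select the next work item for a blocking thread: priority-list entries
 * are preferred, but at least every blp_num_threads-th pick is taken from
 * the regular list so that it is not starved by priority work.
 */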
static struct ldlm_bl_work_item *ldlm_bl_get_work(struct ldlm_bl_pool *blp)
{
	struct ldlm_bl_work_item *blwi = NULL;
	static unsigned int num_bl;

	spin_lock(&blp->blp_lock);
	/* process a request from the blp_list at least every blp_num_threads */
	if (!list_empty(&blp->blp_list) &&
	    (list_empty(&blp->blp_prio_list) || num_bl == 0))
		blwi = list_entry(blp->blp_list.next,
				  struct ldlm_bl_work_item, blwi_entry);
	else if (!list_empty(&blp->blp_prio_list))
		blwi = list_entry(blp->blp_prio_list.next,
				  struct ldlm_bl_work_item,
				  blwi_entry);

	if (blwi) {
		if (++num_bl >= atomic_read(&blp->blp_num_threads))
			num_bl = 0;
		list_del(&blwi->blwi_entry);
	}
	spin_unlock(&blp->blp_lock);

	return blwi;
}
/* This only contains temporary data until the thread starts */
struct ldlm_bl_thread_data {
	char			bltd_name[CFS_CURPROC_COMM_MAX];
	struct ldlm_bl_pool	*bltd_blp;
	struct completion	bltd_comp;
	int			bltd_num;
};

static int ldlm_bl_thread_main(void *arg);
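
/*
 * Start one more blocking thread. The ldlm_bl_thread_data handed to the
 * new thread lives on this function's stack, so we must wait for the
 * thread to copy what it needs and signal bltd_comp before returning.
 */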
static int ldlm_bl_thread_start(struct ldlm_bl_pool *blp)
{
	struct ldlm_bl_thread_data bltd = { .bltd_blp = blp };
	struct task_struct *task;

	init_completion(&bltd.bltd_comp);
	bltd.bltd_num = atomic_read(&blp->blp_num_threads);
	snprintf(bltd.bltd_name, sizeof(bltd.bltd_name),
		 "ldlm_bl_%02d", bltd.bltd_num);
	task = kthread_run(ldlm_bl_thread_main, &bltd, "%s", bltd.bltd_name);
	if (IS_ERR(task)) {
		CERROR("cannot start LDLM thread ldlm_bl_%02d: rc %ld\n",
		       atomic_read(&blp->blp_num_threads), PTR_ERR(task));
		return PTR_ERR(task);
	}
	wait_for_completion(&bltd.bltd_comp);

	return 0;
}
/**
 * Main blocking requests processing thread.
 *
 * Callers put locks into its queue by calling ldlm_bl_to_thread().
 * This thread eventually makes the actual call to the lock's
 * ->l_blocking_ast for each queued lock.
 */
static int ldlm_bl_thread_main(void *arg)
{
	struct ldlm_bl_pool *blp;
	struct ldlm_bl_thread_data *bltd = arg;

	blp = bltd->bltd_blp;

	atomic_inc(&blp->blp_num_threads);
	atomic_inc(&blp->blp_busy_threads);

	complete(&bltd->bltd_comp);
	/* cannot use bltd after this, it is only on caller's stack */

	while (1) {
		struct l_wait_info lwi = { 0 };
		struct ldlm_bl_work_item *blwi = NULL;
		int busy;

		blwi = ldlm_bl_get_work(blp);
		if (!blwi) {
			atomic_dec(&blp->blp_busy_threads);
			l_wait_event_exclusive(blp->blp_waitq,
					       (blwi = ldlm_bl_get_work(blp)),
					       &lwi);
			busy = atomic_inc_return(&blp->blp_busy_threads);
		} else {
			busy = atomic_read(&blp->blp_busy_threads);
		}

		if (!blwi->blwi_ns)
			/* added by ldlm_cleanup() */
			break;

		/* Not fatal if racy and have a few too many threads */
		if (unlikely(busy < blp->blp_max_threads &&
			     busy >= atomic_read(&blp->blp_num_threads) &&
			     !blwi->blwi_mem_pressure))
			/* discard the return value, we tried */
			ldlm_bl_thread_start(blp);

		if (blwi->blwi_mem_pressure)
			memory_pressure_set();

		if (blwi->blwi_count) {
			int count;

			/* The special case when we cancel locks in LRU
			 * asynchronously, we pass the list of locks here.
			 * Thus locks are marked LDLM_FL_CANCELING, but NOT
			 * canceled locally yet.
			 */
			count = ldlm_cli_cancel_list_local(&blwi->blwi_head,
							   blwi->blwi_count,
							   LCF_BL_AST);
			ldlm_cli_cancel_list(&blwi->blwi_head, count, NULL,
					     blwi->blwi_flags);
		} else {
			ldlm_handle_bl_callback(blwi->blwi_ns, &blwi->blwi_ld,
						blwi->blwi_lock);
		}
		if (blwi->blwi_mem_pressure)
			memory_pressure_clr();

		if (blwi->blwi_flags & LCF_ASYNC)
			kfree(blwi);
		else
			complete(&blwi->blwi_comp);
	}

	atomic_dec(&blp->blp_busy_threads);
	atomic_dec(&blp->blp_num_threads);
	complete(&blp->blp_comp);
	return 0;
}
static int ldlm_setup(void);
static int ldlm_cleanup(void);
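
/*
 * LDLM module state is shared by all of its users: ldlm_setup() runs
 * when the first reference is taken, ldlm_cleanup() when the last
 * reference is dropped.
 */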
int ldlm_get_ref(void)
{
	int rc = 0;

	mutex_lock(&ldlm_ref_mutex);
	if (++ldlm_refcount == 1) {
		rc = ldlm_setup();
		if (rc)
			ldlm_refcount--;
	}
	mutex_unlock(&ldlm_ref_mutex);

	return rc;
}
EXPORT_SYMBOL(ldlm_get_ref);

void ldlm_put_ref(void)
{
	mutex_lock(&ldlm_ref_mutex);
	if (ldlm_refcount == 1) {
		int rc = ldlm_cleanup();

		if (rc)
			CERROR("ldlm_cleanup failed: %d\n", rc);
		else
			ldlm_refcount--;
	} else {
		ldlm_refcount--;
	}
	mutex_unlock(&ldlm_ref_mutex);
}
EXPORT_SYMBOL(ldlm_put_ref);
extern unsigned int ldlm_cancel_unused_locks_before_replay;

static ssize_t cancel_unused_locks_before_replay_show(struct kobject *kobj,
						      struct attribute *attr,
						      char *buf)
{
	return sprintf(buf, "%d\n", ldlm_cancel_unused_locks_before_replay);
}

static ssize_t cancel_unused_locks_before_replay_store(struct kobject *kobj,
						       struct attribute *attr,
						       const char *buffer,
						       size_t count)
{
	unsigned long val;
	int rc;

	rc = kstrtoul(buffer, 10, &val);
	if (rc)
		return rc;

	ldlm_cancel_unused_locks_before_replay = val;

	return count;
}
LUSTRE_RW_ATTR(cancel_unused_locks_before_replay);
/* These are for root of /sys/fs/lustre/ldlm */
static struct attribute *ldlm_attrs[] = {
	&lustre_attr_cancel_unused_locks_before_replay.attr,
	NULL,
};

static struct attribute_group ldlm_attr_group = {
	.attrs = ldlm_attrs,
};
static int ldlm_setup(void)
{
	static struct ptlrpc_service_conf conf;
	struct ldlm_bl_pool *blp = NULL;
	int rc = 0;
	int i;

	if (ldlm_state)
		return -EALREADY;

	ldlm_state = kzalloc(sizeof(*ldlm_state), GFP_NOFS);
	if (!ldlm_state)
		return -ENOMEM;

	ldlm_kobj = kobject_create_and_add("ldlm", lustre_kobj);
	if (!ldlm_kobj) {
		rc = -ENOMEM;
		goto out;
	}

	rc = sysfs_create_group(ldlm_kobj, &ldlm_attr_group);
	if (rc)
		goto out;

	ldlm_ns_kset = kset_create_and_add("namespaces", NULL, ldlm_kobj);
	if (!ldlm_ns_kset) {
		rc = -ENOMEM;
		goto out;
	}

	ldlm_svc_kset = kset_create_and_add("services", NULL, ldlm_kobj);
	if (!ldlm_svc_kset) {
		rc = -ENOMEM;
		goto out;
	}

	rc = ldlm_debugfs_setup();
	if (rc != 0)
		goto out;

	memset(&conf, 0, sizeof(conf));
	conf = (typeof(conf)) {
		.psc_name		= "ldlm_cbd",
		.psc_watchdog_factor	= 2,
		.psc_buf		= {
			.bc_nbufs		= LDLM_CLIENT_NBUFS,
			.bc_buf_size		= LDLM_BUFSIZE,
			.bc_req_max_size	= LDLM_MAXREQSIZE,
			.bc_rep_max_size	= LDLM_MAXREPSIZE,
			.bc_req_portal		= LDLM_CB_REQUEST_PORTAL,
			.bc_rep_portal		= LDLM_CB_REPLY_PORTAL,
		},
		.psc_thr		= {
			.tc_thr_name		= "ldlm_cb",
			.tc_thr_factor		= LDLM_THR_FACTOR,
			.tc_nthrs_init		= LDLM_NTHRS_INIT,
			.tc_nthrs_base		= LDLM_NTHRS_BASE,
			.tc_nthrs_max		= LDLM_NTHRS_MAX,
			.tc_nthrs_user		= ldlm_num_threads,
			.tc_cpu_affinity	= 1,
			.tc_ctx_tags		= LCT_MD_THREAD | LCT_DT_THREAD,
		},
		.psc_cpt		= {
			.cc_pattern		= ldlm_cpts,
		},
		.psc_ops		= {
			.so_req_handler		= ldlm_callback_handler,
		},
	};
	ldlm_state->ldlm_cb_service =
			ptlrpc_register_service(&conf, ldlm_svc_kset,
						ldlm_svc_debugfs_dir);
	if (IS_ERR(ldlm_state->ldlm_cb_service)) {
		CERROR("failed to start service\n");
		rc = PTR_ERR(ldlm_state->ldlm_cb_service);
		ldlm_state->ldlm_cb_service = NULL;
		goto out;
	}

	blp = kzalloc(sizeof(*blp), GFP_NOFS);
	if (!blp) {
		rc = -ENOMEM;
		goto out;
	}
	ldlm_state->ldlm_bl_pool = blp;

	spin_lock_init(&blp->blp_lock);
	INIT_LIST_HEAD(&blp->blp_list);
	INIT_LIST_HEAD(&blp->blp_prio_list);
	init_waitqueue_head(&blp->blp_waitq);
	atomic_set(&blp->blp_num_threads, 0);
	atomic_set(&blp->blp_busy_threads, 0);

	if (ldlm_num_threads == 0) {
		blp->blp_min_threads = LDLM_NTHRS_INIT;
		blp->blp_max_threads = LDLM_NTHRS_MAX;
	} else {
		blp->blp_min_threads = blp->blp_max_threads =
			min_t(int, LDLM_NTHRS_MAX, max_t(int, LDLM_NTHRS_INIT,
							 ldlm_num_threads));
	}

	for (i = 0; i < blp->blp_min_threads; i++) {
		rc = ldlm_bl_thread_start(blp);
		if (rc < 0)
			goto out;
	}

	rc = ldlm_pools_init();
	if (rc) {
		CERROR("Failed to initialize LDLM pools: %d\n", rc);
		goto out;
	}
	return 0;

 out:
	ldlm_cleanup();
	return rc;
}
static int ldlm_cleanup(void)
{
	if (!list_empty(ldlm_namespace_list(LDLM_NAMESPACE_SERVER)) ||
	    !list_empty(ldlm_namespace_list(LDLM_NAMESPACE_CLIENT))) {
		CERROR("ldlm still has namespaces; clean these up first.\n");
		ldlm_dump_all_namespaces(LDLM_NAMESPACE_SERVER, D_DLMTRACE);
		ldlm_dump_all_namespaces(LDLM_NAMESPACE_CLIENT, D_DLMTRACE);
		return -EBUSY;
	}

	ldlm_pools_fini();

	if (ldlm_state->ldlm_bl_pool) {
		struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;

		while (atomic_read(&blp->blp_num_threads) > 0) {
			/* a work item with a NULL blwi_ns tells one
			 * blocking thread to exit
			 */
			struct ldlm_bl_work_item blwi = { .blwi_ns = NULL };

			init_completion(&blp->blp_comp);

			spin_lock(&blp->blp_lock);
			list_add_tail(&blwi.blwi_entry, &blp->blp_list);
			wake_up(&blp->blp_waitq);
			spin_unlock(&blp->blp_lock);

			wait_for_completion(&blp->blp_comp);
		}
		kfree(blp);
	}

	if (ldlm_state->ldlm_cb_service)
		ptlrpc_unregister_service(ldlm_state->ldlm_cb_service);

	if (ldlm_ns_kset)
		kset_unregister(ldlm_ns_kset);
	if (ldlm_svc_kset)
		kset_unregister(ldlm_svc_kset);
	if (ldlm_kobj)
		kobject_put(ldlm_kobj);

	ldlm_debugfs_cleanup();

	kfree(ldlm_state);
	ldlm_state = NULL;

	return 0;
}
int ldlm_init(void)
{
	mutex_init(&ldlm_ref_mutex);
	mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_SERVER));
	mutex_init(ldlm_namespace_lock(LDLM_NAMESPACE_CLIENT));
	ldlm_resource_slab = kmem_cache_create("ldlm_resources",
					       sizeof(struct ldlm_resource), 0,
					       SLAB_HWCACHE_ALIGN, NULL);
	if (!ldlm_resource_slab)
		return -ENOMEM;

	ldlm_lock_slab = kmem_cache_create("ldlm_locks",
					   sizeof(struct ldlm_lock), 0,
					   SLAB_HWCACHE_ALIGN |
					   SLAB_DESTROY_BY_RCU, NULL);
	if (!ldlm_lock_slab) {
		kmem_cache_destroy(ldlm_resource_slab);
		return -ENOMEM;
	}

	ldlm_interval_slab = kmem_cache_create("interval_node",
					       sizeof(struct ldlm_interval),
					       0, SLAB_HWCACHE_ALIGN, NULL);
	if (!ldlm_interval_slab) {
		kmem_cache_destroy(ldlm_resource_slab);
		kmem_cache_destroy(ldlm_lock_slab);
		return -ENOMEM;
	}
#if LUSTRE_TRACKS_LOCK_EXP_REFS
	class_export_dump_hook = ldlm_dump_export_locks;
#endif
	return 0;
}
void ldlm_exit(void)
{
	if (ldlm_refcount)
		CERROR("ldlm_refcount is %d in ldlm_exit!\n", ldlm_refcount);
	kmem_cache_destroy(ldlm_resource_slab);
	/* ldlm_lock_put() uses RCU to call ldlm_lock_free(), so we need to
	 * call synchronize_rcu() to wait for a grace period to elapse and
	 * give ldlm_lock_free() a chance to be called.
	 */
	synchronize_rcu();
	kmem_cache_destroy(ldlm_lock_slab);
	kmem_cache_destroy(ldlm_interval_slab);
}