ceph: use complete_all and wake_up_all
author    Yehuda Sadeh <yehuda@hq.newdream.net>
          Tue, 27 Jul 2010 20:11:08 +0000 (13:11 -0700)
committer Sage Weil <sage@newdream.net>
          Tue, 27 Jul 2010 20:11:17 +0000 (13:11 -0700)
This fixes an issue triggered by running concurrent syncs. One of the syncs
would go through while the other would just hang indefinitely. In any case, we
never actually want to wake a single waiter, so the *_all functions should
be used.

Signed-off-by: Yehuda Sadeh <yehuda@hq.newdream.net>
Signed-off-by: Sage Weil <sage@newdream.net>
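
For reference (not part of the patch): a minimal, hypothetical module sketch showing why complete() is not enough when more than one task can wait on the same completion. complete() releases a single waiter, whereas complete_all() marks the completion done for every current and future waiter; the analogous distinction holds for wake_up() vs wake_up_all() with exclusive waiters on a wait queue. The module and thread names below are made up for illustration only.

    #include <linux/module.h>
    #include <linux/kthread.h>
    #include <linux/completion.h>
    #include <linux/delay.h>

    static DECLARE_COMPLETION(done);

    static int waiter_fn(void *data)
    {
    	/* Two threads block here, like two concurrent sync waiters. */
    	wait_for_completion(&done);
    	pr_info("waiter %s released\n", (char *)data);
    	return 0;
    }

    static int __init demo_init(void)
    {
    	kthread_run(waiter_fn, (void *)"A", "demo-waiter-a");
    	kthread_run(waiter_fn, (void *)"B", "demo-waiter-b");
    	msleep(100);
    	/*
    	 * complete(&done) would release only one of the two threads;
    	 * complete_all() lets both (and any later waiter) proceed.
    	 */
    	complete_all(&done);
    	return 0;
    }

    static void __exit demo_exit(void)
    {
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

With complete() in the sketch above, one waiter would stay blocked indefinitely, which is the hang seen with concurrent syncs.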
fs/ceph/caps.c
fs/ceph/file.c
fs/ceph/inode.c
fs/ceph/mds_client.c
fs/ceph/mon_client.c
fs/ceph/osd_client.c

diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 6afc1af..b81be9a 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -627,7 +627,7 @@ retry:
        if (fmode >= 0)
                __ceph_get_fmode(ci, fmode);
        spin_unlock(&inode->i_lock);
-       wake_up(&ci->i_cap_wq);
+       wake_up_all(&ci->i_cap_wq);
        return 0;
 }
 
@@ -1181,7 +1181,7 @@ static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
        }
 
        if (wake)
-               wake_up(&ci->i_cap_wq);
+               wake_up_all(&ci->i_cap_wq);
 
        return delayed;
 }
@@ -2153,7 +2153,7 @@ void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
        else if (flushsnaps)
                ceph_flush_snaps(ci);
        if (wake)
-               wake_up(&ci->i_cap_wq);
+               wake_up_all(&ci->i_cap_wq);
        if (put)
                iput(inode);
 }
@@ -2229,7 +2229,7 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
                iput(inode);
        } else if (complete_capsnap) {
                ceph_flush_snaps(ci);
-               wake_up(&ci->i_cap_wq);
+               wake_up_all(&ci->i_cap_wq);
        }
        if (drop_capsnap)
                iput(inode);
@@ -2405,7 +2405,7 @@ static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
        if (queue_invalidate)
                ceph_queue_invalidate(inode);
        if (wake)
-               wake_up(&ci->i_cap_wq);
+               wake_up_all(&ci->i_cap_wq);
 
        if (check_caps == 1)
                ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
@@ -2460,7 +2460,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                                         struct ceph_inode_info,
                                         i_flushing_item)->vfs_inode);
                mdsc->num_cap_flushing--;
-               wake_up(&mdsc->cap_flushing_wq);
+               wake_up_all(&mdsc->cap_flushing_wq);
                dout(" inode %p now !flushing\n", inode);
 
                if (ci->i_dirty_caps == 0) {
@@ -2472,7 +2472,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
                }
        }
        spin_unlock(&mdsc->cap_dirty_lock);
-       wake_up(&ci->i_cap_wq);
+       wake_up_all(&ci->i_cap_wq);
 
 out:
        spin_unlock(&inode->i_lock);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 6251a15..7c08698 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -265,7 +265,7 @@ int ceph_release(struct inode *inode, struct file *file)
        kmem_cache_free(ceph_file_cachep, cf);
 
        /* wake up anyone waiting for caps on this inode */
-       wake_up(&ci->i_cap_wq);
+       wake_up_all(&ci->i_cap_wq);
        return 0;
 }
 
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 3582e79..389f9db 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1501,7 +1501,7 @@ retry:
        if (wrbuffer_refs == 0)
                ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
        if (wake)
-               wake_up(&ci->i_cap_wq);
+               wake_up_all(&ci->i_cap_wq);
 }
 
 
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 416c08d..dd440bd 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -868,7 +868,7 @@ static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
 {
        struct ceph_inode_info *ci = ceph_inode(inode);
 
-       wake_up(&ci->i_cap_wq);
+       wake_up_all(&ci->i_cap_wq);
        if (arg) {
                spin_lock(&inode->i_lock);
                ci->i_wanted_max_size = 0;
@@ -1564,7 +1564,7 @@ static void complete_request(struct ceph_mds_client *mdsc,
        if (req->r_callback)
                req->r_callback(mdsc, req);
        else
-               complete(&req->r_completion);
+               complete_all(&req->r_completion);
 }
 
 /*
@@ -1932,7 +1932,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
        if (head->safe) {
                req->r_got_safe = true;
                __unregister_request(mdsc, req);
-               complete(&req->r_safe_completion);
+               complete_all(&req->r_safe_completion);
 
                if (req->r_got_unsafe) {
                        /*
@@ -1947,7 +1947,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
 
                        /* last unsafe request during umount? */
                        if (mdsc->stopping && !__get_oldest_req(mdsc))
-                               complete(&mdsc->safe_umount_waiters);
+                               complete_all(&mdsc->safe_umount_waiters);
                        mutex_unlock(&mdsc->mutex);
                        goto out;
                }
@@ -2126,7 +2126,7 @@ static void handle_session(struct ceph_mds_session *session,
                        pr_info("mds%d reconnect denied\n", session->s_mds);
                remove_session_caps(session);
                wake = 1; /* for good measure */
-               complete(&mdsc->session_close_waiters);
+               complete_all(&mdsc->session_close_waiters);
                kick_requests(mdsc, mds);
                break;
 
diff --git a/fs/ceph/mon_client.c b/fs/ceph/mon_client.c
index cc115ea..54fe01c 100644
--- a/fs/ceph/mon_client.c
+++ b/fs/ceph/mon_client.c
@@ -345,7 +345,7 @@ static void ceph_monc_handle_map(struct ceph_mon_client *monc,
 
 out:
        mutex_unlock(&monc->mutex);
-       wake_up(&client->auth_wq);
+       wake_up_all(&client->auth_wq);
 }
 
 /*
@@ -462,7 +462,7 @@ static void handle_statfs_reply(struct ceph_mon_client *monc,
        }
        mutex_unlock(&monc->mutex);
        if (req) {
-               complete(&req->completion);
+               complete_all(&req->completion);
                put_generic_request(req);
        }
        return;
@@ -718,7 +718,7 @@ static void handle_auth_reply(struct ceph_mon_client *monc,
                                     monc->m_auth->front_max);
        if (ret < 0) {
                monc->client->auth_err = ret;
-               wake_up(&monc->client->auth_wq);
+               wake_up_all(&monc->client->auth_wq);
        } else if (ret > 0) {
                __send_prepared_auth_request(monc, ret);
        } else if (!was_auth && monc->auth->ops->is_authenticated(monc->auth)) {
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
index 92b7251..e385223 100644
--- a/fs/ceph/osd_client.c
+++ b/fs/ceph/osd_client.c
@@ -862,12 +862,12 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
        if (req->r_callback)
                req->r_callback(req, msg);
        else
-               complete(&req->r_completion);
+               complete_all(&req->r_completion);
 
        if (flags & CEPH_OSD_FLAG_ONDISK) {
                if (req->r_safe_callback)
                        req->r_safe_callback(req, msg);
-               complete(&req->r_safe_completion);  /* fsync waiter */
+               complete_all(&req->r_safe_completion);  /* fsync waiter */
        }
 
 done:
@@ -1083,7 +1083,7 @@ done:
        if (newmap)
                kick_requests(osdc, NULL);
        up_read(&osdc->map_sem);
-       wake_up(&osdc->client->auth_wq);
+       wake_up_all(&osdc->client->auth_wq);
        return;
 
 bad: