mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
synced 2026-02-19 12:25:40 -05:00
gfs2: Update to the evict / remote delete documentation
Try to be a bit more clear and remove some duplications. We cannot actually get rid of the verification step eventually, so remove the comment saying so. Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
This commit is contained in:
@@ -965,13 +965,16 @@ static void gfs2_try_evict(struct gfs2_glock *gl)

 	/*
 	 * If there is contention on the iopen glock and we have an inode, try
-	 * to grab and release the inode so that it can be evicted. This will
-	 * allow the remote node to go ahead and delete the inode without us
-	 * having to do it, which will avoid rgrp glock thrashing.
+	 * to grab and release the inode so that it can be evicted. The
+	 * GIF_DEFER_DELETE flag indicates to gfs2_evict_inode() that the inode
+	 * should not be deleted locally. This will allow the remote node to
+	 * go ahead and delete the inode without us having to do it, which will
+	 * avoid rgrp glock thrashing.
 	 *
 	 * The remote node is likely still holding the corresponding inode
 	 * glock, so it will run before we get to verify that the delete has
-	 * happened below.
+	 * happened below. (Verification is triggered by the call to
+	 * gfs2_queue_verify_delete() in gfs2_evict_inode().)
 	 */
 	spin_lock(&gl->gl_lockref.lock);
 	ip = gl->gl_object;
@@ -1027,26 +1030,8 @@ static void delete_work_func(struct work_struct *work)

 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
 	bool verify_delete = test_and_clear_bit(GLF_VERIFY_DELETE, &gl->gl_flags);

-	if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags)) {
-		/*
-		 * If we can evict the inode, give the remote node trying to
-		 * delete the inode some time before verifying that the delete
-		 * has happened. Otherwise, if we cause contention on the inode glock
-		 * immediately, the remote node will think that we still have
-		 * the inode in use, and so it will give up waiting.
-		 *
-		 * If we can't evict the inode, signal to the remote node that
-		 * the inode is still in use. We'll later try to delete the
-		 * inode locally in gfs2_evict_inode.
-		 *
-		 * FIXME: We only need to verify that the remote node has
-		 * deleted the inode because nodes before this remote delete
-		 * rework won't cooperate. At a later time, when we no longer
-		 * care about compatibility with such nodes, we can skip this
-		 * step entirely.
-		 */
+	if (test_and_clear_bit(GLF_TRY_TO_EVICT, &gl->gl_flags))
 		gfs2_try_evict(gl);
-	}

 	if (verify_delete) {
 		u64 no_addr = gl->gl_name.ln_number;

@@ -1272,9 +1272,9 @@ static enum evict_behavior gfs2_upgrade_iopen_glock(struct inode *inode)

 	 * exclusive access to the iopen glock here.
 	 *
 	 * Otherwise, the other nodes holding the lock will be notified about
-	 * our locking request. If they do not have the inode open, they are
-	 * expected to evict the cached inode and release the lock, allowing us
-	 * to proceed.
+	 * our locking request (see iopen_go_callback()). If they do not have
+	 * the inode open, they are expected to evict the cached inode and
+	 * release the lock, allowing us to proceed.
 	 *
 	 * Otherwise, if they cannot evict the inode, they are expected to poke
 	 * the inode glock (note: not the iopen glock). We will notice that

Reference in New Issue
Block a user