author	Jeff Layton <jlayton@redhat.com>	2010-10-06 19:51:12 -0400
committer	Steve French <sfrench@us.ibm.com>	2010-10-08 03:31:21 +0000
commit	2de970ff69bbcc5a4b7440df669a595b2b1acd73 (patch)
tree	ec710893fcd1aa6313a3bbe2eafb3f463b364afc
parent	3aa1c8c2900065a51268430ab48a1b42fdfe5b45 (diff)
cifs: implement recurring workqueue job to prune old tcons
Create a workqueue job that cleans out unused tlinks. For now, it uses a
hardcoded expire time of 10 minutes. When it's done, the work rearms itself.
On umount, the work is cancelled before tearing down the tlink tree.

Signed-off-by: Jeff Layton <jlayton@redhat.com>
Signed-off-by: Steve French <sfrench@us.ibm.com>
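For readers unfamiliar with the mechanism: the patch relies on a delayed_work item that re-queues itself at the end of each run and is cancelled synchronously before the structure embedding it is freed. A minimal, self-contained sketch of that pattern, with hypothetical names (example_work, example_prune, EXAMPLE_EXPIRE) standing in for the CIFS-specific ones, might look like this:

    #include <linux/module.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    /* 10 minutes, mirroring TLINK_IDLE_EXPIRE in the patch below */
    #define EXAMPLE_EXPIRE	(600 * HZ)

    static struct delayed_work example_work;

    /* runs on a workqueue and rearms itself when finished */
    static void example_prune(struct work_struct *work)
    {
    	/* ... scan for and release idle objects here ... */

    	queue_delayed_work(system_nrt_wq, &example_work, EXAMPLE_EXPIRE);
    }

    static int __init example_init(void)
    {
    	INIT_DELAYED_WORK(&example_work, example_prune);
    	queue_delayed_work(system_nrt_wq, &example_work, EXAMPLE_EXPIRE);
    	return 0;
    }

    static void __exit example_exit(void)
    {
    	/* cancel a pending timer and wait for a running instance,
    	 * as cifs_umount() does before tearing down the tlink tree */
    	cancel_delayed_work_sync(&example_work);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

cancel_delayed_work_sync() both removes a pending timer and waits for any executing instance, which is why the umount path can safely walk and free the tlink tree afterwards.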
-rw-r--r--	fs/cifs/cifs_fs_sb.h	1
-rw-r--r--	fs/cifs/connect.c	58
2 files changed, 58 insertions(+), 1 deletion(-)
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 5ce57bdf1865..586ee3d527d2 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -59,5 +59,6 @@ struct cifs_sb_info {
char *mountdata; /* mount options received at mount time */
#endif
struct backing_dev_info bdi;
+ struct delayed_work prune_tlinks;
};
#endif /* _CIFS_FS_SB_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e65f72d1f23b..1092e9e839c2 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -110,11 +110,13 @@ struct smb_vol {
struct nls_table *local_nls;
};
+/* FIXME: should these be tunable? */
#define TLINK_ERROR_EXPIRE (1 * HZ)
-
+#define TLINK_IDLE_EXPIRE (600 * HZ)
static int ipv4_connect(struct TCP_Server_Info *server);
static int ipv6_connect(struct TCP_Server_Info *server);
+static void cifs_prune_tlinks(struct work_struct *work);
/*
* cifs tcp session reconnection
@@ -2494,6 +2496,8 @@ convert_delimiter(char *path, char delim)
static void setup_cifs_sb(struct smb_vol *pvolume_info,
struct cifs_sb_info *cifs_sb)
{
+ INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks);
+
if (pvolume_info->rsize > CIFSMaxBufSize) {
cERROR(1, "rsize %d too large, using MaxBufSize",
pvolume_info->rsize);
@@ -2899,6 +2903,9 @@ remote_path_check:
spin_unlock(&cifs_sb->tlink_tree_lock);
radix_tree_preload_end();
+ queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
+ TLINK_IDLE_EXPIRE);
+
mount_fail_check:
/* on error free sesinfo and tcon struct if needed */
if (rc) {
@@ -3090,6 +3097,8 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
struct tcon_link *tlink[8];
unsigned long index = 0;
+ cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
+
do {
spin_lock(&cifs_sb->tlink_tree_lock);
ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
@@ -3363,3 +3372,50 @@ wait_for_construction:
return tlink;
}
+
+/*
+ * periodic workqueue job that scans tcon_tree for a superblock and closes
+ * out tcons.
+ */
+static void
+cifs_prune_tlinks(struct work_struct *work)
+{
+ struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
+ prune_tlinks.work);
+ struct tcon_link *tlink[8];
+ unsigned long now = jiffies;
+ unsigned long index = 0;
+ int i, ret;
+
+ do {
+ spin_lock(&cifs_sb->tlink_tree_lock);
+ ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
+ (void **)tlink, index,
+ ARRAY_SIZE(tlink));
+ /* increment index for next pass */
+ if (ret > 0)
+ index = tlink[ret - 1]->tl_index + 1;
+ for (i = 0; i < ret; i++) {
+ if (test_bit(TCON_LINK_MASTER, &tlink[i]->tl_flags) ||
+ atomic_read(&tlink[i]->tl_count) != 0 ||
+ time_after(tlink[i]->tl_time + TLINK_IDLE_EXPIRE,
+ now)) {
+ tlink[i] = NULL;
+ continue;
+ }
+ cifs_get_tlink(tlink[i]);
+ clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
+ radix_tree_delete(&cifs_sb->tlink_tree,
+ tlink[i]->tl_index);
+ }
+ spin_unlock(&cifs_sb->tlink_tree_lock);
+
+ for (i = 0; i < ret; i++) {
+ if (tlink[i] != NULL)
+ cifs_put_tlink(tlink[i]);
+ }
+ } while (ret != 0);
+
+ queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
+ TLINK_IDLE_EXPIRE);
+}
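The FIXME added above the expire constants asks whether they should be tunable. This patch leaves them hardcoded; one possible follow-up (not part of this change, names hypothetical) would be to expose the idle expiry as a module parameter, roughly:

    #include <linux/moduleparam.h>

    /* hypothetical: idle expiry in seconds, default 600 (10 minutes) */
    static unsigned int tlink_idle_expire_secs = 600;
    module_param(tlink_idle_expire_secs, uint, 0644);
    MODULE_PARM_DESC(tlink_idle_expire_secs,
    		 "Seconds an unused tcon link may sit idle before being pruned");

    /* callers would then use (tlink_idle_expire_secs * HZ) wherever the
     * patch uses TLINK_IDLE_EXPIRE */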