// SPDX-License-Identifier: GPL-2.0
/*
 *  Functions to handle the cached directory entries
 *
 *  Copyright (c) 2022, Ronnie Sahlberg <lsahlber@redhat.com>
 */
#include <linux/namei.h>
#include "cifsglob.h"
#include "cifsproto.h"
#include "cifs_debug.h"
#include "smb2proto.h"
#include "cached_dir.h"
/* Forward declarations for the cached_fid allocation/teardown helpers below. */
static struct cached_fid *init_cached_dir(const char *path);
static void free_cached_dir(struct cached_fid *cfid);
18 static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
22 struct cached_fid *cfid;
24 spin_lock(&cfids->cfid_list_lock);
25 list_for_each_entry(cfid, &cfids->entries, entry) {
26 if (!strcmp(cfid->path, path)) {
28 * If it doesn't have a lease it is either not yet
29 * fully cached or it may be in the process of
30 * being deleted due to a lease break.
32 if (!cfid->has_lease) {
33 spin_unlock(&cfids->cfid_list_lock);
36 kref_get(&cfid->refcount);
37 spin_unlock(&cfids->cfid_list_lock);
42 spin_unlock(&cfids->cfid_list_lock);
45 if (cfids->num_entries >= MAX_CACHED_FIDS) {
46 spin_unlock(&cfids->cfid_list_lock);
49 cfid = init_cached_dir(path);
51 spin_unlock(&cfids->cfid_list_lock);
56 list_add(&cfid->entry, &cfids->entries);
58 kref_get(&cfid->refcount);
59 spin_unlock(&cfids->cfid_list_lock);
63 static struct dentry *
64 path_to_dentry(struct cifs_sb_info *cifs_sb, const char *path)
66 struct dentry *dentry;
70 sep = CIFS_DIR_SEP(cifs_sb);
71 dentry = dget(cifs_sb->root);
75 struct inode *dir = d_inode(dentry);
78 if (!S_ISDIR(dir->i_mode)) {
80 dentry = ERR_PTR(-ENOTDIR);
91 while (*s && *s != sep)
94 child = lookup_positive_unlocked(p, dentry, s - p);
97 } while (!IS_ERR(dentry));
102 * Open the and cache a directory handle.
103 * If error then *cfid is not initialized.
105 int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
107 struct cifs_sb_info *cifs_sb,
108 bool lookup_only, struct cached_fid **ret_cfid)
110 struct cifs_ses *ses;
111 struct TCP_Server_Info *server;
112 struct cifs_open_parms oparms;
113 struct smb2_create_rsp *o_rsp = NULL;
114 struct smb2_query_info_rsp *qi_rsp = NULL;
116 struct smb_rqst rqst[2];
117 struct kvec rsp_iov[2];
118 struct kvec open_iov[SMB2_CREATE_IOV_SIZE];
119 struct kvec qi_iov[1];
121 __le16 *utf16_path = NULL;
122 u8 oplock = SMB2_OPLOCK_LEVEL_II;
123 struct cifs_fid *pfid;
124 struct dentry *dentry = NULL;
125 struct cached_fid *cfid;
126 struct cached_fids *cfids;
128 if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
129 is_smb1_server(tcon->ses->server))
133 server = ses->server;
136 if (!server->ops->new_lease_key)
139 if (cifs_sb->root == NULL)
142 utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
146 cfid = find_or_create_cached_dir(cfids, path, lookup_only);
152 * At this point we either have a lease already and we can just
153 * return it. If not we are guaranteed to be the only thread accessing
156 if (cfid->has_lease) {
163 * We do not hold the lock for the open because in case
164 * SMB2_open needs to reconnect.
165 * This is safe because no other thread will be able to get a ref
166 * to the cfid until we have finished opening the file and (possibly)
169 if (smb3_encryption_required(tcon))
170 flags |= CIFS_TRANSFORM_REQ;
173 server->ops->new_lease_key(pfid);
175 memset(rqst, 0, sizeof(rqst));
176 resp_buftype[0] = resp_buftype[1] = CIFS_NO_BUFFER;
177 memset(rsp_iov, 0, sizeof(rsp_iov));
180 memset(&open_iov, 0, sizeof(open_iov));
181 rqst[0].rq_iov = open_iov;
182 rqst[0].rq_nvec = SMB2_CREATE_IOV_SIZE;
185 oparms.create_options = cifs_create_options(cifs_sb, CREATE_NOT_FILE);
186 oparms.desired_access = FILE_READ_ATTRIBUTES;
187 oparms.disposition = FILE_OPEN;
189 oparms.reconnect = false;
191 rc = SMB2_open_init(tcon, server,
192 &rqst[0], &oplock, &oparms, utf16_path);
195 smb2_set_next_command(tcon, &rqst[0]);
197 memset(&qi_iov, 0, sizeof(qi_iov));
198 rqst[1].rq_iov = qi_iov;
201 rc = SMB2_query_info_init(tcon, server,
202 &rqst[1], COMPOUND_FID,
203 COMPOUND_FID, FILE_ALL_INFORMATION,
205 sizeof(struct smb2_file_all_info) +
206 PATH_MAX * 2, 0, NULL);
210 smb2_set_related(&rqst[1]);
212 rc = compound_send_recv(xid, ses, server,
214 resp_buftype, rsp_iov);
216 if (rc == -EREMCHG) {
217 tcon->need_reconnect = true;
218 pr_warn_once("server share %s deleted\n",
224 atomic_inc(&tcon->num_remote_opens);
226 o_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
227 oparms.fid->persistent_fid = o_rsp->PersistentFileId;
228 oparms.fid->volatile_fid = o_rsp->VolatileFileId;
229 #ifdef CONFIG_CIFS_DEBUG2
230 oparms.fid->mid = le64_to_cpu(o_rsp->hdr.MessageId);
231 #endif /* CIFS_DEBUG2 */
233 if (o_rsp->OplockLevel != SMB2_OPLOCK_LEVEL_LEASE)
237 smb2_parse_contexts(server, o_rsp,
239 oparms.fid->lease_key, &oplock,
242 qi_rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
243 if (le32_to_cpu(qi_rsp->OutputBufferLength) < sizeof(struct smb2_file_all_info))
245 if (!smb2_validate_and_copy_iov(
246 le16_to_cpu(qi_rsp->OutputBufferOffset),
247 sizeof(struct smb2_file_all_info),
248 &rsp_iov[1], sizeof(struct smb2_file_all_info),
249 (char *)&cfid->file_all_info))
250 cfid->file_all_info_is_valid = true;
253 dentry = dget(cifs_sb->root);
255 dentry = path_to_dentry(cifs_sb, path);
259 cfid->dentry = dentry;
261 cfid->time = jiffies;
262 cfid->is_open = true;
263 cfid->has_lease = true;
267 SMB2_open_free(&rqst[0]);
268 SMB2_query_info_free(&rqst[1]);
269 free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
270 free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
271 spin_lock(&cfids->cfid_list_lock);
272 if (!cfid->has_lease) {
274 list_del(&cfid->entry);
275 cfid->on_list = false;
276 cfids->num_entries--;
280 spin_unlock(&cfids->cfid_list_lock);
282 free_cached_dir(cfid);
292 int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
293 struct dentry *dentry,
294 struct cached_fid **ret_cfid)
296 struct cached_fid *cfid;
297 struct cached_fids *cfids = tcon->cfids;
302 spin_lock(&cfids->cfid_list_lock);
303 list_for_each_entry(cfid, &cfids->entries, entry) {
304 if (dentry && cfid->dentry == dentry) {
305 cifs_dbg(FYI, "found a cached root file handle by dentry\n");
306 kref_get(&cfid->refcount);
308 spin_unlock(&cfids->cfid_list_lock);
312 spin_unlock(&cfids->cfid_list_lock);
317 smb2_close_cached_fid(struct kref *ref)
319 struct cached_fid *cfid = container_of(ref, struct cached_fid,
322 spin_lock(&cfid->cfids->cfid_list_lock);
324 list_del(&cfid->entry);
325 cfid->on_list = false;
326 cfid->cfids->num_entries--;
328 spin_unlock(&cfid->cfids->cfid_list_lock);
334 SMB2_close(0, cfid->tcon, cfid->fid.persistent_fid,
335 cfid->fid.volatile_fid);
338 free_cached_dir(cfid);
341 void close_cached_dir(struct cached_fid *cfid)
343 kref_put(&cfid->refcount, smb2_close_cached_fid);
347 * Called from cifs_kill_sb when we unmount a share
349 void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
351 struct rb_root *root = &cifs_sb->tlink_tree;
352 struct rb_node *node;
353 struct cached_fid *cfid;
354 struct cifs_tcon *tcon;
355 struct tcon_link *tlink;
356 struct cached_fids *cfids;
358 for (node = rb_first(root); node; node = rb_next(node)) {
359 tlink = rb_entry(node, struct tcon_link, tl_rbnode);
360 tcon = tlink_tcon(tlink);
366 list_for_each_entry(cfid, &cfids->entries, entry) {
374 * Invalidate all cached dirs when a TCON has been reset
375 * due to a session loss.
377 void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
379 struct cached_fids *cfids = tcon->cfids;
380 struct cached_fid *cfid, *q;
381 struct list_head entry;
383 INIT_LIST_HEAD(&entry);
384 spin_lock(&cfids->cfid_list_lock);
385 list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
386 list_del(&cfid->entry);
387 list_add(&cfid->entry, &entry);
388 cfids->num_entries--;
389 cfid->is_open = false;
390 /* To prevent race with smb2_cached_lease_break() */
391 kref_get(&cfid->refcount);
393 spin_unlock(&cfids->cfid_list_lock);
395 list_for_each_entry_safe(cfid, q, &entry, entry) {
396 cfid->on_list = false;
397 list_del(&cfid->entry);
398 cancel_work_sync(&cfid->lease_break);
399 if (cfid->has_lease) {
401 * We lease was never cancelled from the server so we
402 * need to drop the reference.
404 spin_lock(&cfids->cfid_list_lock);
405 cfid->has_lease = false;
406 spin_unlock(&cfids->cfid_list_lock);
407 kref_put(&cfid->refcount, smb2_close_cached_fid);
409 /* Drop the extra reference opened above*/
410 kref_put(&cfid->refcount, smb2_close_cached_fid);
415 smb2_cached_lease_break(struct work_struct *work)
417 struct cached_fid *cfid = container_of(work,
418 struct cached_fid, lease_break);
420 spin_lock(&cfid->cfids->cfid_list_lock);
421 cfid->has_lease = false;
422 spin_unlock(&cfid->cfids->cfid_list_lock);
423 kref_put(&cfid->refcount, smb2_close_cached_fid);
426 int cached_dir_lease_break(struct cifs_tcon *tcon, __u8 lease_key[16])
428 struct cached_fids *cfids = tcon->cfids;
429 struct cached_fid *cfid;
434 spin_lock(&cfids->cfid_list_lock);
435 list_for_each_entry(cfid, &cfids->entries, entry) {
436 if (cfid->has_lease &&
439 SMB2_LEASE_KEY_SIZE)) {
442 * We found a lease remove it from the list
443 * so no threads can access it.
445 list_del(&cfid->entry);
446 cfid->on_list = false;
447 cfids->num_entries--;
449 queue_work(cifsiod_wq,
451 spin_unlock(&cfids->cfid_list_lock);
455 spin_unlock(&cfids->cfid_list_lock);
459 static struct cached_fid *init_cached_dir(const char *path)
461 struct cached_fid *cfid;
463 cfid = kzalloc(sizeof(*cfid), GFP_ATOMIC);
466 cfid->path = kstrdup(path, GFP_ATOMIC);
472 INIT_WORK(&cfid->lease_break, smb2_cached_lease_break);
473 INIT_LIST_HEAD(&cfid->entry);
474 INIT_LIST_HEAD(&cfid->dirents.entries);
475 mutex_init(&cfid->dirents.de_mutex);
476 spin_lock_init(&cfid->fid_lock);
477 kref_init(&cfid->refcount);
481 static void free_cached_dir(struct cached_fid *cfid)
483 struct cached_dirent *dirent, *q;
489 * Delete all cached dirent names
491 list_for_each_entry_safe(dirent, q, &cfid->dirents.entries, entry) {
492 list_del(&dirent->entry);
502 struct cached_fids *init_cached_dirs(void)
504 struct cached_fids *cfids;
506 cfids = kzalloc(sizeof(*cfids), GFP_KERNEL);
509 spin_lock_init(&cfids->cfid_list_lock);
510 INIT_LIST_HEAD(&cfids->entries);
515 * Called from tconInfoFree when we are tearing down the tcon.
516 * There are no active users or open files/directories at this point.
518 void free_cached_dirs(struct cached_fids *cfids)
520 struct cached_fid *cfid, *q;
521 struct list_head entry;
523 INIT_LIST_HEAD(&entry);
524 spin_lock(&cfids->cfid_list_lock);
525 list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
526 cfid->on_list = false;
527 cfid->is_open = false;
528 list_del(&cfid->entry);
529 list_add(&cfid->entry, &entry);
531 spin_unlock(&cfids->cfid_list_lock);
533 list_for_each_entry_safe(cfid, q, &entry, entry) {
534 list_del(&cfid->entry);
535 free_cached_dir(cfid);