/*
 * linux/fs/nfs/delegation.c
 *
 * Copyright (C) 2004 Trond Myklebust
 *
 * NFS file delegation management
 *
 */
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_xdr.h>

#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"

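/* Allocate memory for a new delegation */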
static struct nfs_delegation *nfs_alloc_delegation(void)
{
        return kmalloc(sizeof(struct nfs_delegation), GFP_KERNEL);
}

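/* Release the delegation's credential reference and free the structure */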
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
        if (delegation->cred)
                put_rpccred(delegation->cred);
        kfree(delegation);
}

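/*
 * Reclaim any POSIX or flock locks held under this open context once the
 * delegation that covered them is being returned.
 */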
static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_state *state)
{
        struct inode *inode = state->inode;
        struct file_lock *fl;
        int status;

        for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
                if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
                        continue;
                if ((struct nfs_open_context *)fl->fl_file->private_data != ctx)
                        continue;
                status = nfs4_lock_delegation_recall(state, fl);
                if (status >= 0)
                        continue;
                switch (status) {
                        default:
                                printk(KERN_ERR "%s: unhandled error %d.\n",
                                                __FUNCTION__, status);
                                /* Fall through: treat unknown errors like an expired lease */
                        case -NFS4ERR_EXPIRED:
                                /* kill_proc(fl->fl_pid, SIGLOST, 1); */
                                /* Fall through */
                        case -NFS4ERR_STALE_CLIENTID:
                                nfs4_schedule_state_recovery(NFS_SERVER(inode)->nfs_client);
                                goto out_err;
                }
        }
        return 0;
out_err:
        return status;
}

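/*
 * Walk the open contexts for this inode and convert any opens that are
 * still marked as delegated into regular OPEN stateids, reclaiming their
 * locks as well, before the delegation is returned.
 */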
static void nfs_delegation_claim_opens(struct inode *inode)
{
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_open_context *ctx;
        struct nfs4_state *state;
        int err;

again:
        spin_lock(&inode->i_lock);
        list_for_each_entry(ctx, &nfsi->open_files, list) {
                state = ctx->state;
                if (state == NULL)
                        continue;
                if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
                        continue;
                get_nfs_open_context(ctx);
                spin_unlock(&inode->i_lock);
                err = nfs4_open_delegation_recall(ctx->dentry, state);
                if (err >= 0)
                        err = nfs_delegation_claim_locks(ctx, state);
                put_nfs_open_context(ctx);
                if (err != 0)
                        return;
                goto again;
        }
        spin_unlock(&inode->i_lock);
}

/*
 * Reclaim an existing delegation on an inode: update its stateid, type
 * and credential from the results of a reclaim-type OPEN.
 */
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
        struct nfs_delegation *delegation = NFS_I(inode)->delegation;
        struct rpc_cred *oldcred;

        if (delegation == NULL)
                return;
        memcpy(delegation->stateid.data, res->delegation.data,
                        sizeof(delegation->stateid.data));
        delegation->type = res->delegation_type;
        delegation->maxsize = res->maxsize;
        oldcred = delegation->cred;
        delegation->cred = get_rpccred(cred);
        delegation->flags &= ~NFS_DELEGATION_NEED_RECLAIM;
        NFS_I(inode)->delegation_state = delegation->type;
        smp_wmb();
        /* Release the credential reference held by the old delegation */
        put_rpccred(oldcred);
}

/*
 * Set up a delegation on an inode
 */
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res)
{
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        int status = 0;

        /* Ensure we first revalidate the attributes and page cache! */
        if ((nfsi->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_ATTR)))
                __nfs_revalidate_inode(NFS_SERVER(inode), inode);

        delegation = nfs_alloc_delegation();
        if (delegation == NULL)
                return -ENOMEM;
        memcpy(delegation->stateid.data, res->delegation.data,
                        sizeof(delegation->stateid.data));
        delegation->type = res->delegation_type;
        delegation->maxsize = res->maxsize;
        delegation->change_attr = nfsi->change_attr;
        delegation->cred = get_rpccred(cred);
        delegation->inode = inode;

        spin_lock(&clp->cl_lock);
        if (nfsi->delegation == NULL) {
                list_add(&delegation->super_list, &clp->cl_delegations);
                nfsi->delegation = delegation;
                nfsi->delegation_state = delegation->type;
                delegation = NULL;
        } else {
                if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
                                        sizeof(delegation->stateid)) != 0 ||
                                delegation->type != nfsi->delegation->type) {
                        printk(KERN_WARNING "%s: server %u.%u.%u.%u handed out a duplicate delegation!\n",
                                        __FUNCTION__, NIPQUAD(clp->cl_addr.sin_addr));
                        status = -EIO;
                }
        }
        spin_unlock(&clp->cl_lock);
        /* If the delegation was not attached to the inode, release it and its credential */
        if (delegation != NULL)
                nfs_free_delegation(delegation);
        return status;
}

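/* Send DELEGRETURN to the server, then free the local delegation */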
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
        int res;

        res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
        nfs_free_delegation(delegation);
        return res;
}

/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
        filemap_fdatawrite(inode->i_mapping);
        nfs_wb_all(inode);
        filemap_fdatawait(inode->i_mapping);
}

/*
 * Basic procedure for returning a delegation to the server
 */
int __nfs_inode_return_delegation(struct inode *inode)
{
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        int res = 0;

        nfs_msync_inode(inode);
        down_read(&clp->cl_sem);
        /* Guard against new delegated open calls */
        down_write(&nfsi->rwsem);
        spin_lock(&clp->cl_lock);
        delegation = nfsi->delegation;
        if (delegation != NULL) {
                list_del_init(&delegation->super_list);
                nfsi->delegation = NULL;
                nfsi->delegation_state = 0;
        }
        spin_unlock(&clp->cl_lock);
        nfs_delegation_claim_opens(inode);
        up_write(&nfsi->rwsem);
        up_read(&clp->cl_sem);
        nfs_msync_inode(inode);

        if (delegation != NULL)
                res = nfs_do_return_delegation(inode, delegation);
        return res;
}

/*
 * Return all delegations associated with a super block
 */
void nfs_return_all_delegations(struct super_block *sb)
{
        struct nfs_client *clp = NFS_SB(sb)->nfs_client;
        struct nfs_delegation *delegation;
        struct inode *inode;

        if (clp == NULL)
                return;
restart:
        spin_lock(&clp->cl_lock);
        list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
                if (delegation->inode->i_sb != sb)
                        continue;
                inode = igrab(delegation->inode);
                if (inode == NULL)
                        continue;
                spin_unlock(&clp->cl_lock);
                nfs_inode_return_delegation(inode);
                iput(inode);
                goto restart;
        }
        spin_unlock(&clp->cl_lock);
}

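/*
 * Thread body used when the lease has expired: return every delegation
 * held by this client, unless state recovery is already in progress.
 */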
int nfs_do_expire_all_delegations(void *ptr)
{
        struct nfs_client *clp = ptr;
        struct nfs_delegation *delegation;
        struct inode *inode;

        allow_signal(SIGKILL);
restart:
        spin_lock(&clp->cl_lock);
        if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
                goto out;
        if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
                goto out;
        list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
                inode = igrab(delegation->inode);
                if (inode == NULL)
                        continue;
                spin_unlock(&clp->cl_lock);
                nfs_inode_return_delegation(inode);
                iput(inode);
                goto restart;
        }
out:
        spin_unlock(&clp->cl_lock);
        nfs_put_client(clp);
        module_put_and_exit(0);
}

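/* Spawn a kernel thread to return all of this client's delegations */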
void nfs_expire_all_delegations(struct nfs_client *clp)
{
        struct task_struct *task;

        __module_get(THIS_MODULE);
        atomic_inc(&clp->cl_count);
        task = kthread_run(nfs_do_expire_all_delegations, clp,
                        "%u.%u.%u.%u-delegreturn",
                        NIPQUAD(clp->cl_addr.sin_addr));
        if (!IS_ERR(task))
                return;
        nfs_put_client(clp);
        module_put(THIS_MODULE);
}

/*
 * Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
 */
void nfs_handle_cb_pathdown(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;
        struct inode *inode;

        if (clp == NULL)
                return;
restart:
        spin_lock(&clp->cl_lock);
        list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
                inode = igrab(delegation->inode);
                if (inode == NULL)
                        continue;
                spin_unlock(&clp->cl_lock);
                nfs_inode_return_delegation(inode);
                iput(inode);
                goto restart;
        }
        spin_unlock(&clp->cl_lock);
}

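/*
 * Arguments handed to the delegation recall thread: the caller waits on
 * 'started' until the delegation has been detached from the inode, and
 * then reads the outcome from 'result'.
 */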
struct recall_threadargs {
        struct inode *inode;
        struct nfs_client *clp;
        const nfs4_stateid *stateid;

        struct completion started;
        int result;
};

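/*
 * Thread body for an asynchronous delegation recall: detach the delegation
 * from the inode, reclaim any delegated opens and locks, then send
 * DELEGRETURN to the server.
 */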
static int recall_thread(void *data)
{
        struct recall_threadargs *args = (struct recall_threadargs *)data;
        struct inode *inode = igrab(args->inode);
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;

        daemonize("nfsv4-delegreturn");

        nfs_msync_inode(inode);
        down_read(&clp->cl_sem);
        down_write(&nfsi->rwsem);
        spin_lock(&clp->cl_lock);
        delegation = nfsi->delegation;
        if (delegation != NULL && memcmp(delegation->stateid.data,
                                args->stateid->data,
                                sizeof(delegation->stateid.data)) == 0) {
                list_del_init(&delegation->super_list);
                nfsi->delegation = NULL;
                nfsi->delegation_state = 0;
                args->result = 0;
        } else {
                delegation = NULL;
                args->result = -ENOENT;
        }
        spin_unlock(&clp->cl_lock);
        complete(&args->started);
        nfs_delegation_claim_opens(inode);
        up_write(&nfsi->rwsem);
        up_read(&clp->cl_sem);
        nfs_msync_inode(inode);

        if (delegation != NULL)
                nfs_do_return_delegation(inode, delegation);
        iput(inode);
        module_put_and_exit(0);
}

/*
 * Asynchronous delegation recall!
 */
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid)
{
        struct recall_threadargs data = {
                .inode = inode,
                .stateid = stateid,
        };
        int status;

        init_completion(&data.started);
        __module_get(THIS_MODULE);
        status = kernel_thread(recall_thread, &data, CLONE_KERNEL);
        if (status < 0)
                goto out_module_put;
        wait_for_completion(&data.started);
        return data.result;
out_module_put:
        module_put(THIS_MODULE);
        return status;
}

/*
 * Retrieve the inode associated with a delegation
 */
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle)
{
        struct nfs_delegation *delegation;
        struct inode *res = NULL;

        spin_lock(&clp->cl_lock);
        list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
                if (nfs_compare_fh(fhandle, &NFS_I(delegation->inode)->fh) == 0) {
                        res = igrab(delegation->inode);
                        break;
                }
        }
        spin_unlock(&clp->cl_lock);
        return res;
}

/*
 * Mark all delegations as needing to be reclaimed
 */
void nfs_delegation_mark_reclaim(struct nfs_client *clp)
{
        struct nfs_delegation *delegation;

        spin_lock(&clp->cl_lock);
        list_for_each_entry(delegation, &clp->cl_delegations, super_list)
                delegation->flags |= NFS_DELEGATION_NEED_RECLAIM;
        spin_unlock(&clp->cl_lock);
}

/*
 * Reap all unclaimed delegations after reboot recovery is done
 */
void nfs_delegation_reap_unclaimed(struct nfs_client *clp)
{
        struct nfs_delegation *delegation, *n;
        LIST_HEAD(head);

        spin_lock(&clp->cl_lock);
        list_for_each_entry_safe(delegation, n, &clp->cl_delegations, super_list) {
                if ((delegation->flags & NFS_DELEGATION_NEED_RECLAIM) == 0)
                        continue;
                list_move(&delegation->super_list, &head);
                NFS_I(delegation->inode)->delegation = NULL;
                NFS_I(delegation->inode)->delegation_state = 0;
        }
        spin_unlock(&clp->cl_lock);
        while (!list_empty(&head)) {
                delegation = list_entry(head.next, struct nfs_delegation, super_list);
                list_del(&delegation->super_list);
                nfs_free_delegation(delegation);
        }
}

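/*
 * Copy the current delegation stateid for an inode into 'dst'.
 * Returns 1 if a delegation was found, 0 otherwise.
 */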
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode)
{
        struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
        struct nfs_inode *nfsi = NFS_I(inode);
        struct nfs_delegation *delegation;
        int res = 0;

        if (nfsi->delegation_state == 0)
                return 0;
        spin_lock(&clp->cl_lock);
        delegation = nfsi->delegation;
        if (delegation != NULL) {
                memcpy(dst->data, delegation->stateid.data, sizeof(dst->data));
                res = 1;
        }
        spin_unlock(&clp->cl_lock);
        return res;
}