staging: lustre: remove cfs_time_before()
drivers/staging/lustre/lustre/libcfs/upcall_cache.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/upcall_cache.c
 *
 * Supplementary groups cache.
 */
#define DEBUG_SUBSYSTEM S_SEC

#include "../../include/linux/libcfs/lucache.h"

static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
                                              __u64 key, void *args)
{
        struct upcall_cache_entry *entry;

        LIBCFS_ALLOC(entry, sizeof(*entry));
        if (!entry)
                return NULL;

        UC_CACHE_SET_NEW(entry);
        INIT_LIST_HEAD(&entry->ue_hash);
        entry->ue_key = key;
        atomic_set(&entry->ue_refcount, 0);
        init_waitqueue_head(&entry->ue_waitq);
        if (cache->uc_ops->init_entry)
                cache->uc_ops->init_entry(entry, args);
        return entry;
}

/* protected by cache lock */
static void free_entry(struct upcall_cache *cache,
                       struct upcall_cache_entry *entry)
{
        if (cache->uc_ops->free_entry)
                cache->uc_ops->free_entry(cache, entry);

        list_del(&entry->ue_hash);
        CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
               entry, entry->ue_key);
        LIBCFS_FREE(entry, sizeof(*entry));
}

static inline int upcall_compare(struct upcall_cache *cache,
                                 struct upcall_cache_entry *entry,
                                 __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->upcall_compare)
                return cache->uc_ops->upcall_compare(cache, entry, key, args);

        return 0;
}

static inline int downcall_compare(struct upcall_cache *cache,
                                   struct upcall_cache_entry *entry,
                                   __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->downcall_compare)
                return cache->uc_ops->downcall_compare(cache, entry, key, args);

        return 0;
}

static inline void get_entry(struct upcall_cache_entry *entry)
{
        atomic_inc(&entry->ue_refcount);
}

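/*
 * Drop a reference.  Once the last reference is gone and the entry has
 * already been marked invalid or expired (i.e. unhashed), free it.
 * Must be called with cache->uc_lock held.
 */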
static inline void put_entry(struct upcall_cache *cache,
                             struct upcall_cache_entry *entry)
{
        if (atomic_dec_and_test(&entry->ue_refcount) &&
            (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
                free_entry(cache, entry);
        }
}

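/*
 * Unlink an entry from the hash unless it is still usable: valid and not
 * yet expired, or still being acquired within its acquire deadline.
 * Expired and invalid entries are unhashed (and freed once unreferenced).
 * Returns 1 if the entry was unlinked, 0 if it can still be used.
 * Must be called with cache->uc_lock held.
 */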
static int check_unlink_entry(struct upcall_cache *cache,
                              struct upcall_cache_entry *entry)
{
        if (UC_CACHE_IS_VALID(entry) &&
            time_before(cfs_time_current(), entry->ue_expire))
                return 0;

        if (UC_CACHE_IS_ACQUIRING(entry)) {
                if (entry->ue_acquire_expire == 0 ||
                    time_before(cfs_time_current(), entry->ue_acquire_expire))
                        return 0;

                UC_CACHE_SET_EXPIRED(entry);
                wake_up_all(&entry->ue_waitq);
        } else if (!UC_CACHE_IS_INVALID(entry)) {
                UC_CACHE_SET_EXPIRED(entry);
        }

        list_del_init(&entry->ue_hash);
        if (!atomic_read(&entry->ue_refcount))
                free_entry(cache, entry);
        return 1;
}

static inline int refresh_entry(struct upcall_cache *cache,
                                struct upcall_cache_entry *entry)
{
        LASSERT(cache->uc_ops->do_upcall);
        return cache->uc_ops->do_upcall(cache, entry);
}

struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
                                                  __u64 key, void *args)
{
        struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
        struct list_head *head;
        wait_queue_t wait;
        int rc, found;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
        found = 0;
        spin_lock(&cache->uc_lock);
        list_for_each_entry_safe(entry, next, head, ue_hash) {
                /* check invalid & expired items */
                if (check_unlink_entry(cache, entry))
                        continue;
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (!found) {
                if (!new) {
                        spin_unlock(&cache->uc_lock);
                        new = alloc_entry(cache, key, args);
                        if (!new) {
                                CERROR("failed to alloc entry\n");
                                return ERR_PTR(-ENOMEM);
                        }
                        goto find_again;
                } else {
                        list_add(&new->ue_hash, head);
                        entry = new;
                }
        } else {
                if (new) {
                        free_entry(cache, new);
                        new = NULL;
                }
                list_move(&entry->ue_hash, head);
        }
        get_entry(entry);

        /* acquire for new one */
        if (UC_CACHE_IS_NEW(entry)) {
                UC_CACHE_SET_ACQUIRING(entry);
                UC_CACHE_CLEAR_NEW(entry);
                spin_unlock(&cache->uc_lock);
                rc = refresh_entry(cache, entry);
                spin_lock(&cache->uc_lock);
                entry->ue_acquire_expire =
                        cfs_time_shift(cache->uc_acquire_expire);
                if (rc < 0) {
                        UC_CACHE_CLEAR_ACQUIRING(entry);
                        UC_CACHE_SET_INVALID(entry);
                        wake_up_all(&entry->ue_waitq);
                        if (unlikely(rc == -EREMCHG)) {
                                put_entry(cache, entry);
                                GOTO(out, entry = ERR_PTR(rc));
                        }
                }
        }
        /* someone (and only one) is doing an upcall on this item;
         * wait for it to complete */
        if (UC_CACHE_IS_ACQUIRING(entry)) {
                long expiry = (entry == new) ?
                              cfs_time_seconds(cache->uc_acquire_expire) :
                              MAX_SCHEDULE_TIMEOUT;
                long left;

                init_waitqueue_entry(&wait, current);
                add_wait_queue(&entry->ue_waitq, &wait);
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock(&cache->uc_lock);

                left = schedule_timeout(expiry);

                spin_lock(&cache->uc_lock);
                remove_wait_queue(&entry->ue_waitq, &wait);
                if (UC_CACHE_IS_ACQUIRING(entry)) {
                        /* we were interrupted, or the upcall failed midway */
                        rc = left > 0 ? -EINTR : -ETIMEDOUT;
                        CERROR("acquire for key "LPU64": error %d\n",
                               entry->ue_key, rc);
                        put_entry(cache, entry);
                        GOTO(out, entry = ERR_PTR(rc));
                }
        }

        /* invalid means error, don't need to try again */
        if (UC_CACHE_IS_INVALID(entry)) {
                put_entry(cache, entry);
                GOTO(out, entry = ERR_PTR(-EIDRM));
        }

        /* check expired
         * We can't refresh the existing entry because its memory
         * might be shared by multiple processes.
         */
        if (check_unlink_entry(cache, entry)) {
                /* If expired, try again. But if this entry was created
                 * by us yet expired too quickly without any error, give
                 * it at least one chance to be used.
                 */
                if (entry != new) {
                        put_entry(cache, entry);
                        spin_unlock(&cache->uc_lock);
                        new = NULL;
                        goto find_again;
                }
        }

        /* Now we know it's good */
out:
        spin_unlock(&cache->uc_lock);
        return entry;
}
EXPORT_SYMBOL(upcall_cache_get_entry);
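/*
 * A minimal caller sketch (hypothetical: "my_cache" stands in for a cache
 * created with upcall_cache_init(); error handling beyond the lookup is
 * elided):
 *
 *	struct upcall_cache_entry *entry;
 *
 *	entry = upcall_cache_get_entry(my_cache, key, args);
 *	if (IS_ERR(entry))
 *		return PTR_ERR(entry);
 *	... use the cached data attached to the entry ...
 *	upcall_cache_put_entry(my_cache, entry);
 */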

void upcall_cache_put_entry(struct upcall_cache *cache,
                            struct upcall_cache_entry *entry)
{
        if (!entry)
                return;

        LASSERT(atomic_read(&entry->ue_refcount) > 0);
        spin_lock(&cache->uc_lock);
        put_entry(cache, entry);
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_put_entry);

int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                          void *args)
{
        struct upcall_cache_entry *entry = NULL;
        struct list_head *head;
        int found = 0, rc = 0;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (downcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        get_entry(entry);
                        break;
                }
        }

        if (!found) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
                       cache->uc_name, key);
                /* not found; this can legitimately happen */
                spin_unlock(&cache->uc_lock);
                return -EINVAL;
        }

        if (err) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" returned %d\n",
                       cache->uc_name, entry->ue_key, err);
                GOTO(out, rc = -EINVAL);
        }

        if (!UC_CACHE_IS_ACQUIRING(entry)) {
                CDEBUG(D_RPCTRACE, "%s: found uptodate entry %p (key "LPU64")\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = 0);
        }

        if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
                CERROR("%s: found a stale entry %p (key "LPU64") in ioctl\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = -EINVAL);
        }

        spin_unlock(&cache->uc_lock);
        if (cache->uc_ops->parse_downcall)
                rc = cache->uc_ops->parse_downcall(cache, entry, args);
        spin_lock(&cache->uc_lock);
        if (rc)
                GOTO(out, rc);

        entry->ue_expire = cfs_time_shift(cache->uc_entry_expire);
        UC_CACHE_SET_VALID(entry);
        CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n",
               cache->uc_name, entry, entry->ue_key);
out:
        if (rc) {
                UC_CACHE_SET_INVALID(entry);
                list_del_init(&entry->ue_hash);
        }
        UC_CACHE_CLEAR_ACQUIRING(entry);
        spin_unlock(&cache->uc_lock);
        wake_up_all(&entry->ue_waitq);
        put_entry(cache, entry);

        return rc;
}
EXPORT_SYMBOL(upcall_cache_downcall);
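/*
 * How the round trip fits together (a sketch; the exact channel the
 * userspace helper uses to reply is defined by the ops owner, not here):
 *
 *	1. upcall_cache_get_entry() misses, allocates a NEW entry and calls
 *	   uc_ops->do_upcall(), which typically launches a userspace helper.
 *	2. The caller sleeps on entry->ue_waitq while ACQUIRING is set.
 *	3. The helper writes its answer back (e.g. via an ioctl handler),
 *	   which ends up calling upcall_cache_downcall() with the same key;
 *	   uc_ops->parse_downcall() fills in the entry.
 *	4. The entry is marked VALID (or INVALID on error) and all waiters
 *	   are woken via wake_up_all(&entry->ue_waitq).
 */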
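/*
 * Walk every hash chain and drop entries.  With force set, all entries
 * are expected to be unreferenced and are freed outright; without it,
 * busy entries are only marked EXPIRED so the final put_entry() frees them.
 */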
static void cache_flush(struct upcall_cache *cache, int force)
{
        struct upcall_cache_entry *entry, *next;
        int i;

        spin_lock(&cache->uc_lock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
                list_for_each_entry_safe(entry, next,
                                         &cache->uc_hashtable[i], ue_hash) {
                        if (!force && atomic_read(&entry->ue_refcount)) {
                                UC_CACHE_SET_EXPIRED(entry);
                                continue;
                        }
                        LASSERT(!atomic_read(&entry->ue_refcount));
                        free_entry(cache, entry);
                }
        }
        spin_unlock(&cache->uc_lock);
}

void upcall_cache_flush_idle(struct upcall_cache *cache)
{
        cache_flush(cache, 0);
}
EXPORT_SYMBOL(upcall_cache_flush_idle);

void upcall_cache_flush_all(struct upcall_cache *cache)
{
        cache_flush(cache, 1);
}
EXPORT_SYMBOL(upcall_cache_flush_all);

void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
        struct list_head *head;
        struct upcall_cache_entry *entry;
        int found = 0;

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, cur %lu, ex %ld/%ld\n",
                      cache->uc_name, entry, entry->ue_key,
                      atomic_read(&entry->ue_refcount), entry->ue_flags,
                      get_seconds(), entry->ue_acquire_expire,
                      entry->ue_expire);
                UC_CACHE_SET_EXPIRED(entry);
                if (!atomic_read(&entry->ue_refcount))
                        free_entry(cache, entry);
        }
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);

struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
                                       struct upcall_cache_ops *ops)
{
        struct upcall_cache *cache;
        int i;

        LIBCFS_ALLOC(cache, sizeof(*cache));
        if (!cache)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&cache->uc_lock);
        rwlock_init(&cache->uc_upcall_rwlock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
                INIT_LIST_HEAD(&cache->uc_hashtable[i]);
        strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
        /* upcall pathname proc tunable */
        strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
        cache->uc_entry_expire = 20 * 60;	/* entries valid for 20 min */
        cache->uc_acquire_expire = 30;		/* upcall timeout, seconds */
        cache->uc_ops = ops;

        return cache;
}
EXPORT_SYMBOL(upcall_cache_init);
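/*
 * Lifecycle sketch ("my_upcall_ops" and the helper path are hypothetical;
 * a real user supplies its own upcall_cache_ops with the callbacks
 * referenced throughout this file):
 *
 *	static struct upcall_cache_ops my_upcall_ops = {
 *		.init_entry	  = ...,
 *		.free_entry	  = ...,
 *		.upcall_compare	  = ...,
 *		.downcall_compare = ...,
 *		.do_upcall	  = ...,
 *		.parse_downcall	  = ...,
 *	};
 *
 *	cache = upcall_cache_init("my_cache", "/usr/sbin/my_upcall",
 *				  &my_upcall_ops);
 *	if (IS_ERR(cache))
 *		return PTR_ERR(cache);
 *	...
 *	upcall_cache_cleanup(cache);
 */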

void upcall_cache_cleanup(struct upcall_cache *cache)
{
        if (!cache)
                return;
        upcall_cache_flush_all(cache);
        LIBCFS_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(upcall_cache_cleanup);