/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * libcfs/libcfs/upcall_cache.c
 *
 * Supplementary groups cache.
 */

#define DEBUG_SUBSYSTEM S_SEC

#include "../../include/linux/libcfs/lucache.h"

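/*
 * A generic cache for the results of an upcall: entries are hashed by a
 * 64-bit key into uc_hashtable.  A lookup miss triggers the upcall through
 * cache->uc_ops->do_upcall() (typically a userspace helper named by
 * uc_upcall); the requesting thread then sleeps on the entry's waitqueue
 * until the matching downcall arrives via upcall_cache_downcall(), or until
 * the acquire timeout expires.  Entry lookup and state transitions are
 * serialized by cache->uc_lock.
 */
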
static struct upcall_cache_entry *alloc_entry(struct upcall_cache *cache,
                                              __u64 key, void *args)
{
        struct upcall_cache_entry *entry;

        LIBCFS_ALLOC(entry, sizeof(*entry));
        if (!entry)
                return NULL;

        UC_CACHE_SET_NEW(entry);
        INIT_LIST_HEAD(&entry->ue_hash);
        entry->ue_key = key;
        atomic_set(&entry->ue_refcount, 0);
        init_waitqueue_head(&entry->ue_waitq);
        if (cache->uc_ops->init_entry)
                cache->uc_ops->init_entry(entry, args);
        return entry;
}

/* protected by cache lock */
static void free_entry(struct upcall_cache *cache,
                       struct upcall_cache_entry *entry)
{
        if (cache->uc_ops->free_entry)
                cache->uc_ops->free_entry(cache, entry);

        list_del(&entry->ue_hash);
        CDEBUG(D_OTHER, "destroy cache entry %p for key "LPU64"\n",
               entry, entry->ue_key);
        LIBCFS_FREE(entry, sizeof(*entry));
}

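/*
 * Comparison helpers: a candidate entry must carry the requested ue_key;
 * the cache user can refine the match through the optional upcall_compare()
 * and downcall_compare() ops.
 */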
static inline int upcall_compare(struct upcall_cache *cache,
                                 struct upcall_cache_entry *entry,
                                 __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->upcall_compare)
                return cache->uc_ops->upcall_compare(cache, entry, key, args);
        return 0;
}

static inline int downcall_compare(struct upcall_cache *cache,
                                   struct upcall_cache_entry *entry,
                                   __u64 key, void *args)
{
        if (entry->ue_key != key)
                return -1;

        if (cache->uc_ops->downcall_compare)
                return cache->uc_ops->downcall_compare(cache, entry, key, args);
        return 0;
}

static inline void get_entry(struct upcall_cache_entry *entry)
{
        atomic_inc(&entry->ue_refcount);
}

static inline void put_entry(struct upcall_cache *cache,
                             struct upcall_cache_entry *entry)
{
        if (atomic_dec_and_test(&entry->ue_refcount) &&
            (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry))) {
                free_entry(cache, entry);
        }
}

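/*
 * Unlink an entry that is no longer usable: expired, past its acquire
 * timeout, or invalid.  Returns 1 if the entry was unlinked from the hash
 * (and freed, when unreferenced), 0 if it is still valid or still being
 * acquired.  Called with cache->uc_lock held.
 */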
static int check_unlink_entry(struct upcall_cache *cache,
                              struct upcall_cache_entry *entry)
{
        if (UC_CACHE_IS_VALID(entry) &&
            time_before(cfs_time_current(), entry->ue_expire))
                return 0;

        if (UC_CACHE_IS_ACQUIRING(entry)) {
                if (entry->ue_acquire_expire == 0 ||
                    time_before(cfs_time_current(), entry->ue_acquire_expire))
                        return 0;

                UC_CACHE_SET_EXPIRED(entry);
                wake_up_all(&entry->ue_waitq);
        } else if (!UC_CACHE_IS_INVALID(entry)) {
                UC_CACHE_SET_EXPIRED(entry);
        }

        list_del_init(&entry->ue_hash);
        if (!atomic_read(&entry->ue_refcount))
                free_entry(cache, entry);
        return 1;
}

static inline int refresh_entry(struct upcall_cache *cache,
                                struct upcall_cache_entry *entry)
{
        LASSERT(cache->uc_ops->do_upcall);
        return cache->uc_ops->do_upcall(cache, entry);
}

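/*
 * Look up (or create) the entry for @key and return it with a reference
 * held; check the result with IS_ERR().  A new entry triggers the upcall,
 * and the caller sleeps until the downcall completes or the acquire timeout
 * fires.  Drop the reference with upcall_cache_put_entry().
 */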
struct upcall_cache_entry *upcall_cache_get_entry(struct upcall_cache *cache,
                                                  __u64 key, void *args)
{
        struct upcall_cache_entry *entry = NULL, *new = NULL, *next;
        struct list_head *head;
        wait_queue_t wait;
        int rc, found;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];
find_again:
        found = 0;
        spin_lock(&cache->uc_lock);
        list_for_each_entry_safe(entry, next, head, ue_hash) {
                /* check invalid & expired items */
                if (check_unlink_entry(cache, entry))
                        continue;
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (!found) {
                if (!new) {
                        /* allocate outside the lock, then retry the lookup */
                        spin_unlock(&cache->uc_lock);
                        new = alloc_entry(cache, key, args);
                        if (!new) {
                                CERROR("fail to alloc entry\n");
                                return ERR_PTR(-ENOMEM);
                        }
                        goto find_again;
                } else {
                        list_add(&new->ue_hash, head);
                        entry = new;
                }
        } else {
                if (new) {
                        free_entry(cache, new);
                        new = NULL;
                }
                list_move(&entry->ue_hash, head);
        }
        get_entry(entry);

        /* acquire for new one */
        if (UC_CACHE_IS_NEW(entry)) {
                UC_CACHE_SET_ACQUIRING(entry);
                UC_CACHE_CLEAR_NEW(entry);
                spin_unlock(&cache->uc_lock);
                rc = refresh_entry(cache, entry);
                spin_lock(&cache->uc_lock);
                entry->ue_acquire_expire =
                        cfs_time_shift(cache->uc_acquire_expire);
                if (rc < 0) {
                        UC_CACHE_CLEAR_ACQUIRING(entry);
                        UC_CACHE_SET_INVALID(entry);
                        wake_up_all(&entry->ue_waitq);
                        if (unlikely(rc == -EREMCHG)) {
                                put_entry(cache, entry);
                                GOTO(out, entry = ERR_PTR(rc));
                        }
                }
        }

        /* someone (and only one) is doing the upcall for this item,
         * wait for it to complete */
        if (UC_CACHE_IS_ACQUIRING(entry)) {
                long expiry = (entry == new) ?
                              cfs_time_seconds(cache->uc_acquire_expire) :
                              MAX_SCHEDULE_TIMEOUT;
                long left;

                init_waitqueue_entry(&wait, current);
                add_wait_queue(&entry->ue_waitq, &wait);
                set_current_state(TASK_INTERRUPTIBLE);
                spin_unlock(&cache->uc_lock);

                left = schedule_timeout(expiry);

                spin_lock(&cache->uc_lock);
                remove_wait_queue(&entry->ue_waitq, &wait);
                if (UC_CACHE_IS_ACQUIRING(entry)) {
                        /* we were interrupted or the upcall failed
                         * in the middle */
                        rc = left > 0 ? -EINTR : -ETIMEDOUT;
                        CERROR("acquire for key "LPU64": error %d\n",
                               entry->ue_key, rc);
                        put_entry(cache, entry);
                        GOTO(out, entry = ERR_PTR(rc));
                }
        }

        /* invalid means error, don't need to try again */
        if (UC_CACHE_IS_INVALID(entry)) {
                put_entry(cache, entry);
                GOTO(out, entry = ERR_PTR(-EIDRM));
        }

        /* check expired
         * We can't refresh the existing one because some
         * memory might be shared by multiple processes.
         */
        if (check_unlink_entry(cache, entry)) {
                /* if expired, try again.  But if this entry was just created
                 * by us and expired too quickly without any error, it should
                 * at least get a chance to be used once. */
                if (entry != new) {
                        put_entry(cache, entry);
                        spin_unlock(&cache->uc_lock);
                        new = NULL;
                        goto find_again;
                }
        }

        /* Now we know it's good */
out:
        spin_unlock(&cache->uc_lock);
        return entry;
}
EXPORT_SYMBOL(upcall_cache_get_entry);

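/*
 * Drop a reference obtained from upcall_cache_get_entry().  The entry is
 * freed once it is unreferenced and marked invalid or expired.
 */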
void upcall_cache_put_entry(struct upcall_cache *cache,
                            struct upcall_cache_entry *entry)
{
        if (!entry)
                return;

        LASSERT(atomic_read(&entry->ue_refcount) > 0);
        spin_lock(&cache->uc_lock);
        put_entry(cache, entry);
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_put_entry);

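/*
 * Complete a pending upcall for @key: find the waiting entry, let the cache
 * user parse the payload through parse_downcall(), mark the entry valid (or
 * invalid on error) and wake up all waiters.
 */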
int upcall_cache_downcall(struct upcall_cache *cache, __u32 err, __u64 key,
                          void *args)
{
        struct upcall_cache_entry *entry = NULL;
        struct list_head *head;
        int found = 0, rc = 0;

        LASSERT(cache);

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (downcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        get_entry(entry);
                        break;
                }
        }

        if (!found) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" not expected\n",
                       cache->uc_name, key);
                /* not found; that can legitimately happen */
                spin_unlock(&cache->uc_lock);
                return -EINVAL;
        }

        if (err) {
                CDEBUG(D_OTHER, "%s: upcall for key "LPU64" returned %d\n",
                       cache->uc_name, entry->ue_key, err);
                GOTO(out, rc = -EINVAL);
        }

        if (!UC_CACHE_IS_ACQUIRING(entry)) {
                CDEBUG(D_RPCTRACE, "%s: found uptodate entry %p (key "LPU64")\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = 0);
        }

        if (UC_CACHE_IS_INVALID(entry) || UC_CACHE_IS_EXPIRED(entry)) {
                CERROR("%s: found a stale entry %p (key "LPU64") in ioctl\n",
                       cache->uc_name, entry, entry->ue_key);
                GOTO(out, rc = -EINVAL);
        }

        spin_unlock(&cache->uc_lock);
        if (cache->uc_ops->parse_downcall)
                rc = cache->uc_ops->parse_downcall(cache, entry, args);
        spin_lock(&cache->uc_lock);
        if (rc)
                GOTO(out, rc);

        entry->ue_expire = cfs_time_shift(cache->uc_entry_expire);
        UC_CACHE_SET_VALID(entry);
        CDEBUG(D_OTHER, "%s: created upcall cache entry %p for key "LPU64"\n",
               cache->uc_name, entry, entry->ue_key);
out:
        if (rc) {
                UC_CACHE_SET_INVALID(entry);
                list_del_init(&entry->ue_hash);
        }
        UC_CACHE_CLEAR_ACQUIRING(entry);
        spin_unlock(&cache->uc_lock);
        wake_up_all(&entry->ue_waitq);
        put_entry(cache, entry);

        return rc;
}
EXPORT_SYMBOL(upcall_cache_downcall);

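/*
 * Drop cached entries.  With @force set every entry is freed regardless of
 * its reference count; otherwise busy entries are only marked expired and
 * are freed when their last reference is put.
 */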
static void cache_flush(struct upcall_cache *cache, int force)
{
        struct upcall_cache_entry *entry, *next;
        int i;

        spin_lock(&cache->uc_lock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++) {
                list_for_each_entry_safe(entry, next,
                                         &cache->uc_hashtable[i], ue_hash) {
                        if (!force && atomic_read(&entry->ue_refcount)) {
                                UC_CACHE_SET_EXPIRED(entry);
                                continue;
                        }

                        LASSERT(!atomic_read(&entry->ue_refcount));
                        free_entry(cache, entry);
                }
        }
        spin_unlock(&cache->uc_lock);
}

void upcall_cache_flush_idle(struct upcall_cache *cache)
{
        cache_flush(cache, 0);
}
EXPORT_SYMBOL(upcall_cache_flush_idle);

void upcall_cache_flush_all(struct upcall_cache *cache)
{
        cache_flush(cache, 1);
}
EXPORT_SYMBOL(upcall_cache_flush_all);

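/*
 * Expire the single entry matching @key (and @args, as judged by
 * upcall_compare()), freeing it immediately if it is unreferenced.
 */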
void upcall_cache_flush_one(struct upcall_cache *cache, __u64 key, void *args)
{
        struct list_head *head;
        struct upcall_cache_entry *entry;
        int found = 0;

        head = &cache->uc_hashtable[UC_CACHE_HASH_INDEX(key)];

        spin_lock(&cache->uc_lock);
        list_for_each_entry(entry, head, ue_hash) {
                if (upcall_compare(cache, entry, key, args) == 0) {
                        found = 1;
                        break;
                }
        }

        if (found) {
                CWARN("%s: flush entry %p: key "LPU64", ref %d, fl %x, cur %lu, ex %ld/%ld\n",
                      cache->uc_name, entry, entry->ue_key,
                      atomic_read(&entry->ue_refcount), entry->ue_flags,
                      get_seconds(), entry->ue_acquire_expire,
                      entry->ue_expire);
                UC_CACHE_SET_EXPIRED(entry);
                if (!atomic_read(&entry->ue_refcount))
                        free_entry(cache, entry);
        }
        spin_unlock(&cache->uc_lock);
}
EXPORT_SYMBOL(upcall_cache_flush_one);

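/*
 * Allocate and initialize a cache instance.  Defaults: entries stay valid
 * for 20 minutes (uc_entry_expire) and an upcall is given 30 seconds
 * (uc_acquire_expire) before waiters time out; both values are in seconds.
 */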
struct upcall_cache *upcall_cache_init(const char *name, const char *upcall,
                                       struct upcall_cache_ops *ops)
{
        struct upcall_cache *cache;
        int i;

        LIBCFS_ALLOC(cache, sizeof(*cache));
        if (!cache)
                return ERR_PTR(-ENOMEM);

        spin_lock_init(&cache->uc_lock);
        rwlock_init(&cache->uc_upcall_rwlock);
        for (i = 0; i < UC_CACHE_HASH_SIZE; i++)
                INIT_LIST_HEAD(&cache->uc_hashtable[i]);
        strncpy(cache->uc_name, name, sizeof(cache->uc_name) - 1);
        /* upcall pathname proc tunable */
        strncpy(cache->uc_upcall, upcall, sizeof(cache->uc_upcall) - 1);
        cache->uc_entry_expire = 20 * 60;
        cache->uc_acquire_expire = 30;
        cache->uc_ops = ops;

        return cache;
}
EXPORT_SYMBOL(upcall_cache_init);

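/*
 * Tear down a cache created by upcall_cache_init(): flush every entry and
 * free the cache itself.
 */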
void upcall_cache_cleanup(struct upcall_cache *cache)
{
        if (!cache)
                return;
        upcall_cache_flush_all(cache);
        LIBCFS_FREE(cache, sizeof(*cache));
}
EXPORT_SYMBOL(upcall_cache_cleanup);